input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
ddt) list."""
if self._status == 0:
assert type(self._n_div) is int, (
"The number of division must be defined and it must be an integer type."
);
assert self._d_set and hasattr(self._d_set, '__iter__'), (
"The dataset must be not null and iterable type."
);
size = len(self._d_set);
ndiv = self._n_div;
# division
def f(size, ndiv):
q1 = int(size / ndiv);
r = size - q1 * ndiv;
s2 = r;
s1 = ndiv - s2;
return (s1, q1), (s2, q1 + 1);
(s1, n1), (s2, n2) = f(size, ndiv);
k = 0;
for i in range(s1):
# info(f'{k = } {len(range(k, (k + n1))) = }');
yield (self.dexc,
{
'dset' : self._d_set,
'dx' : range(k, (k + n1)),
},
);
k = k + n1;
for i in range(s2):
# info(f'{k = } {len(range(k, (k + n2))) = }');
yield (self.dexc,
{
'dset' : self._d_set,
'dx' : range(k, (k + n2)),
},
);
k = k + n2;
elif self._status == 1:
yield (self.exec, self._local_data,);
else:
raise StopIteration();
class ProcSeq(BaseProc):
    ## This class represents the structure of a sequence of processings to
    ## execute in a process. The execution is powered by the kernel
    ## (implemented later). Instances of this class are iterable over the
    ## processings they contain.
    def __init__(self, name=None):
        """Constructor of an instance of sequence of processing.

        :param name: optional name forwarded to the BaseProc constructor.
        """
        super(ProcSeq, self).__init__(name=name);
        # List of the processing instances contained in this sequence.
        self.__procs = [];
    @property
    def procs(self):
        """Read-only access to the internal processing list."""
        return self.__procs;
    def add_proc(self, proc):
        """Recursively add a processing — or a list/tuple of processings —
        to this sequence.

        :param proc: a BaseProc instance, or a list/tuple of them.
        :return: the `proc` argument itself.
        :raises AssertionError: when a non-BaseProc element is given.
        """
        # Accept tuples as well as lists; nested collections are flattened
        # recursively (backward compatible: tuples previously failed the
        # isinstance assert below).
        if isinstance(proc, (list, tuple)):
            for p in proc: self.add_proc(p);
        else:
            assert isinstance(proc, BaseProc), (
                "This argument must be a processing instance."
            );
            self.__procs.append(proc);
        return proc;
    def init_f(self, state: object):
        """Preprocessing hook of the processing sequence, run before the
        individual processings. Intended to be overridden by the programmer;
        the default is a no-op."""
        pass;
    def __iter__(self):
        """Iterate over the processings registered in this sequence."""
        return iter(self.__procs);
class Inst(object):
    ## This object represents an elementary instruction executable by the
    ## processor: a callable together with the positional arguments to
    ## invoke it with.
    def __init__(self, f, args: tuple):
        """Constructor of the elementary executable instruction.

        :param f: the callable to execute.
        :param args: tuple of positional arguments for `f`.
        :raises AssertionError: when `f` is not callable.
        """
        super(Inst, self).__init__();
        # We check if the function passed in argument is callable
        # (fixed the "argement" typo in the error message below).
        assert callable(f), (
            "The `f` argument must be a callable function"
        );
        self.__f = f;
        self.__args = args;
    @property
    def f(self):
        """The wrapped callable."""
        return self.__f;
    @property
    def args(self):
        """The argument tuple for the wrapped callable."""
        return self.__args;
class Processor(object):
    # This is the structure of a processor.
    # This object executes the instructions it receives, using a thread pool
    # sized from the available CPU count.
    def __init__(self, cpuc=None):
        """Constructor of a processor.

        :param cpuc: optional explicit CPU count; defaults to os.cpu_count().
        """
        super(Processor, self).__init__();
        # Internal variables of processor
        self.__status = None;
        # self.__odc = OrdinalCounter();
        # Defining of CPU count
        # =====================
        #
        # NOTE: it is better to keep the CPU count lower than the CPU count
        # of the physical processor.
        if cpuc is None: self.__cpu_count = os.cpu_count();
        else: self.__cpu_count = cpuc;
        # One CPU is reserved for the current thread, but we always keep at
        # least one worker: ThreadPoolExecutor raises ValueError when
        # max_workers == 0, which happened here on single-CPU machines.
        self.__cpu_count = max(1, self.__cpu_count - 1);
        self.__cpu = cf.ThreadPoolExecutor(max_workers=self.__cpu_count);
        # Maps each submitted future to its submission index.
        self.__future_map = {};
        info("%16d | CPU count" % (self.__cpu_count,));
        # The callback functions (identity functions by default).
        self.__eicb = lambda x: x;
        self.__ecb = lambda y: y;
    def put(self, f, args):
        """Submit the call `f(*args)` to the thread pool.

        :return: True (the future is tracked internally).
        """
        self.__future_map[self.__cpu.submit(f, *args)] = len(self.__future_map);
        return True;
    def set_eicb(self, cb):
        """Function of end task callback setting.

        :raises AssertionError: when `cb` is not callable.
        """
        assert callable(cb), (
            "This function is not callable."
        );
        self.__eicb = cb;
        return cb;
    def set_ecb(self, cb):
        """Function of end callback setting.

        :raises AssertionError: when `cb` is not callable.
        """
        assert callable(cb), (
            "This function is not callable."
        );
        self.__ecb = cb;
        return cb;
    def recr(self):
        """Yield the results of the submitted tasks as they complete."""
        for future in cf.as_completed(self.__future_map):
            yield future.result();
    def free(self):
        """Release the thread pool resources.

        BUGFIX: previously only `del self.__cpu` — the executor was never
        shut down, so worker threads could linger until interpreter exit.
        """
        self.__cpu.shutdown();
        del self.__cpu;
        return True;
class Kernel(CProcess):
# This structure represent the kernel. The sheduler of process of processing.
# His role is to allocate an unique process foreach processing to execute.
# A kernel is also a process.
    def __init__(self, *args, **kwargs):
        """Constructor of the kernel instance.

        Forwards all arguments to the CProcess base constructor.
        """
        super(Kernel, self).__init__(*args, **kwargs);
        self.__status = None; # The kernel's status
        self.__exqueue = Queue() # Exchange queue between processes
        self.__process = {}; # Dictionary of the Process instances powered by
                             # this kernel, keyed by their result Queue
                             # (see start_proc)
    @property
    def status(self):
        # Read-only access to the kernel's internal status flag.
        return self.__status;
def get_process_ins(q: Queue):
"""Function that is used to return a process instance using his PID
It's return False, if the process instance is not exists."""
return self.__process.get(q, False);
    def start_proc(self, proc: ProcSeq, state: object):
        """Function that is used to start a processing in a new process.

        :param proc: the processing sequence to execute; must not be None.
        :param state: the initial state object forwarded to the processing.
        :return: a (Process, Queue) pair; the queue later receives the
            (state, logger) result tuple (see __start_exec / wait_result).
        """
        # if proc is not null, then we can continue
        assert proc is not None, (
            """The `proc` which represents the processing instance must be not None."""
        );
        # we can prepare and start the process of our processing and try to send
        # the initialize state to it.
        q = Queue();
        p = Process(target=Kernel.__start_exec, args=(self, proc, state, q));
        # pid = len(self.__process);
        # self.__process[pid] = p;
        # Register the child process under its result queue before starting it.
        self.__process[q] = p;
        p.start();
        # (a former 10ms settle delay before returning was disabled)
        # time.sleep(0.010);
        return p, q;
    def exec_proc(self, proc: ProcSeq, state: object):
        """Execute a processing sequence directly in the current process,
        unlike start_proc which spawns a new one.

        :return: whatever the private __exec implementation returns —
            presumably a (state, logger) pair as unpacked in __start_exec;
            __exec is defined outside this view, confirm there.
        """
        return self.__exec(proc, state);
    def __start_exec(self, procs: ProcSeq, state: object, q: Queue):
        """Run a processing sequence and push its (state, logger) result on
        the queue. Used as the target of the Process spawned by start_proc.
        """
        state, logger = self.__exec(procs, state);
        # NOTE(review): this runs in the spawned child process, so deleting
        # the entry here mutates the child's copy of __process, not the
        # parent's registry — confirm the parent side is cleaned up elsewhere.
        del self.__process[q];
        q.put((state, logger));
def wait_result(self, q: Queue):
"""Function used to wait and get the returned resusult of processing sequence."""
assert q is not None, (
"None type is not authorized."
);
try:
# while q.empty(): pass;
result = q.get();
q.close();
del q
return result;
except KeyboardInterrupt:
r = input("\n\n[?] Do you want to exit this program ? [y/n] ");
if r.lower() == 'y'\
or r.lower() == 'yes':
sys.exit(0);
return False;
# def __get_insts(self, proc: Proc, state: object):
    # """This function allows you to extract from a processing the elementary
    # instructions to send to the ordinal counter of our processor."""
# # If the processing instance is a MultProc, then we segment it according
# # to each element of the dataset.
# # Exemple:
# # FOR data in dataset
# # proc_f(data);
# #
# # If the processing instance is a simple Proc, the we consider the proc_f
# # implementation for an elementary instruction.
# assert proc is not None, (
# "The processing instance passed by argument must be not None."
# );
# # we define the instruction lists
# insts = [];
# # in first, we check if the processing is an instance of MultProc
# # if it's the case, then we apply the segmentation according to each
# # element of the dataset
# # if isinstance(proc, MulProc):
# # for data in proc:
# # q = Queue();
# # q.put({'state': initstate, 'dset': data});
# #
# # inst = Inst(proc.exec, (q,));
# # insts.append(inst);
# #
# if isinstance(proc, Proc):
# k = 0;
# for _f_, args in proc:
# assert callable(_f_), (
# "The function returned by processing must be callable."
# );
# inst = Inst(_f_, (state, args));
# insts.append(inst);
# else:
# raise TypeError(
# "The processing instance must be a Proc type."
# );
# info("Recovery of elementary instruction for ordinal counter... DONE !");
# return insts;
# def __exec_with_processor(self, insts):
# """Function which executes an instructions list with a processor instance"""
# # we get a new processor instance, and we initialize his ordinal counter
# # with elementary instructions.
# processor = Processor();
# info("Loading of instruction into ordinal counter...");
# processor.odc.add_inst(insts);
# insts.clear();
# del insts;
# info("Execution started ...");
# return processor.exec();
def __elreg(self, e: Exception, logger: Logger):
if not isinstance(e, StopProcessing):
if len(e.args) > 0: logger.err(Error(message=e.args[0]).show());
else:
logger.err(Error(message=f"{str(type(e))} type error is detected.").show());
return logger;
else:
raise StopProcessing();
def __procexf(self, procs: ProcSeq, state: object, processor: Processor, logger: Logger):
returned = None;
try:
returned = procs.init_f(state);
except Exception as e:
logger = self.__elreg(e, logger);
if STOPONFE:
raise StopProcessing();
for proc in procs:
try:
if isinstance(proc, Proc):
# execution of initalization function of processing
info(f"[{proc.name}] Processing started ...");
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui # Import the PyQt4 module we'll need
import sys # We need sys so that we can pass argv to QApplication
import os
import subprocess # So pyoperant can run for each box without blocking the rest of the GUI
import serial # To connect directly to Teensys for water control
import time
import threading # Support subprocess, allow error messages to be passed out of the subprocess
import Queue # Support subprocess, allow error messages to be passed out of the subprocess
import pyudev # device monitoring to identify connected Teensys
import re # Regex, for parsing device names returned from pyudev to identify connected Teensys
import argparse # Parse command line arguments for GUI, primarily to enable debug mode
from shutil import copyfile # For creating new json file by copying another
import logging, traceback
import datetime as dt # For auto sleep
# import string # for modifying strings from the data
# import collections # allows use of ordered dictionaries
import io # for copying cells from analysis table
from contextlib import contextmanager # facilitates simple implementation of 'waiting' mouse cursor when loading
import gc # garbage collection, to help prevent memory leak
import pyaudio # soundcheck functionality
import wave # soundcheck functionality
import numpy # calculating rms
import scipy # calculating specific frequencies
from scipy.io import wavfile
from pyoperant import analysis, utils # Analysis creates the data summary tables
import csv # For exporting data summaries as csv files
try:
import simplejson as json
except ImportError:
import json
try: # Allows proper formatting of UTF-8 characters from summaryDAT file
_from_utf8 = QtCore.QString.fromUtf8
except AttributeError:
def _from_utf8(s):
return s
import pyoperant_gui_layout
def _log_except_hook(*exc_info): # How uncaught errors are handled
text = "".join(traceback.format_exception(*exc_info))
print(text) # print out in case email log isn't working
logging.error("Unhandled exception: {}".format(text))
class PyoperantGui(QtGui.QMainWindow, pyoperant_gui_layout.UiMainWindow):
"""
Main class for running the pyoperant GUI.
- subprocessBox is a variable that tracks the subprocess ID of a subprocess. In this case specifically,
it tracks the pyoperant subprocess. It is set to 0 when the subprocess has been stopped and should not be running
(i.e. if user clicked "stop box" or pyoperant crashed, which was caught by the GUI.
- Alternatively it gets set to 1 if the box should be set to 'sleep' mode, meaning pyoperant should be stopped
temporarily and restarted in the morning. This was added to help combat the intermittent and unexplained instances
of Teensys ceasing to respond to computer input
"""
teensy_emit = QtCore.pyqtSignal(int, str)
class DeviceInfo:
# Extracts device info from pyudev output: box number, device ID, USB device number
def __init__(self, device):
deviceString = device.device_links.next()
self.log = logging.getLogger(__name__)
try:
# Get box number
deviceStringSplit = os.path.split(deviceString)
boxLink = deviceStringSplit[1]
boxLinkSplit = re.split('Board(\\d*)', boxLink)
self.boxNumber = int(boxLinkSplit[1])
self.boxIndex = self.boxNumber - 1 # Teensy numbers are indexed from 1, but box array indexed from 0
except IndexError:
self.boxNumber = None
self.boxIndex = None
self.log.error('Device Error: board not recognized')
if self.boxIndex is not None:
# Get device ID (e.g. "tty...")
self.device_path = os.path.split(device.device_path)
self.deviceID = self.device_path[1]
# Get USB port info
usbPath = device.parent.parent.device_node
usbSplit = re.split('/', usbPath)
self.usbBus = int(usbSplit[-2])
self.usbDevice = int(usbSplit[-1])
self.usbString = 'Bus {:02d}, Device {:02d}'.format(int(self.usbBus), int(self.usbDevice))
else:
self.device_path = None
self.deviceID = None
self.usbBus = None
self.usbDevice = None
self.usbString = None
def __init__(self):
super(self.__class__, self).__init__()
with wait_cursor(): # set mouse cursor to 'waiting'
# Set up layout and widgets
testing = False
# Number of boxes declared in pyoperant_gui_layout.py
if testing:
boxCount = 7
boxCoords = [(1, 0), (2, 0), (3, 0), (1, 1), (2, 1), (3, 1), (0, 1)]
gridSize = (4, 2)
# ANY VARS THAT AFFECT LAYOUT SETUP NEED TO BE DEFINED BEFORE HERE
self.setup_ui(self, box_count=boxCount, window_dim=gridSize, box_coords=boxCoords)
else:
boxCount = 6
# ANY VARS THAT AFFECT LAYOUT SETUP NEED TO BE DEFINED BEFORE HERE
self.setup_ui(self, box_count=boxCount)
self.debug = False
# region Menu bar
mainMenu = QtGui.QMenuBar()
fileMenu = mainMenu.addMenu('&File')
analyzeGuiAction = QtGui.QAction("Analy&ze", self)
analyzeGuiAction.triggered.connect(lambda _, b=1: self.analyze_performance(b))
fileMenu.addAction(analyzeGuiAction)
analyzeActiveGuiAction = QtGui.QAction("&Analyze Current", self)
analyzeActiveGuiAction.triggered.connect(lambda _, b='all': self.analyze_performance(b))
fileMenu.addAction(analyzeActiveGuiAction)
quitGuiAction = QtGui.QAction("&Quit", self)
quitGuiAction.triggered.connect(self.close)
fileMenu.addAction(quitGuiAction)
globalOptionsMenu = mainMenu.addMenu('Options')
autosleepMenu = QtGui.QMenu('Autosleep', self)
nrTrialMenu = QtGui.QMenu('NR Trials', self)
# global options for GUI
self.ui_options = {}
viewGuiLogAction = QtGui.QAction("View GUI Log", self)
viewGuiLogAction.triggered.connect(lambda _, b='guilog': self.open_text_file(0, whichfile=b))
viewGuiErrorAction = QtGui.QAction("View GUI Error Log", self)
viewGuiErrorAction.triggered.connect(lambda _, b='guierror': self.open_text_file(0, whichfile=b))
self.ui_options['use_nr_all'] = QtGui.QAction("Include NR trials (all)", self)
self.ui_options['use_nr_all'].setCheckable(True)
self.ui_options['use_nr_all'].triggered.connect(self.use_nr_trials_all)
self.ui_options['autosleep_all'] = QtGui.QAction("Enable autosleep (all)", self)
self.ui_options['autosleep_all'].setCheckable(True)
self.ui_options['autosleep_all'].setChecked(True)
self.ui_options['autosleep_all'].triggered.connect(self.auto_sleep_set_all)
globalOptionsMenu.addAction(viewGuiLogAction)
globalOptionsMenu.addAction(viewGuiErrorAction)
globalOptionsMenu.addSeparator()
globalOptionsMenu.addMenu(autosleepMenu)
globalOptionsMenu.addMenu(nrTrialMenu)
nrTrialMenu.addAction(self.ui_options['use_nr_all'])
nrTrialMenu.addSeparator()
autosleepMenu.addAction(self.ui_options['autosleep_all'])
autosleepMenu.addSeparator()
self.setMenuBar(mainMenu)
# endregion
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.refreshall)
self.timer.start(5000)
self.idleTime = 1.5 # max idle time in hours before restarting pyoperant process
self.log_config()
# region Monitor when USB devices are connected/disconnected
context = pyudev.Context()
monitor = pyudev.Monitor.from_netlink(context)
monitor.filter_by(subsystem='tty')
observer = pyudev.MonitorObserver(monitor, self.usb_monitor, name='usb-observer')
observer.daemon = True
observer.start()
self.teensy_emit.connect(
(lambda triggered_boxnumber, parameter: self.teensy_control(triggered_boxnumber, parameter)))
# endregion
# arrays for queues and threads
self.qList = [0] * self.numberOfBoxes
self.tList = [0] * self.numberOfBoxes
# self.qReadList = [0] * self.numberOfBoxes # list of queues for inputs to subprocesses
# self.tReadList = [0] * self.numberOfBoxes # list of queues for inputs to subprocesses
# Connect 'global' buttons to functions
self.startAllButton.clicked.connect(lambda: self.start_all())
self.stopAllButton.clicked.connect(lambda: self.stop_all())
self.subprocessBox = [0] * self.numberOfBoxes # stores subprocesses for pyoperant for each box
self.logProcessBox = [0] * self.numberOfBoxes # stores subprocesses for log reading for each box
self.logpathList = [0] * self.numberOfBoxes # stores log file path for each box
# region Variable setup
self.boxList = range(0, self.numberOfBoxes)
self.deviceIDList = []
self.deviceLocationList = []
self.experimentPath = ""
# Option var init
self.boxMenuList = []
self.fileSelectActionList = []
self.solenoidMenuList = []
self.primeActionList = []
self.purgeActionList = []
self.solenoidManualList = []
self.soundCheckActionList = []
self.optionsMenuList = []
self.openFolderActionList = []
self.openSettingsActionList = []
self.createNewJsonList = []
self.newBirdActionList = []
self.statsActionList = []
self.rawTrialActionList = []
self.useNRList = []
self.autoSleepList = []
self.openBoxLogActionList = []
self.lastStartList = []
self.lastTrialList = []
self.sleepScheduleList = [] # schedule is none if box not active, set when box started
self.defaultSleepSchedule = [["08:30", "22:30"]]
# endregion Variable setup
# region Individual option menu setup
"""
To add an item to the option menu:
- Add a blank list var to the "option var init" section for the action to be stored for each box
- Figure out whether the new option should be in the main option menu or in a submenu
- in the "Option Menu Setup" section, add two lines:
self.{list var}.append(QtGui.QAction({action name as str}, self)
(or QtGui.QMenu({menu name as str}))
self.{parent menu}[boxIndex].addAction(self.{list var}[boxIndex])
(or addMenu)
- If adding an action, go to the "Connect functions to buttons/objects" section and add a line to connect
the actual QAction object with the function for each box:
self.{list var}[boxNumber].triggered.connect(lambda _, b=i: self.{function}(boxIndex=b,
{other vars}))
"""
for boxIndex in self.boxList:
# Create necessary objects for each box
self.statsActionList.append(QtGui.QAction("Performance", self))
# menu-specific
self.boxMenuList.append(QtGui.QMenu())
self.fileSelectActionList.append(QtGui.QAction("&Select Settings file", self))
self.rawTrialActionList.append(QtGui.QAction("Get &Raw Trial Data", self))
self.openFolderActionList.append(QtGui.QAction("Open &Data folder", self))
self.openSettingsActionList.append(QtGui.QAction("&Open Settings file", self))
self.openBoxLogActionList.append(QtGui.QAction("Open &Log file", self))
self.createNewJsonList.append(QtGui.QAction("&New Settings file", self))
self.newBirdActionList.append(QtGui.QAction("New &Bird", self))
self.useNRList.append(QtGui.QAction("Use &NR Trials", self))
self.autoSleepList.append(QtGui.QAction("&Autosleep", self))
self.optionsMenuList.append(QtGui.QMenu("Options"))
self.solenoidMenuList.append(QtGui.QMenu("Water Control"))
self.primeActionList.append(QtGui.QAction("Prime (5s)", self))
self.purgeActionList.append(QtGui.QAction("Purge (20s)", self))
self.solenoidManualList.append(QtGui.QAction("Manual Control", self))
self.soundCheckActionList.append(QtGui.QAction("Sound Check", self))
# Reorder to change order in menu
"""
boxMenu:
Select Settings file
---
Open data folder
Open Settings file
New settings file
Water Control:
Prime
Purge
Manual
Options:
Autosleep
Use NR
---
Sound Check
---
New bird
---
Open log file
Get raw trial data
"""
self.boxMenuList[boxIndex].addAction(self.openFolderActionList[boxIndex])
self.boxMenuList[boxIndex].addSeparator()
self.boxMenuList[boxIndex].addAction(self.fileSelectActionList[boxIndex])
self.boxMenuList[boxIndex].addAction(self.openSettingsActionList[boxIndex])
self.boxMenuList[boxIndex].addAction(self.createNewJsonList[boxIndex])
self.boxMenuList[boxIndex].addSeparator()
# option submenu
self.optionsMenuList[boxIndex].addAction(self.autoSleepList[boxIndex])
self.optionsMenuList[boxIndex].addAction(self.useNRList[boxIndex])
self.optionsMenuList[boxIndex].addSeparator()
self.optionsMenuList[boxIndex].addAction(self.soundCheckActionList[boxIndex])
self.optionsMenuList[boxIndex].addSeparator()
self.optionsMenuList[boxIndex].addAction(self.newBirdActionList[boxIndex])
self.optionsMenuList[boxIndex].addSeparator()
self.optionsMenuList[boxIndex].addAction(self.openBoxLogActionList[boxIndex])
self.optionsMenuList[boxIndex].addAction(self.rawTrialActionList[boxIndex])
# Solenoid submenu
self.solenoidMenuList[boxIndex].addAction(self.primeActionList[boxIndex])
self.solenoidMenuList[boxIndex].addAction(self.purgeActionList[boxIndex])
self.solenoidMenuList[boxIndex].addAction(self.solenoidManualList[boxIndex])
self.useNRList[boxIndex].setCheckable(True)
self.autoSleepList[boxIndex].setCheckable(True)
self.autoSleepList[boxIndex].setChecked(self.ui_options['autosleep_all'].isChecked())
self.autoSleepList[boxIndex].setText('Autosleep (Box {:02d})'.format(boxIndex + 1))
autosleepMenu.addAction(self.autoSleepList[boxIndex])
self.useNRList[boxIndex].setText('Use NR Trials (Box {:02d})'.format(boxIndex + 1))
nrTrialMenu.addAction(self.useNRList[boxIndex])
# endregion
# region Other box-specific var setup
for boxIndex in self.boxList:
# Fill sleep schedule var with None to start, and fill later when box is started
self.sleepScheduleList.append(None)
self.lastStartList.append(None)
self.lastTrialList.append(None)
# Queue for running subprocesses and | |
result: {}'.format(check_result))
log_file_logger.error('#06: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#06: Check result: {}'.format(check_result))
log_file_logger.info('#06: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #06:Check:vManage:Look for any neo4j exception errors. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
    #07:Check:vManage:Validate all services are up
    log_file_logger.info('#07:Check:vManage:Validate all services are up')
    writeFile(report_file, '#07:Check:vManage:Validate all services are up\n\n')
    try:
        # criticalCheckseven() returns the full service status table, the
        # services that are enabled but not running, and the verdict triple
        # (result, analysis, recommended action).
        nms_data, nms_failed, check_result, check_analysis, check_action = criticalCheckseven()
        if check_result == 'Failed':
            # Record the failure so it appears in the final critical summary.
            critical_checks['#07:Check:vManage:Validate all services are up'] = [check_analysis, check_action]
            log_file_logger.error('#07: Check result: {}'.format(check_result))
            log_file_logger.error('#07: Check Analysis: {}'.format(check_analysis))
            log_file_logger.error('#07: List of services that are enabled but not running:\n\n {}\n\n'.format(nms_failed))
            log_file_logger.error('#07: Status of all services :\n{}\n'.format(nms_data))
            writeFile(report_file, 'Result: ERROR - {}\n\n'.format(check_analysis))
            writeFile(report_file, 'Action: {}\n\n'.format(check_action))
        else:
            log_file_logger.info('#07: Check result: {}'.format(check_result))
            log_file_logger.info('#07: Check Analysis: {}'.format(check_analysis))
            log_file_logger.info('#07: Status of all the services:\n{}\n'.format(nms_data))
            writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
    except Exception as e:
        # Never let one check abort the run: point the user at the log file
        # and record the full traceback there.
        print('\033[1;31m ERROR: Error performing #07:Check:vManage:Validate all services are up. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
        log_file_logger.exception('{}\n'.format(e))
    #08:Check:vManage:Elasticsearch Indices version
    log_file_logger.info('#08:Check:vManage:Elasticsearch Indices version')
    writeFile(report_file, '#08:Check:vManage:Elasticsearch Indices version\n\n')
    try:
        # criticalCheckeight compares the ES index versions against the
        # running vManage version and returns any stale indices.
        version_list, check_result, check_analysis, check_action = criticalCheckeight(version_tuple)
        if check_result == 'Failed':
            critical_checks['#08:Check:vManage:Elasticsearch Indices version'] = [ check_analysis, check_action]
            log_file_logger.error('#08: Check result: {}'.format(check_result))
            log_file_logger.error('#08: Check Analysis: {}'.format(check_analysis))
            log_file_logger.error('#08: List of indices with older versions: \n{}\n'.format(version_list))
            writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
            writeFile(report_file, 'Action: {}\n\n'.format(check_action))
        else:
            log_file_logger.info('#08: Check result: {}'.format(check_result))
            log_file_logger.info('#08: Check Analysis: {}\n'.format(check_analysis))
            writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
    except Exception as e:
        # Report the failure of the check itself without aborting the run.
        print('\033[1;31m ERROR: Error performing #08:Check:vManage:Elasticsearch Indices version. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
        log_file_logger.exception('{}\n'.format(e))
    #09:Check:vManage:Evaluate incoming DPI data size
    log_file_logger.info('#09:Check:vManage:Evaluate incoming DPI data size')
    writeFile(report_file, '#09:Check:vManage:Evaluate incoming DPI data size\n\n')
    try:
        # Query the ES index-size estimate API, then let criticalChecknine
        # judge the daily DPI/Approute volumes against the deployment sizing.
        es_indices_est = json.loads(getRequestpy3(version_tuple,vmanage_lo_ip, jsessionid, 'management/elasticsearch/index/size/estimate', args.vmanage_port, tokenid))
        appr_estimate_ondeday, dpi_estimate_ondeday, check_result, check_analysis,check_action = criticalChecknine(es_indices_est, server_type, cluster_size, cpu_count, total_devices, dpi_status)
        if check_result == 'Failed':
            critical_checks['#09:Check:vManage:Evaluate incoming DPI data size'] = [ check_analysis, check_action]
            log_file_logger.error('#09: Check result: {}'.format(check_result))
            log_file_logger.error('#09: Check Analysis: {}'.format(check_analysis))
            log_file_logger.error('#09: Daily incoming DPI data : {}'.format(dpi_estimate_ondeday))
            log_file_logger.error('#09: Daily incoming Approute data : {}\n'.format(appr_estimate_ondeday))
            writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
            writeFile(report_file, 'Action: {}\n\n'.format(check_action))
        else:
            log_file_logger.info('#09: Check result: {}'.format(check_result))
            log_file_logger.info('#09: Check Analysis: {}\n'.format(check_analysis))
            writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
    except Exception as e:
        # Report the failure of the check itself without aborting the run.
        print('\033[1;31m ERROR: Error performing #09:Check:vManage:Evaluate incoming DPI data size. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
        log_file_logger.exception('{}\n'.format(e))
#10:Check:vManage:NTP status across network
log_file_logger.info('#10:Check:vManage:NTP status across network')
writeFile(report_file, '#10:Check:vManage:NTP status across network\n\n')
try:
ntp_nonworking, check_result, check_analysis, check_action = criticalCheckten(version_tuple, controllers_info)
if check_result == 'Failed':
critical_checks['#10:Check:vManage:NTP status across network'] = [ check_analysis, check_action]
log_file_logger.error('#10: Check result: {}'.format(check_result))
log_file_logger.error('#10: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#10: Devices with invalid ntp associations: \n{}\n'.format(ntp_nonworking))
writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#10: Check result: {}'.format(check_result))
log_file_logger.info('#10: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error perforiming #10:Check:vManage:NTP status across network. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
log_file_logger.exception('{}\n'.format(e))
    #11:Check:Controllers:Validate vSmart/vBond CPU count for scale
    log_file_logger.info('#11:Check:Controllers:Validate vSmart/vBond CPU count for scale')
    writeFile(report_file, '#11:Check:Controllers:Validate vSmart/vBond CPU count for scale\n\n')
    try:
        # Append each controller's total_cpu_count (queried from the synced
        # device status API) to its info record before running the check.
        for vbond in vbond_info:
            output = json.loads(getRequestpy3( version_tuple,vmanage_lo_ip, jsessionid, 'device/system/synced/status?deviceId={}'.format(vbond_info[vbond][1]),args.vmanage_port, tokenid))
            total_cpu_count = int(output['data'][0]['total_cpu_count'])
            vbond_info[vbond].append(total_cpu_count)
        for vsmart in vsmart_info:
            output = json.loads(getRequestpy3( version_tuple,vmanage_lo_ip, jsessionid, 'device/system/synced/status?deviceId={}'.format(vsmart_info[vsmart][1]),args.vmanage_port,tokenid))
            total_cpu_count = int(output['data'][0]['total_cpu_count'])
            vsmart_info[vsmart].append(total_cpu_count)
        # criticalCheckeleven compares CPU counts against the scale implied
        # by the total device count.
        failed_vbonds,failed_vsmarts,check_result,check_analysis, check_action = criticalCheckeleven(total_devices, vbond_info, vsmart_info)
        if check_result == 'Failed':
            critical_checks['#11:Check:Controllers:Validate vSmart/vBond CPU count for scale'] = [ check_analysis, check_action]
            log_file_logger.error('#11: Check result: {}'.format(check_result))
            log_file_logger.error('#11: Check Analysis: {}'.format(check_analysis))
            log_file_logger.error('#11: vBonds with insufficient CPU count: \n{}'.format(failed_vbonds))
            log_file_logger.error('#11: vSmarts with insufficient CPU count: \n{}'.format(failed_vsmarts))
            log_file_logger.error('#11: All vBonds info with total_cpu_count: \n{}'.format(vbond_info))
            log_file_logger.error('#11: All vSmarts info with total_cpu_count: \n{}\n'.format(vsmart_info))
            writeFile(report_file, 'Result: ERROR - {}\n'.format(check_analysis))
            writeFile(report_file, 'Action: {}\n\n'.format(check_action))
        else:
            log_file_logger.info('#11: Check result: {}'.format(check_result))
            log_file_logger.info('#11: Check Analysis: {}'.format(check_analysis))
            log_file_logger.info('#11: All vBonds info with total_cpu_count: \n{}'.format(vbond_info))
            log_file_logger.info('#11: All vSmarts info with total_cpu_count: \n{}\n'.format(vsmart_info))
            writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
    except Exception as e:
        # Report the failure of the check itself without aborting the run.
        print('\033[1;31m ERROR: Error performing #11:Check:Controllers:Validate vSmart/vBond CPU count for scale. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
        log_file_logger.exception('{}\n'.format(e))
    #Warning Checks
    # Non-fatal findings collected separately from critical_checks; the
    # report labels these WARNING instead of ERROR.
    warning_checks = {}
    log_file_logger.info('*** Performing Warning Checks')
    #12:Check:vManage:CPU Speed
    log_file_logger.info('#12:Check:vManage:CPU Speed')
    writeFile(report_file, '#12:Check:vManage:CPU Speed\n\n')
    try:
        # warningCheckone judges the CPU clock speed gathered earlier.
        check_result,check_analysis,check_action = warningCheckone(cpu_speed)
        if check_result == 'Failed':
            warning_checks['#12:Check:vManage:CPU Speed'] = [ check_analysis, check_action]
            log_file_logger.error('#12: Check result: {}'.format(check_result))
            log_file_logger.error('#12: Check Analysis: {}'.format(check_analysis))
            log_file_logger.error('#12: CPU clock speed: {}\n'.format(cpu_speed))
            writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
            writeFile(report_file, 'Action: {}\n\n'.format(check_action))
        else:
            log_file_logger.info('#12: Check result: {}'.format(check_result))
            log_file_logger.info('#12: Check Analysis: {}\n'.format(check_analysis))
            writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
    except Exception as e:
        # Report the failure of the check itself without aborting the run.
        print('\033[1;31m ERROR: Error performing #12:Check:vManage:CPU Speed. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
        log_file_logger.exception('{}\n'.format(e))
# 13:Check:vManage:Network Card type
log_file_logger.info('#13:Check:vManage:Network Card type')
writeFile(report_file, '#13:Check:vManage:Network Card type\n\n')
try:
    eth_drivers, check_action, check_analysis, check_result = warningChecktwo()
    if check_result == 'Failed':
        warning_checks['#13:Check:vManage:Network Card type'] = [check_analysis, check_action]
        log_file_logger.error('#13: Check result: {}'.format(check_result))
        log_file_logger.error('#13: Check Analysis: {}'.format(check_analysis))
        # BUG FIX: message previously read 'Ethercardswith' (missing spaces).
        log_file_logger.error('#13: Ether cards with e1000 card types: {}\n'.format(eth_drivers))
        writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
        writeFile(report_file, 'Action: {}\n\n'.format(check_action))
    else:
        log_file_logger.info('#13: Check result: {}'.format(check_result))
        log_file_logger.info('#13: Check Analysis: {}\n'.format(check_analysis))
        writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
    print('\033[1;31m ERROR: Error performing #13:Check:vManage:Network Card type. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
    log_file_logger.exception('{}\n'.format(e))
# 14:Check:vManage:Backup status
check_name = '#14:Check:vManage:Backup status'
log_file_logger.info(check_name)
writeFile(report_file, check_name + '\n\n')
try:
    date_time_obj, check_result, check_analysis, check_action = warningCheckthree()
    if check_result == 'Failed':
        warning_checks[check_name] = [check_analysis, check_action]
        log_file_logger.error('#14: Check result: {}'.format(check_result))
        log_file_logger.error('#14: Check Analysis: {}'.format(check_analysis))
        log_file_logger.error('#14: Last Backup was performed on:{}\n'.format(date_time_obj))
        writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
        writeFile(report_file, 'Action: {}\n\n'.format(check_action))
    else:
        log_file_logger.info('#14: Check result: {}'.format(check_result))
        log_file_logger.info('#14: Check Analysis: {}\n'.format(check_analysis))
        writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
    print('\033[1;31m ERROR: Error performing #14:Check:vManage:Backup status. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
    log_file_logger.exception('{}\n'.format(e))
# 15:Check:vManage:Evaluate Neo4j performance
log_file_logger.info('#15:Check:vManage:Evaluate Neo4j performance')
writeFile(report_file, '#15:Check:vManage:Evaluate Neo4j performance\n\n')
try:
    check_result, check_analysis, check_action = warningCheckfour()
    if check_result == 'Failed':
        # BUG FIX: this warning was previously recorded under the #15
        # 'Backup status' key (copy-paste from check #14), mislabelling the
        # finding; use this check's own title as the dictionary key.
        warning_checks['#15:Check:vManage:Evaluate Neo4j performance'] = [check_analysis, check_action]
        log_file_logger.error('#15: Check result: {}'.format(check_result))
        log_file_logger.error('#15: Check Analysis: {}\n'.format(check_analysis))
        writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
        writeFile(report_file, 'Action: {}\n\n'.format(check_action))
    else:
        log_file_logger.info('#15: Check result: {}'.format(check_result))
        log_file_logger.info('#15: Check Analysis: {}\n'.format(check_analysis))
        writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
    print('\033[1;31m ERROR: Error performing #15:Check:vManage:Evaluate Neo4j performance. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
    log_file_logger.exception('{}\n'.format(e))
# 16:Check:vManage:Confirm there are no pending tasks
check_name = '#16:Check:vManage:Confirm there are no pending tasks'
log_file_logger.info(check_name)
writeFile(report_file, check_name + '\n\n')
try:
    tasks = json.loads(getRequestpy3(version_tuple, vmanage_lo_ip, jsessionid, 'device/action/status/tasks', args.vmanage_port, tokenid))
    tasks_running, check_result, check_analysis, check_action = warningCheckfive(tasks)
    if check_result == 'Failed':
        warning_checks[check_name] = [check_analysis, check_action]
        log_file_logger.error('#16: Check result: {}'.format(check_result))
        log_file_logger.error('#16: Check Analysis: {}'.format(check_analysis))
        log_file_logger.error('#16: Tasks still running: {}\n'.format(tasks_running))
        writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
        writeFile(report_file, 'Action: {}\n\n'.format(check_action))
    else:
        log_file_logger.info('#16: Check result: {}'.format(check_result))
        log_file_logger.info('#16: Check Analysis: {}\n'.format(check_analysis))
        writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
    print('\033[1;31m ERROR: Error performing #16:Check:vManage:Confirm there are no pending tasks. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
    log_file_logger.exception('{}\n'.format(e))

# 17:Check:vManage:Validate there are no empty password users
check_name = '#17:Check:vManage:Validate there are no empty password users'
log_file_logger.info(check_name)
writeFile(report_file, check_name + '\n\n')
try:
    users_emptypass, check_result, check_analysis, check_action = warningChecksix(version_tuple)
    if check_result == 'Failed':
        warning_checks[check_name] = [check_analysis, check_action]
        log_file_logger.error('#17: Check result: {}'.format(check_result))
        log_file_logger.error('#17: Check Analysis: {}'.format(check_analysis))
        log_file_logger.error('#17: Users with empty passwords: {}\n'.format(users_emptypass))
        writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
        writeFile(report_file, 'Action: {}\n\n'.format(check_action))
    else:
        log_file_logger.info('#17: Check result: {}'.format(check_result))
        log_file_logger.info('#17: Check Analysis: {}\n'.format(check_analysis))
        writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
    print('\033[1;31m ERROR: Error performing #17:Check:vManage:Validate there are no empty password users. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
    log_file_logger.exception('{}\n'.format(e))

# 18:Check:Controllers:Controller versions
check_name = '#18:Check:Controllers:Controller versions'
log_file_logger.info(check_name)
writeFile(report_file, check_name + '\n\n')
try:
    check_result, check_analysis, check_action = warningCheckseven(controllers_info)
    if check_result == 'Failed':
        warning_checks[check_name] = [check_analysis, check_action]
        log_file_logger.error('#18: Check result: {}'.format(check_result))
        log_file_logger.error('#18: Check Analysis: {}\n'.format(check_analysis))
        writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
        writeFile(report_file, 'Action: {}\n\n'.format(check_action))
    else:
        log_file_logger.info('#18: Check result: {}'.format(check_result))
        log_file_logger.info('#18: Check Analysis: {}\n'.format(check_analysis))
        writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
    print('\033[1;31m ERROR: Error performing #18:Check:Controllers:Controller versions. \n Please check error details in log file: {}.\n If needed, please reach out to tool support at: <EMAIL>, with your report and log file. \033[0;0m'.format(log_file_path))
    log_file_logger.exception('{}\n'.format(e))
#19:Check:Controllers:Confirm Certificate Expiration Dates
log_file_logger.info('#19:Check:Controllers:Confirm Certificate Expiration Dates')
writeFile(report_file, '#19:Check:Controllers:Confirm Certificate Expiration Dates\n\n')
try:
controllers_exp, controllers_notexp, check_result, check_analysis, check_action = warningCheckeight(controllers_info)
if check_result == 'Failed':
warning_checks['#19:Check:Controllers:Confirm Certificate Expiration Dates'] = [ check_analysis, check_action]
log_file_logger.error('#19: Check result: {}'.format(check_result))
log_file_logger.error('#19: Check Analysis: {}'.format(check_analysis))
log_file_logger.error('#19: Controllers with certificates close to expiration: \n{}\n'.format(controllers_exp))
writeFile(report_file, 'Result: WARNING - {}\n'.format(check_analysis))
writeFile(report_file, 'Action: {}\n\n'.format(check_action))
else:
log_file_logger.info('#19: Check result: {}'.format(check_result))
log_file_logger.info('#19: Check Analysis: {}\n'.format(check_analysis))
writeFile(report_file, 'Result: INFO - {}\n\n'.format(check_analysis))
except Exception as e:
print('\033[1;31m ERROR: Error performing #19:Check:Controllers:Confirm Certificate Expiration Dates. \n Please check error details in log file: {}.\n If needed, please | |
<filename>udd-mirror/scraper.py
#! /usr/bin/env python3
#
# Copyright (c) 2018-2020 FASTEN.
#
# This file is part of FASTEN
# (see https://www.fasten-project.eu/).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import json
import argparse
import time
import psycopg2
from abc import ABC, abstractmethod
from psycopg2 import OperationalError
from kafka import KafkaProducer
# Some statically defined variables.
host = "udd-mirror.debian.net"
user = "udd-mirror"
# NOTE(review): placeholder credential left by anonymization -- supply the
# real secret via configuration, never hard-coded in source.
password = "<PASSWORD>"
date_format = "%Y-%m-%d %H:%M:%S"

# Source package names used by --debug mode (see DebugUdd.query).
# BUG FIX: '"imagemagick' had a stray embedded double quote which broke the
# generated SQL filter; the duplicate 'kopanocore' entry was also removed.
DEBUG_PACKAGES = ['anna', 'debianutils', 'mutt', 'ncurses', 'zlib', 'libgsf',
    'libxml2', 'planner', 'util-linux', 'calibre', 'asterisk', 'sylpheed',
    'nginx', 'bluez', 'openssh', 'exiv2', 'dash', 'xscreensaver', 'poppler',
    'kopanocore', 'util-vserver', 'dnsmasq', 'fakeroot',
    'libarchive', 'imagemagick', 'oath-toolkit', 'pam', 'evolution',
    'bibutils', 'nethack', 'nis', 'nasm', 'cpio', 'varnish', 'keyutils',
    'libpcap', 'ppp', 'libtool', 'bind9', 'tnef', 'seafile', 'systemd',
    'trousers', 'convlit', 'gpp', 'tar', 'cracklib2', 'postfix', 'sudo',
    'apt', 'linphone', 'ipmitool', 'minidjvu', 'sox', 'cimg', 'dhcpcd5',
    'arj', 'pwgen', 'm2crypto', 'gedit', 'dia', 'tiff', 'filezilla', 'apparmor',
    'kannel', 'secure-delete', 'opensc', 'blender', 'qbittorrent', 'webfs',
    'gthumb', '9base', 'hunspell', 'keepassx', 'gdm3', 'shadow', 'clamav',
    'nfs-utils', 'pcre3', 'tcc', 'lout', 'uw-imap', 'pvm', 'xcftools', 'lava',
    'grub2', 'graphviz', 'librsync', 'git', 'cflow', 'rsync', 'encfs', 'nvi',
    'postgresql-common', 'gcc', 'glibc', 'dsniff', 'sqlite3', 'bash'
    ]
class DebianPackageRelease:
    """A single Debian binary package release."""

    def __init__(self, package, version, source, source_version, date, arch,
                 release):
        self.package = package
        self.source = source
        self.version = version
        self.source_version = source_version
        self.date = date
        # Architecture (amd64, powerpc, all, etc.)
        self.arch = arch
        # Debian release name (buster, sid, etc.)
        self.release = release

    def print_release(self):
        """Print a human-readable one-line summary of this release."""
        summary = "Release: {0}-{1}-{2}-{3}. Uploaded at: {4}".format(
            self.package, self.version, self.arch, self.release,
            str(self.date))
        print(summary)

    def to_json(self):
        """Serialize this release to a JSON object string."""
        payload = {
            "package": self.package,
            "version": self.version,
            "arch": self.arch,
            "release": self.release,
            "source": self.source,
            "source_version": self.source_version,
            "date": str(self.date),
        }
        return json.dumps(payload)
class Udd(ABC):
    """Fetch Debian package releases from the UDD mirror.

    Subclasses implement :meth:`query` to select which releases to fetch.
    Releases are printed to stdout by default, or pushed to a Kafka topic
    when both bootstrap servers and a topic are configured.
    """
    # Connection settings, taken from the module-level defaults.
    host = host
    user = user
    password = password
    dbname = 'udd'

    def __init__(self, bootstrap_servers=None, kafka_topic=None,
                 is_c=False, release=None, arch=None):
        """Store filter options and open the database connection.

        Args:
            bootstrap_servers: comma-separated Kafka servers, or None.
            kafka_topic: Kafka topic to push to, or None.
            is_c: if True, restrict to packages implemented in C.
            release: restrict to a Debian release (e.g. 'buster'), or None.
            arch: restrict to an architecture (e.g. 'amd64'), or None.
        """
        self.servers = bootstrap_servers
        self.topic = kafka_topic
        self.is_c = is_c
        self.release = release
        self.arch = arch
        self.con = None
        # Default sink is stdout; switch to Kafka only when fully configured.
        self.produce = self.to_print
        if self.servers is not None and self.topic is not None:
            self.produce = self.to_kafka
        try:
            self.con = self._connect_db()
        except OperationalError:
            print("Cannot connect to database {}:{}".format(host, 'udd'))

    def _connect_db(self):
        """Return a connection to the database.

        Raises:
            OperationalError: if the connection cannot be established.
        """
        # BUG FIX: the password must come from the configured class attribute;
        # the previous code contained an invalid placeholder token here
        # (a syntax error). The redundant try/re-raise wrapper was removed.
        return psycopg2.connect(
            host=self.host, user=self.user,
            password=self.password, dbname=self.dbname
        )

    @abstractmethod
    def query(self):
        """Query db for releases.
        """

    def parse_query(self, query_result):
        """Convert DB rows into DebianPackageRelease objects.

        Rows are (package, version, source, source_version, arch, release);
        the upload date is not part of the row, so it is left empty.
        """
        releases = []
        for row in query_result:
            releases.append(
                DebianPackageRelease(
                    row[0], row[1], row[2], row[3], "", row[4], row[5]
                )
            )
        return releases

    def get_releases(self):
        """Fetch and return releases sorted by date (empty list if no DB)."""
        releases = []
        if self.con is None:
            return releases
        releases = self.parse_query(self.query())
        # NOTE(review): bare count printed to stdout -- debug output that
        # pollutes to_print()'s JSON stream; consider logging instead.
        print(len(releases))
        return sorted(releases, key=lambda x: x.date)

    def to_print(self):
        """Print releases to stdout as JSON, one per line.

        Returns:
            The date of the latest release, or the configured start date
            (today if none is set) when no releases were found.
        """
        releases = self.get_releases()
        for release in releases:
            print(release.to_json())
        # Return latest date (if any new releases are found).
        if len(releases) == 0:
            if hasattr(self, 'start_date'):
                return self.start_date
            return str(datetime.datetime.today().strftime(date_format))
        # We would need .replace(tzinfo=None) here if dates were timezone-aware.
        return releases[-1].date

    def to_kafka(self):
        """Push all fetched releases to the configured Kafka topic.

        Returns:
            The (naive) date of the latest release, or the configured start
            date (today if none is set) when no releases were found.
        """
        releases = self.get_releases()
        producer = KafkaProducer(
            bootstrap_servers=self.servers.split(','),
            value_serializer=lambda x: x.encode('utf-8')
        )
        for release in releases:
            release.print_release()
            producer.send(self.topic, release.to_json())
            # NOTE(review): flushing per message guarantees delivery order but
            # is slow; a single flush after the loop would likely suffice.
            producer.flush()
        print("{0}: Sent {1} releases.".format(
            str(datetime.datetime.now()), len(releases))
        )
        # Return latest date (if any new releases are found).
        if len(releases) == 0:
            if hasattr(self, 'start_date'):
                return self.start_date
            return str(datetime.datetime.today().strftime(date_format))
        return releases[-1].date.replace(tzinfo=None)
class AllUdd(Udd):
    """Fetch the latest releases of all packages.
    """

    def query(self):
        """Return DB rows for every package, optionally filtered by
        implementation language, Debian release, and architecture.
        """
        cursor = self.con.cursor()
        # Collect only the filters that are actually set, then join them with
        # AND. This replaces the previous index-juggling loop, which was hard
        # to follow and emitted double spaces in the generated SQL.
        # NOTE(review): values are interpolated directly into the SQL string;
        # they come from CLI options here, but parameterized queries
        # (cursor.execute(sql, params)) would be safer.
        filters = []
        if self.is_c:
            filters.append("tag LIKE '%implemented-in::c%'")
        if self.release:
            filters.append("release = '{}'".format(self.release))
        if self.arch:
            filters.append("architecture = '{}'".format(self.arch))
        where = "WHERE " + " AND ".join(filters) if filters else ''
        query = ("SELECT package, packages.version, packages.source, "
                 "source_version, architecture, release "
                 "FROM packages "
                 "{}"
                 ).format(where)
        cursor.execute(query)
        return cursor.fetchall()
class PackageUdd(Udd):
    """Fetch release(s) of a single package.
    """

    def __init__(self, bootstrap_servers=None, kafka_topic=None,
                 is_c=False, release=None, arch=None, package='',
                 version=None, is_package=True):
        """Store the package (or source) name and optional version filter."""
        super(PackageUdd, self).__init__(
            bootstrap_servers, kafka_topic, is_c, release, arch
        )
        self.package = package
        self.version = version
        # When False, match against packages.source instead of package.
        self.is_package = is_package

    def query(self):
        """Return at most one DB row matching the configured package/source."""
        cur = self.con.cursor()
        # Each optional filter contributes an " AND ..." fragment, or nothing.
        optional_filters = [
            (" AND tag LIKE '%implemented-in::c%' " if self.is_c else ''),
            (" AND release = '{}' ".format(self.release) if self.release else ''),
            (" AND architecture = '{}' ".format(self.arch) if self.arch else ''),
            (" AND version = '{}' ".format(self.version) if self.version else ''),
        ]
        column = "package" if self.is_package else "packages.source"
        statement = ("SELECT package, packages.version, packages.source, "
                     "source_version, architecture, release "
                     "FROM packages "
                     "WHERE {} = '{}' "
                     "{} LIMIT 1"
                     ).format(column, self.package, ''.join(optional_filters))
        cur.execute(statement)
        return cur.fetchall()
class DateUdd(Udd):
    """Fetch releases from a starting date.

    NOTE(review): this scraper is currently disabled -- query() raises
    NotImplementedError unconditionally because the UDD upload history
    does not contain all packages.
    """
    def __init__(self, bootstrap_servers=None, kafka_topic=None,
            is_c=False, release=None, arch=None, package=None,
            date=''):
        """Store the starting date and an optional single-package filter.

        `start_date` is also the fallback return value of to_print/to_kafka
        when no releases are found.
        """
        super(DateUdd, self).__init__(
            bootstrap_servers, kafka_topic, is_c, release, arch
        )
        self.package = package
        self.start_date = date
    def query(self):
        cursor = self.con.cursor()
        # Optional SQL filter fragments; each is empty when its option is unset.
        check_is_c = (" AND tag LIKE '%implemented-in::c%' "
                if self.is_c else '')
        check_release = (" AND release = '{}' ".format(self.release)
                if self.release else '')
        check_arch = (" AND architecture = '{}' ".format(self.arch)
                if self.arch else '')
        check_package = (" AND package = '{}' ".format(self.package)
                if self.package else '')
        check = "{}{}{}{}".format(
            check_is_c, check_release, check_arch, check_package
        )
        # Deliberately disabled; the code below is dead but kept for the day
        # the upload-history table covers all packages.
        raise NotImplementedError("Upload history does not contain all packages")
        query = ("SELECT package, packages.version, packages.source, "
                "source_version, date, architecture, release "
                "FROM packages "
                "WHERE date > '{start_date}+00' "
                "{check}").format(
            start_date=self.start_date,
            check=check
        )
        cursor.execute(query)
        return cursor.fetchall()
class DebugUdd(Udd):
    """Fetch specific releases from a default list with sources.
    """

    def query(self):
        """Return DB rows for the hard-coded DEBUG_PACKAGES sources,
        optionally filtered by implementation language, release, and
        architecture.
        """
        cursor = self.con.cursor()
        check_is_c = (" AND tag LIKE '%implemented-in::c%' "
                      if self.is_c else '')
        check_release = (" AND release = '{}' ".format(self.release)
                         if self.release else '')
        check_arch = (" AND architecture = '{}' ".format(self.arch)
                      if self.arch else '')
        checks = '{} {} {}'.format(check_is_c, check_release, check_arch)
        # Join the per-source equality tests with OR. str.join replaces the
        # previous manual concatenation + fragile [:-3] trailing-separator
        # slice, producing an equivalent WHERE clause.
        packages = " OR ".join(
            "packages.source = '{}'".format(s) for s in DEBUG_PACKAGES
        )
        query = ("SELECT package, packages.version, packages.source, "
                 "source_version, architecture, release "
                 "FROM packages "
                 "WHERE ({}) {}"
                 ).format(packages, checks)
        cursor.execute(query)
        return cursor.fetchall()
def get_parser():
parser = argparse.ArgumentParser(
"Scrape Debian packages releases, and optionally push them to Kafka."
)
parser.add_argument(
'-a',
'--architecture',
type=str,
default=None,
help='Specify an architecture (e.g. amd64).'
)
parser.add_argument(
'-b',
'--bootstrap-servers',
type=str,
default=None,
help="Kafka servers, comma separated."
)
parser.add_argument(
'-C',
'--not-only-c',
action='store_true',
help='Fetch all types of packages releases (not only C packages).'
)
parser.add_argument(
'-d',
'--start-date',
type=lambda s: datetime.datetime.strptime(s, date_format),
help=("The date to start scraping from. Must be in "
"%%Y-%%m-%%d %%H:%%M:%%S format.")
)
parser.add_argument(
'-D',
'--debug',
action='store_true',
help="Fetch some predefined packages for debugging purposes"
)
parser.add_argument(
'-f',
'--forever',
action='store_true',
help="Run forever. Always use it with --start-date."
)
parser.add_argument(
'-p',
'--package',
type=str,
help="Package's name to fetch."
)
parser.add_argument(
'-r',
'--release',
type=str,
default=None,
help='Specify Debian Release (e.g. buster).'
)
parser.add_argument(
'-s',
'--sleep-time',
type=int,
default=43200,
help=("Time to sleep in between each scrape (in sec). Use it with "
"--start-date option. Default 43.200 seconds (12 hours).")
)
parser.add_argument(
'-S',
'--source',
type=str,
help="Source's name to fetch."
)
parser.add_argument(
'-t',
'--topic',
type=str,
default=None,
help="Kafka topic to push to."
)
parser.add_argument(
'-v',
'--version',
type=str,
help='Version of | |
<filename>wc_model_gen/eukaryote/initialize_model.py
""" Initialize the construction of wc_lang-encoded models from wc_kb-encoded knowledge base.
:Author: <NAME> <<EMAIL>>
:Date: 2019-01-09
:Copyright: 2019, Karr Lab
:License: MIT
"""
from wc_utils.util.chem import EmpiricalFormula, OpenBabelUtils
from wc_utils.util.chem.marvin import get_major_micro_species
from wc_onto import onto as wc_ontology
from wc_utils.util.units import unit_registry
from wc_utils.util import chem
import wc_model_gen.global_vars as gvar
import ete3
import math
import mendeleev
import numpy
import openbabel
import scipy.constants
import wc_kb
import wc_lang
import wc_model_gen
class InitializeModel(wc_model_gen.ModelComponentGenerator):
""" Initialize model from knowledge base
Options:
* culture_volume (:obj:`float`): volume of cell culture; default is 1.0 liter
* cell_density(:obj:`float`): cell density; default is 1040 g/liter
* membrane_density (:obj:`float`): membrane density; default is 1160 g/liter
* cds (:obj:`bool`): True indicates mRNA sequence is a complete CDS; default is True
* amino_acid_id_conversion (:obj:`dict`): a dictionary with amino acid standard ids
as keys and amino acid metabolite ids as values
* selenoproteome (:obj:`list`): list of IDs of genes that translate into
selenoproteins, default is an empty list
* environment (:obj:`dict`): dictionary with details for generating cell environment in the model
* ph (:obj:`float`): pH at which species will be protonated and reactions will be balanced; default is 7.4
* media (:obj:`dict`): a dictionary with species type ids as keys and tuples of concentration (M) in the
media (extracellular space), `list` of `wc_lang.Reference`, and comments as values
* rna_input_seq (:obj:`dict`, optional): a dictionary with RNA ids as keys and sequence strings as values
* smiles_input (:obj:`dict`, optional): a dictionary with metabolite ids as keys and smiles strings as values
* check_reaction (:obj:`bool`): if True, reactions will be checked and corrected for proton and charge balance;
default is True
* gen_dna (:obj:`bool`): if True, DNA species types and species will be generated;
default is True
* gen_transcripts (:obj:`bool`): if True, transcript species types and species will be generated;
default is True
* gen_protein (:obj:`bool`): if True, protein species types and species will be generated;
default is True
* gen_metabolites (:obj:`bool`): if True, metabolite species types and species will be generated;
default is True
* gen_complexes (:obj:`bool`): if True, macromolecular complex species types and species will be generated;
default is True
* gen_distribution_init_concentrations (:obj:`bool`): if True, initial concentrations of species will be generated;
default is True
* gen_observables (:obj:`bool`): if True, observables will be generated; default is True
* gen_kb_reactions (:obj:`bool`): if True, reactions will be generated; default is True
* gen_dfba_objective (:obj:`bool`): if True, a dfba objective function will be created; default is False
* gen_kb_rate_laws (:obj:`bool`): if True, rate laws will be generated; default is True
* gen_environment (:obj:`bool`): if True, cell environment will be generated; default is True
"""
def run(self):
""" Run all the components for initializing model from knowledge base """
self.clean_and_validate_options()
options = self.options
print('Initialization is starting...')
self.gen_taxon()
self.gen_compartments()
self.gen_parameters()
self.global_vars_from_input()
print('Taxon, compartments, and parameters have been initialized')
if options['gen_metabolites']:
self.gen_metabolites()
print('All metabolite species types and species have been initialized')
if options['gen_dna']:
self.gen_dna()
print('All DNA species types and species have been initialized')
if options['gen_transcripts']:
self.gen_transcripts()
print('All transcript species types and species have been initialized')
if options['gen_protein']:
self.gen_protein()
print('All protein species types and species have been initialized')
if options['gen_complexes']:
self.gen_complexes()
print('All complex species types and species have been initialized')
if options['gen_distribution_init_concentrations']:
self.gen_distribution_init_concentrations()
print('Concentrations of species have been initialized')
if options['gen_observables']:
self.gen_observables()
print('Model observables have been initialized')
if options['gen_kb_reactions']:
self.gen_kb_reactions()
print('Reactions in knowledge base have been initialized')
if options['gen_kb_rate_laws']:
self.gen_kb_rate_laws()
print('Rate laws in knowledge base have been initialized')
if options['gen_environment']:
self.gen_environment()
print('Model generator has been initialized')
def clean_and_validate_options(self):
""" Apply default options and validate options """
options = self.options
culture_volume = options.get('culture_volume', 1.)
assert(isinstance(culture_volume, float))
options['culture_volume'] = culture_volume
cell_density = options.get('cell_density', 1040.)
assert(isinstance(cell_density, float))
options['cell_density'] = cell_density
membrane_density = options.get('membrane_density', 1160.)
assert(isinstance(membrane_density, float))
options['membrane_density'] = membrane_density
cds = options.get('cds', True)
assert(isinstance(cds, bool))
options['cds'] = cds
amino_acid_id_conversion = options.get('amino_acid_id_conversion', {})
assert(isinstance(amino_acid_id_conversion, dict))
options['amino_acid_id_conversion'] = amino_acid_id_conversion
selenoproteome = options.get('selenoproteome', [])
assert(isinstance(selenoproteome, list))
options['selenoproteome'] = selenoproteome
environment = options.get('environment', {})
assert(isinstance(environment, dict))
options['environment'] = environment
ph = options.get('ph', 7.4)
assert(isinstance(ph, float))
options['ph'] = ph
media = options.get('media', {})
assert(isinstance(media, dict))
options['media'] = media
rna_input_seq = options.get('rna_input_seq', {})
assert(isinstance(rna_input_seq, dict))
options['rna_input_seq'] = rna_input_seq
smiles_input = options.get('smiles_input', {})
assert(isinstance(smiles_input, dict))
options['smiles_input'] = smiles_input
check_reaction = options.get('check_reaction', True)
assert(isinstance(check_reaction, bool))
options['check_reaction'] = check_reaction
gen_metabolites = options.get('gen_metabolites', True)
assert(isinstance(gen_metabolites, bool))
options['gen_metabolites'] = gen_metabolites
gen_dna = options.get('gen_dna', True)
assert(isinstance(gen_dna, bool))
options['gen_dna'] = gen_dna
gen_transcripts = options.get('gen_transcripts', True)
assert(isinstance(gen_transcripts, bool))
options['gen_transcripts'] = gen_transcripts
gen_protein = options.get('gen_protein', True)
assert(isinstance(gen_protein, bool))
options['gen_protein'] = gen_protein
gen_complexes = options.get('gen_complexes', True)
assert(isinstance(gen_complexes, bool))
options['gen_complexes'] = gen_complexes
gen_distribution_init_concentrations = options.get('gen_distribution_init_concentrations', True)
assert(isinstance(gen_distribution_init_concentrations, bool))
options['gen_distribution_init_concentrations'] = gen_distribution_init_concentrations
gen_observables = options.get('gen_observables', True)
assert(isinstance(gen_observables, bool))
options['gen_observables'] = gen_observables
gen_kb_reactions = options.get('gen_kb_reactions', True)
assert(isinstance(gen_kb_reactions, bool))
options['gen_kb_reactions'] = gen_kb_reactions
gen_dfba_objective = options.get('gen_dfba_objective', False)
assert(isinstance(gen_dfba_objective, bool))
options['gen_dfba_objective'] = gen_dfba_objective
gen_kb_rate_laws = options.get('gen_kb_rate_laws', True)
assert(isinstance(gen_kb_rate_laws, bool))
options['gen_kb_rate_laws'] = gen_kb_rate_laws
gen_environment = options.get('gen_environment', True)
assert(isinstance(gen_environment, bool))
options['gen_environment'] = gen_environment
def gen_taxon(self):
""" Generate taxon for the model from knowledge base """
kb = self.knowledge_base
model = self.model
ncbi_taxa = ete3.NCBITaxa()
taxon_name = ncbi_taxa.get_taxid_translator([kb.cell.taxon])[kb.cell.taxon]
taxon_rank = ncbi_taxa.get_rank([kb.cell.taxon])[kb.cell.taxon]
model_taxon = wc_lang.core.Taxon(id='taxon', name=taxon_name, model=model,
rank=wc_lang.core.TaxonRank[taxon_rank])
    def gen_compartments(self):
        """ Generate compartments for the model from knowledge base

        For every KB compartment this creates a model compartment with a
        density parameter, an initial volume, and a dynamic volume function
        (compartment mass-term / density).
        """
        kb = self.knowledge_base
        model = self.model
        # The mean cell volume is required to size all intracellular compartments.
        if kb.cell.parameters.get_one(id='cell_volume'):
            mean_cell_volume = kb.cell.parameters.get_one(id='cell_volume').value
        else:
            raise ValueError('The cell object does not have the parameter cell_volume')
        culture_volume = self.options['culture_volume']
        cell_density = self.options['cell_density']
        membrane_density = self.options['membrane_density']
        for comp in kb.cell.compartments:
            c = model.compartments.get_or_create(
                id=comp.id, name=comp.name)
            c.init_density = model.parameters.create(
                id='density_' + c.id,
                units=unit_registry.parse_units('g l^-1'))
            if comp.id=='e':
                # Extracellular space: volume is the whole culture volume and
                # density is fixed at 1000 g/l.
                c.biological_type = wc_ontology['WC:extracellular_compartment']
                c.init_density.value = 1000.
                c.init_volume = wc_lang.core.InitVolume(distribution=wc_ontology['WC:normal_distribution'],
                    mean=culture_volume, std=0)
            elif '_m' in comp.id:
                # Membrane of an organelle: ids follow '<organelle>_m', so the
                # owning organelle's volumetric fraction is looked up by the
                # prefix. Membrane volume scales with (organelle volume)^(2/3),
                # a surface-area-like term.
                # NOTE(review): the 4.836E-09 prefactor is undocumented here --
                # confirm its derivation (geometry/units) before relying on it.
                c.physical_type = wc_ontology['WC:membrane_compartment']
                c.init_density.value = membrane_density
                organelle_fraction = kb.cell.compartments.get_one(id=comp.id[:comp.id.index('_')]).volumetric_fraction
                c.init_volume = wc_lang.core.InitVolume(distribution=wc_ontology['WC:normal_distribution'],
                    mean=4.836E-09*(mean_cell_volume*organelle_fraction)**(2/3), std=0)
            else:
                # Interior compartment: its volumetric fraction of the cell,
                # minus the membrane term computed for the matching '_m'
                # compartment above.
                c.init_density.value = cell_density
                organelle_fraction = kb.cell.compartments.get_one(id=comp.id).volumetric_fraction
                c.init_volume = wc_lang.core.InitVolume(distribution=wc_ontology['WC:normal_distribution'],
                    mean=mean_cell_volume*organelle_fraction - 4.836E-09*(mean_cell_volume*organelle_fraction)**(2/3), std=0)
            # Dynamic volume function: volume_<c> = <c's mass term> / density_<c>.
            volume = model.functions.create(id='volume_' + c.id, units=unit_registry.parse_units('l'))
            volume.expression, error = wc_lang.FunctionExpression.deserialize(f'{c.id} / {c.init_density.id}', {
                wc_lang.Compartment: {c.id: c},
                wc_lang.Parameter: {c.init_density.id: c.init_density},
                })
            assert error is None, str(error)
    def gen_parameters(self):
        """ Generate parameters for the model from knowledge base

        Creates an Avogadro constant parameter, copies every KB parameter
        into the model (typing K_m / k_cat parameters by id substring),
        carries over references and identifiers, and converts the doubling
        time into seconds.
        """
        kb = self.knowledge_base
        model = self.model
        Avogadro = model.parameters.create(id='Avogadro',
            type = None,
            value = scipy.constants.Avogadro,
            units = unit_registry.parse_units('molecule mol^-1'))
        # Create parameters from kb
        for param in kb.cell.parameters:
            model_param = model.parameters.create(
                            id=param.id,
                            name=param.name,
                            value=param.value,
                            units=param.units)
            # The ontology type is inferred by substring match on the id.
            if 'K_m' in param.id:
                model_param.type = wc_ontology['WC:K_m']
            elif 'k_cat' in param.id:
                model_param.type = wc_ontology['WC:k_cat']
                # k_cat units are forced to molecule^-1 s^-1, overriding
                # whatever units the KB recorded.
                model_param.units = unit_registry.parse_units('molecule^-1 s^-1')
            else:
                model_param.type = None
            if param.references:
                for ref in param.references:
                    # Reuse an existing model reference when all fields match.
                    ref_model = model.references.get_or_create(
                        __type=wc_lang.Reference,
                        author=ref.authors,
                        title=ref.title,
                        publication=ref.journal,
                        volume=ref.volume,
                        issue=ref.issue,
                        pages=ref.pages,
                        year=ref.year,
                        comments=ref.comments,
                        type=wc_ontology['WC:article'])
                    if not ref_model.id:
                        # Newly created reference: assign a sequential id.
                        ref_model.id = 'ref_'+str(len(model.references))
                    model_param.references.append(ref_model)
            if param.identifiers:
                for identifier in param.identifiers:
                    identifier_model = wc_lang.Identifier(
                        namespace=identifier.namespace, id=identifier.id)
                    model_param.identifiers.append(identifier_model)
        # Standardize the units of doubling time
        if model.parameters.get_one(id='mean_doubling_time'):
            model_doubling_time = model.parameters.get_one(id='mean_doubling_time')
        else:
            raise ValueError('The cell object does not have the parameter mean_doubling_time')
        # Convert whatever time unit the KB used into seconds via pint.
        expr = unit_registry.parse_expression(str(model_doubling_time.units))
        scale = expr.to(unit_registry.parse_units('second'))
        conversion_factor = scale.magnitude
        model_doubling_time.value *= conversion_factor
        model_doubling_time.units = unit_registry.parse_units('s')
def global_vars_from_input(self):
    """ Populate global variables if input transcript sequences are provided in the options.

    For each transcript sequence in the ``rna_input_seq`` option, caches its
    NTP composition and length in ``gvar.transcript_ntp_usage``, keyed by
    transcript id.
    """
    rna_input_seq = self.options['rna_input_seq']
    # Read (and thereby require) the selenoproteome option; it is not used
    # here but presumably consumed by later generation steps — TODO confirm.
    selenoproteome = self.options['selenoproteome']
    for transcript_id, seq in rna_input_seq.items():
        # Hoisted: the original upper-cased the sequence once per base counted
        seq_upper = seq.upper()
        gvar.transcript_ntp_usage[transcript_id] = {
            'A': seq_upper.count('A'),
            'C': seq_upper.count('C'),
            'G': seq_upper.count('G'),
            'U': seq_upper.count('U'),
            'len': len(seq),
        }
def gen_metabolites(self):
    """ Generate metabolites for the model from knowledge base """
    kb = self.knowledge_base
    model = self.model  # unused here; kept for parity with sibling generators
    for metabolite_type in kb.cell.species_types.get(
            __type=wc_kb.core.MetaboliteSpeciesType):
        self.gen_species_type(metabolite_type)
def gen_dna(self):
    """ Generate DNAs for the model from knowledge base """
    kb = self.knowledge_base
    model = self.model  # unused here; kept for parity with sibling generators
    dna_types = kb.cell.species_types.get(
        __type=wc_kb.core.DnaSpeciesType)
    for dna_type in dna_types:
        # Ids containing 'M' go to compartment 'm', all others to 'n'
        compartments = ['m'] if 'M' in dna_type.id else ['n']
        self.gen_species_type(dna_type, compartments)
def gen_transcripts(self):
    """ Generate transcripts (mature RNAs) for the model from knowledge base """
    kb = self.knowledge_base
    model = self.model  # unused here; kept for parity with sibling generators
    transcript_types = kb.cell.species_types.get(
        __type=wc_kb.eukaryote.TranscriptSpeciesType)
    # Report progress every 100 transcripts
    for count, transcript_type in enumerate(transcript_types, start=1):
        self.gen_species_type(transcript_type)
        if count % 100 == 0:
            print('{}/{} of the transcripts have been initialized'.format(
                count, len(transcript_types)))
def gen_protein(self):
""" Generate proteins for the model from knowledge | |
isInsideRegion(self, maxWidth, maxHeight):
if self.left >= 0 and self.top >= 0 and self.right < maxWidth and self.bottom < maxHeight:
return True
else:
return False
def isValid(self):
    """Return True iff the bbox has positive width/height and all
    coordinates lie within [-MAX_VALID_DIM, MAX_VALID_DIM]."""
    degenerate = self.left >= self.right or self.top >= self.bottom
    if degenerate:
        return False
    coords = self.rect()
    if min(coords) < -self.MAX_VALID_DIM or max(coords) > self.MAX_VALID_DIM:
        return False
    return True
def getEnclosingBbox(pts):
    """Return the smallest axis-aligned Bbox containing every (x, y) point in pts."""
    xMin = yMin = float('inf')
    xMax = yMax = float('-inf')
    for p in pts:
        if p[0] < xMin:
            xMin = p[0]
        if p[1] < yMin:
            yMin = p[1]
        if p[0] > xMax:
            xMax = p[0]
        if p[1] > yMax:
            yMax = p[1]
    return Bbox(xMin, yMin, xMax, yMax)
def ptRotate(pt, angle, centerPt=(0, 0)):
    """Rotate a 2D point by `angle` degrees around `centerPt`.

    The angle is negated before use — presumably so positive angles rotate
    clockwise in image coordinates (y grows downward); preserved from the
    original. Fix: the default center was a mutable list literal ([0, 0]);
    an immutable tuple avoids the shared-mutable-default pitfall and is
    index-compatible for all existing callers.

    Args:
        pt: indexable (x, y) point
        angle: rotation angle in degrees
        centerPt: indexable (x, y) rotation center
    Returns:
        [x', y'] rotated point as a 2-element list
    """
    theta = - angle / 180.0 * pi
    dx = pt[0] - centerPt[0]
    dy = pt[1] - centerPt[1]
    ptRot = [0, 0]
    ptRot[0] = cos(theta) * dx - sin(theta) * dy + centerPt[0]
    ptRot[1] = sin(theta) * dx + cos(theta) * dy + centerPt[1]
    return ptRot
def rectRotate(rect, angle, centerPt=None):
    """Rotate the four corners of an axis-aligned rect by `angle` degrees.

    Fix: the default rotation center was a mutable list literal ([]); None is
    now the sentinel. Callers that explicitly pass [] still get the rect
    center, so the change is backward compatible.

    Args:
        rect: (left, top, right, bottom)
        angle: rotation angle in degrees
        centerPt: [x, y] rotation center; None/[] means the rect's center
    Returns:
        [leftTopRot, rightTopRot, leftBottomRot, rightBottomRot] corner points
    """
    left, top, right, bottom = rect
    if centerPt is None or centerPt == []:
        centerPt = [0.5 * (left + right), 0.5 * (top + bottom)]
    leftTopRot = ptRotate([left, top], angle, centerPt)
    rightTopRot = ptRotate([right, top], angle, centerPt)
    leftBottomRot = ptRotate([left, bottom], angle, centerPt)
    rightBottomRot = ptRotate([right, bottom], angle, centerPt)
    return [leftTopRot, rightTopRot, leftBottomRot, rightBottomRot]
def bboxRotate(bbox, angle):
    """Rotate a bbox around its own center; return the four corners as a
    polygon in LT, RT, RB, LB order."""
    center = bbox.center()
    lt, rt, lb, rb = rectRotate(bbox.rect(), angle, center)
    return [lt, rt, rb, lb]
def bboxAndImgRotate(bbox, angle, img):
    """Rotate a bbox and its image around the bbox center; report whether the
    rotated bbox stays fully inside valid (non-padded) image area.

    A ones-mask is rotated alongside the image: corners that fall on padding
    introduced by the rotation read 0 in the mask.
    Returns (bboxRot, imgRot, bboxRotInsideOriginalImage, rotatedPolygon, maskRot).
    """
    rotatedPolygon = bboxRotate(bbox, angle)
    bboxRot = getEnclosingBbox(rotatedPolygon)
    # imrotate is a project helper — assumed to rotate about the given point
    # and pad with zeros; TODO confirm against its definition.
    imgRot = imrotate(img, angle, bbox.center())
    maskRot = np.array(np.ones(img.shape[:2]), np.uint8)
    maskRot = imrotate(maskRot, angle, bbox.center())
    # NOTE(review): left/top/right/bottom are used as array indices below —
    # presumably Bbox stores integer coordinates; verify, since ptRotate
    # produces floats.
    left, top, right, bottom = bboxRot.rect()
    if bboxRot.isInsideRegion(*imWidthHeight(img)) and maskRot[top,left]==1 and maskRot[top,right]==1 and maskRot[bottom,left]==1 and maskRot[bottom,right]==1:
        bboxRotInsideOriginalImage = True;
    else:
        bboxRotInsideOriginalImage = False
        # NOTE(review): imshow here looks like leftover debugging — it pops a
        # display on every out-of-bounds rotation; consider removing.
        imshow(imgRot)
    return bboxRot, imgRot, bboxRotInsideOriginalImage, rotatedPolygon, maskRot
def bboxComputeOverlapVoc(bbox1, bbox2):
    """Return the PASCAL-VOC overlap (intersection over union) of two
    bboxes, clipped below at 0."""
    intersection = bbox1.getOverlapBbox(bbox2).surfaceArea()
    union = bbox1.surfaceArea() + bbox2.surfaceArea() - intersection
    return max(0, 1.0 * intersection / union)
####################################
# SIFT / SURF / ORB
# (mostly high-level functions)
####################################
def serializeKeypoints(keyPoints):
    """Flatten cv2.KeyPoint objects into plain picklable lists
    [pt, size, angle, response, octave, class_id]."""
    serialized = []
    for kp in keyPoints:
        serialized.append([kp.pt, kp.size, kp.angle, kp.response, kp.octave, kp.class_id])
    return serialized
def deserializeKeypoints(kpInfos):
    """Rebuild cv2.KeyPoint objects from serializeKeypoints output.

    Fix: construct with positional arguments. The underscore-prefixed keyword
    names (_size, _angle, _response, _octave, _class_id) were removed from the
    cv2.KeyPoint constructor in OpenCV 4.5.3+; positional construction
    (x, y, size, angle, response, octave, class_id) works on both old and new
    builds.
    """
    return [cv2.KeyPoint(kpInfo[0][0], kpInfo[0][1], kpInfo[1], kpInfo[2],
                         kpInfo[3], kpInfo[4], kpInfo[5])
            for kpInfo in kpInfos]
def loadKeypointsAndDescriptors(featDir, imgFilename, keyLUT=None, descLUT=None):
    """Load (keyPoints, descriptors) for an image, with optional caching.

    Fix: in the original, the branch that stored freshly loaded data into the
    LUTs was unreachable (its condition was fully covered by the first branch)
    and referenced `keyPoints`/`descriptors` before assignment, so the cache
    was never populated. Now: on a cache hit the LUTs are read; on a miss the
    pickles are loaded and, when LUTs were supplied, stored for next time.

    Args:
        featDir: directory holding the serialized feature files
        imgFilename: image file name; its extension (last 4 chars) is replaced
        keyLUT, descLUT: optional dict caches keyed by file path
    Returns:
        (keyPoints, descriptors)
    """
    keyPointsPath = featDir + "/" + imgFilename[:-4] + ".keyPoints.tsv"
    descriptorsPath = featDir + "/" + imgFilename[:-4] + ".descriptors.tsv"
    if keyLUT is not None and keyPointsPath in keyLUT:
        # Cache hit
        keyPoints = keyLUT[keyPointsPath]
        descriptors = descLUT[descriptorsPath]
    else:
        # Cache miss: load from disk, then populate the caches if provided
        keyPointsSerialized = loadFromPickle(keyPointsPath)
        keyPoints = deserializeKeypoints(keyPointsSerialized)
        descriptors = loadFromPickle(descriptorsPath)
        if keyLUT is not None:
            keyLUT[keyPointsPath] = keyPoints
            descLUT[descriptorsPath] = descriptors
    return keyPoints, descriptors
def orbDetectAndCompute(img, orbObject, orbImresizeScale, orbImresizeMaxDim):
    """Run ORB detect+compute on an optionally rescaled image, then map the
    keypoints back to original-image coordinates.

    Upscaling before detection was observed to help with tiny objects. The
    scale is capped so that the largest image dimension does not exceed
    orbImresizeMaxDim.

    Fix: the cap computation used `orbImresizeMaxDim / max(img.shape)`, which
    truncates to an integer under Python 2 division; float() forces true
    division on both Python 2 and 3.
    """
    if orbImresizeScale != 1:  # obtained better results for tiny objects if upscaling img
        scale = orbImresizeScale
        if max(img.shape) * scale > orbImresizeMaxDim:
            scale = float(orbImresizeMaxDim) / max(img.shape)
        img = imresize(img, scale)
    else:
        scale = 1.0
    keyPoints, descriptors = orbObject.detectAndCompute(img, None)
    if scale != 1:  # scale keypoints back to original image dimension
        for i in range(len(keyPoints)):
            keyPoints[i].pt = (round(keyPoints[i].pt[0]/scale), round(keyPoints[i].pt[1]/scale))
            keyPoints[i].size = round(keyPoints[i].size/scale)
    return keyPoints, descriptors
def findGoodMatches(matcher, test_desc, train_desc, train_kp, ratioThres, matchDistanceThres, maxNrMatchesPerLocation):
    """Select good descriptor matches via Lowe's ratio test, an absolute
    distance threshold, and a per-location cap.

    Args:
        matcher: a cv2 descriptor matcher supporting knnMatch
        test_desc, train_desc: query and train descriptor arrays
        train_kp: train keypoints, indexed by match.trainIdx
        ratioThres: max allowed best/second-best distance ratio (Lowe's test)
        matchDistanceThres: absolute distance cutoff for the best match
        maxNrMatchesPerLocation: cap on accepted matches per rounded train point
    Returns:
        list of accepted cv2.DMatch objects
    """
    goodMatches = []
    goodMatchesPts = dict()   # rounded train-point -> accepted-match count
    matches = matcher.knnMatch(test_desc, train_desc, k=2)
    for match in matches:
        # knnMatch can return fewer than 2 neighbors; those are skipped
        if len(match) > 1:
            bestMatch, secondBestMatch = match
            # NOTE(review): this mutates the DMatch object in place
            if secondBestMatch.distance == 0:
                secondBestMatch.distance = 0.0001 #avoid division by zero errors
            if bestMatch.distance < matchDistanceThres and bestMatch.distance / secondBestMatch.distance < ratioThres:
                # Round is a project helper (capital R) — presumably rounds the
                # point tuple so nearby matches share one key; TODO confirm.
                key = str(Round(train_kp[bestMatch.trainIdx].pt))
                if key not in goodMatchesPts:
                    goodMatchesPts[key] = 1
                if goodMatchesPts[key] <= maxNrMatchesPerLocation:
                    goodMatches.append(bestMatch)
                    goodMatchesPts[key] += 1
    return goodMatches
def estimateRigidTransform(srcPts, dstPts, inlierThreshold, outlierCoordScale=1.0, fullAffine=False):
    """Estimate a 2D rigid/affine transform between point sets and score inliers.

    Fix: removed a leftover debug `print M` statement (Python 2 print syntax,
    a SyntaxError under Python 3, and noisy output in any case).

    Args:
        srcPts, dstPts: corresponding 2D points
        inlierThreshold: max (scaled) reprojection distance to count as inlier
        outlierCoordScale: divisor applied to coordinates before distance test
        fullAffine: passed through to cv2 (False = rigid + uniform scale)
    Returns:
        (M, inlierMask) — the 2x3 transform (or None if estimation failed)
        and a 0/1 numpy array marking inliers
    """
    srcPts = np.float32(srcPts).reshape(-1, 1, 2)
    dstPts = np.float32(dstPts).reshape(-1, 1, 2)
    # NOTE(review): cv2.estimateRigidTransform was removed in OpenCV 4.x;
    # migrate to cv2.estimateAffinePartial2D / estimateAffine2D when upgrading.
    M = cv2.estimateRigidTransform(srcPts, dstPts, fullAffine=fullAffine)
    if M is None:
        # Estimation failed: everything is an outlier
        inlierMask = np.zeros(len(srcPts))
    else:
        inlierMask = []
        mappedPts = cv2.transform(srcPts, M)
        for mappedPt, dstPt in zip(mappedPts, dstPts):
            dist = np.linalg.norm(mappedPt/outlierCoordScale - dstPt/outlierCoordScale)
            inlierMask.append(int(dist < inlierThreshold))
        inlierMask = np.array(inlierMask)
    return M, inlierMask
def matchesGeometricVerification(goodMatches, train_kp, test_kp, outlierCoordScale, projectionDistanceThres):
    """Geometrically verify matches by fitting a rigid transform and counting
    inliers.

    Fix: an empty match list previously raised ZeroDivisionError (and would
    have crashed inside the transform estimation); it now returns an empty
    verification result.

    Returns:
        (nrInliers, inlierRatio, inlierMask, transformMat)
    """
    if not goodMatches:
        return (0, 0.0, np.array([]), None)
    srcPts = [train_kp[m.trainIdx].pt for m in goodMatches]
    dstPts = [test_kp[m.queryIdx].pt for m in goodMatches]
    transformMat, inlierMask = estimateRigidTransform(srcPts, dstPts, projectionDistanceThres, outlierCoordScale)
    nrInliers = sum(inlierMask)
    inlierRatio = 1.0 * nrInliers / len(goodMatches)
    return (nrInliers, inlierRatio, inlierMask, transformMat)
def visualizeMatchingResult(bbox, testImg, trainImg, test_kp, train_kp, goodMatches, inlierMask):
    """Render a side-by-side visualization of keypoint matches.

    When trainImg is provided ([] means absent), it is drawn to the left of
    testImg with match lines between them (green = inlier, red = outlier) and
    an optional detected bounding box on the test side. Without a train image,
    only the test keypoints are drawn.

    NOTE(review): the `trainImg != [] / == []` comparisons assume list-style
    sentinels; with a numpy array and modern numpy these are elementwise and
    make the `if` raise — verify callers pass [] (not an empty ndarray).
    """
    trainImgScale = 3.0 #1.0
    if trainImg != [] and trainImgScale != 1.0:
        # Scale the train keypoints by serializing, scaling pt, deserializing
        kpInfos = serializeKeypoints(train_kp)
        for i in range(len(kpInfos)):
            kpInfos[i][0] = (kpInfos[i][0][0]*trainImgScale, kpInfos[i][0][1]*trainImgScale)
        train_kp = deserializeKeypoints(kpInfos)
        trainImg = imresize(trainImg, trainImgScale)
        #for i in range(len(train_kp_scaled)):
        #    pt = train_kp_scaled[i].pt
        #    train_kp_scaled[i].pt = (pt[0] * trainImgScale, pt[1] * trainImgScale)
    if trainImg == []:
        # No train image: draw only the test keypoints
        newImg = testImg[:]
        lineThickness = 1 #max(2, int(ceil(max(testImg.shape) / 600.0)))
        drawCircles(newImg, [x.pt for x in test_kp], lineThickness, color = (255, 0, 0), thickness = lineThickness)
        #newImg = cv2.drawKeypoints(newImg, test_kp, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    else:
        # Thickness scales with the larger image so lines stay visible
        lineThickness = int(max(2, max(ceil(max(trainImg.shape) / 1000.0), int(max(testImg.shape) / 1000.0))))
        drawCircles(trainImg, [x.pt for x in train_kp], lineThickness, color = (255, 0, 0), thickness = lineThickness)
        drawCircles(testImg, [x.pt for x in test_kp], lineThickness, color = (255, 0, 0), thickness = lineThickness)
        #trainImg = cv2.drawKeypoints(trainImg, train_kp) #, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        #testImg = cv2.drawKeypoints(testImg, test_kp) #, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        #draw all matches
        # Test-image coordinates are shifted right by the train image width
        offsetX = trainImg.shape[1]
        newImg = imConcat(trainImg, testImg)
        for loopIndex,match in enumerate(goodMatches):
            posTrain = train_kp[match.trainIdx].pt
            posTest = test_kp[match.queryIdx].pt
            posTest = (posTest[0] + offsetX, posTest[1])
            if inlierMask[loopIndex]:
                lineColor = [0, 255, 0]   # green: geometric-verification inlier
            else:
                lineColor = [0, 0, 255]   # red: outlier
            drawLine(newImg, posTest, posTrain, lineColor, lineThickness)
        #draw detected bounding box
        if bbox != []:
            # Copy before shifting so the caller's bbox list is not mutated
            bboxDraw = bbox[:]
            bboxDraw[0] += offsetX
            bboxDraw[2] += offsetX
            drawRectangles(newImg, [bboxDraw], color = (0, 255, 0), thickness = 4* lineThickness)
    return newImg
def orbFindBestMatch(matcher, testImgWidth, testImgHeight, testKp, testDesc, orbTrainInfos, MIN_MATCH_COUNT, MATCH_RATIO_THRES, DISTANCE_THRES, MAX_NR_MATCHES_PER_LOCATION, PROJECTION_DISTANCE_THRESHOLD, MIN_INLIER_RATIO):
    """Match a test image against all training images and keep the one with
    the most geometrically verified inliers.

    For each training image: select good matches (ratio + distance tests),
    then run rigid-transform verification; the best train image's bbox is
    projected into the test image via the estimated transform.

    Returns:
        (bestNrInliers, bboxRect, bestTrainImgInfo, bestTransformInfo) where
        bboxRect is [] if no training image passed the thresholds.
    NOTE: Python 2 only (print statements below).
    """
    bboxRect = []
    bestNrInliers = 0
    bestTrainImgInfo = ([], "", "", [])
    bestTransformInfo = ([], [], [], -1)
    #loop over all training images
    for trainDataIndex, orbTrainInfo in enumerate(orbTrainInfos):
        displayProgressBarPrompt(1.0 * trainDataIndex / len(orbTrainInfos))
        trainObjectName, trainImgFilename, trainKp, trainDesc, trainBbox = orbTrainInfo
        #find good matches
        if trainDesc is not None and trainDesc != []:
            goodMatches = findGoodMatches(matcher, testDesc, trainDesc, trainKp, MATCH_RATIO_THRES, DISTANCE_THRES, MAX_NR_MATCHES_PER_LOCATION)
            print "len(goodMatches) = " + str(len(goodMatches))
            #run geometric verification
            # Only verify candidates that could beat the current best
            if len(goodMatches) > bestNrInliers and len(goodMatches) > MIN_MATCH_COUNT:
                # outlierCoordScale = mean of test image dimensions
                nrInliers, inlierRatio, inlierMask, transformMat = matchesGeometricVerification(goodMatches, trainKp, testKp, 0.5 *(testImgWidth + testImgHeight), PROJECTION_DISTANCE_THRESHOLD)
                print "nrInliers = " + str(nrInliers)
                # remember best matching training image
                if nrInliers > bestNrInliers and (1.0 * nrInliers / len(goodMatches)) > MIN_INLIER_RATIO:
                    bestNrInliers = nrInliers
                    bestTrainImgInfo = [trainKp, trainObjectName, trainImgFilename, trainBbox]
                    bestTransformInfo = [goodMatches, inlierMask, transformMat, inlierRatio]
    #compute bounding box
    if bestNrInliers > 0:
        # Project the train bbox (in its own coordinate frame, origin at 0,0)
        # into the test image and crop to the test image bounds
        trainObjectName, trainImgFilename, trainBbox = bestTrainImgInfo[1:]
        transformMat = bestTransformInfo[2]
        bboxRect = [0, 0, trainBbox.width(), trainBbox.height()]
        bboxRect = Bbox(*bboxRect).transform(transformMat).crop(testImgWidth, testImgHeight).rect()
    return (bestNrInliers, bboxRect, bestTrainImgInfo, bestTransformInfo)
####################################
# Project Oxford Face API
####################################
def callFaceDetectionAPI(imgPath, subscriptionKey):
    """POST an image to the (legacy) Project Oxford face detection API and
    return the raw JSON response string.

    NOTE: Python 2 only (httplib/urllib); the api.projectoxford.ai endpoint
    is the pre-Azure Cognitive Services URL and is long deprecated.
    """
    #specify image from url of from file
    #body = "{'url':'https://c.s-microsoft.com/en-us/CMSImages/ImgMmnt_Ignite_768x768_EN_US.png?version=7b019640-7544-8e3d-06a2-43654307ae07'}"
    #headers = {'Content-type': 'application/json'}
    body = readBinaryFile(imgPath)
    headers = {'Content-type': 'application/octet-stream'}
    #call API
    conn = httplib.HTTPSConnection('api.projectoxford.ai')
    # Subscription key and analysis flags travel in the query string
    params = urllib.urlencode({'subscription-key': subscriptionKey,
                               'analyzesFaceLandmarks': 'true',
                               'analyzesAge': 'true',
                               'analyzesGender': 'true',
                               'analyzesHeadPose': 'true'})
    conn.request("POST", "/face/v0/detections?%s" % params, body, headers)
    # NOTE(review): getresponse("") passes "" as the py2 `buffering` arg;
    # falsy, so harmless here, but getresponse() is the intended call.
    response = conn.getresponse("")
    jsonSring = response.read()
    conn.close()
    return jsonSring
def parseFaceJson(jsonString):
    """Parse a Face API JSON response into a list of per-face dicts.

    Each dict carries gender/age/head-pose attributes, the face id, the face
    rectangle, and selected landmark points.

    Fix: the original created a single `faceInfo` dict OUTSIDE the loop, so
    every element of the returned list aliased the same dict and all faces
    reported the last face's values. A fresh dict is now built per face.
    """
    detectedFaces = json.loads(jsonString)
    faceInfos = []
    for detectedFace in detectedFaces:
        landmarks = detectedFace['faceLandmarks']
        faceInfo = {
            'gender': detectedFace['attributes']['gender'],
            'age': detectedFace['attributes']['age'],
            'yaw': detectedFace['attributes']['headPose']['yaw'],
            'roll': detectedFace['attributes']['headPose']['roll'],
            'pitch': detectedFace['attributes']['headPose']['pitch'],
            'faceid': detectedFace['faceId'],
            'width': detectedFace['faceRectangle']['width'],
            'top': detectedFace['faceRectangle']['top'],
            'height': detectedFace['faceRectangle']['height'],
            'left': detectedFace['faceRectangle']['left'],
            'faceLandmarks': landmarks,
        }
        # Copy the landmark points used by getEyePosition / getMouthPosition
        for key in ('eyeLeftOuter', 'eyeLeftInner', 'eyeLeftTop', 'eyeLeftBottom',
                    'eyeRightOuter', 'eyeRightInner', 'eyeRightTop', 'eyeRightBottom',
                    'upperLipBottom', 'underLipBottom', 'mouthLeft', 'mouthRight'):
            faceInfo[key] = landmarks[key]
        faceInfos.append(faceInfo)
    #assert(detectedFace['attributes']['gender'] =='female' or detectedFace['attributes']['gender'] == 'male')
    return faceInfos
def getEyePosition(faceInfo):
    """Return ((leftX, leftY), (rightX, rightY)) eye centers, each the rounded
    mean of the four landmark points (outer, inner, top, bottom)."""
    def _mean4(prefix, coord):
        total = (faceInfo[prefix + 'Outer'][coord] + faceInfo[prefix + 'Inner'][coord]
                 + faceInfo[prefix + 'Top'][coord] + faceInfo[prefix + 'Bottom'][coord])
        return round(0.25 * total)
    return ((_mean4('eyeLeft', 'x'), _mean4('eyeLeft', 'y')),
            (_mean4('eyeRight', 'x'), _mean4('eyeRight', 'y')))
def getMouthPosition(faceInfo):
    """Return the mouth center as the rounded midpoint of the left and right
    mouth-corner landmarks."""
    leftCorner = faceInfo['mouthLeft']
    rightCorner = faceInfo['mouthRight']
    centerX = round(0.5 * (leftCorner['x'] + rightCorner['x']))
    centerY = round(0.5 * (leftCorner['y'] + rightCorner['y']))
    return (centerX, centerY)
def getFaceCoordinates(faceInfo):
w, h = faceInfo['width'], faceInfo['height']
faceLU = (faceInfo['left'], faceInfo['top'])
faceRU = (faceLU[0] + w, faceLU[1])
faceLB = (faceLU[0] , faceLU[1] + h)
faceRB = (faceLU[0] + w, faceLU[1] + h)
return (faceLU, faceRU, | |
uk_107
+ 445500 * uk_108
+ 5177717 * uk_109
+ 8774387 * uk_11
+ 2693610 * uk_110
+ 359148 * uk_111
+ 5955871 * uk_112
+ 6734025 * uk_113
+ 2693610 * uk_114
+ 1401300 * uk_115
+ 186840 * uk_116
+ 3098430 * uk_117
+ 3503250 * uk_118
+ 1401300 * uk_119
+ 4564710 * uk_12
+ 24912 * uk_120
+ 413124 * uk_121
+ 467100 * uk_122
+ 186840 * uk_123
+ 6850973 * uk_124
+ 7746075 * uk_125
+ 3098430 * uk_126
+ 8758125 * uk_127
+ 3503250 * uk_128
+ 1401300 * uk_129
+ 608628 * uk_13
+ 729000 * uk_130
+ 97200 * uk_131
+ 1611900 * uk_132
+ 1822500 * uk_133
+ 729000 * uk_134
+ 12960 * uk_135
+ 214920 * uk_136
+ 243000 * uk_137
+ 97200 * uk_138
+ 3564090 * uk_139
+ 10093081 * uk_14
+ 4029750 * uk_140
+ 1611900 * uk_141
+ 4556250 * uk_142
+ 1822500 * uk_143
+ 729000 * uk_144
+ 1728 * uk_145
+ 28656 * uk_146
+ 32400 * uk_147
+ 12960 * uk_148
+ 475212 * uk_149
+ 11411775 * uk_15
+ 537300 * uk_150
+ 214920 * uk_151
+ 607500 * uk_152
+ 243000 * uk_153
+ 97200 * uk_154
+ 7880599 * uk_155
+ 8910225 * uk_156
+ 3564090 * uk_157
+ 10074375 * uk_158
+ 4029750 * uk_159
+ 4564710 * uk_16
+ 1611900 * uk_160
+ 11390625 * uk_161
+ 4556250 * uk_162
+ 1822500 * uk_163
+ 729000 * uk_164
+ 3025 * uk_17
+ 9515 * uk_18
+ 4950 * uk_19
+ 55 * uk_2
+ 660 * uk_20
+ 10945 * uk_21
+ 12375 * uk_22
+ 4950 * uk_23
+ 29929 * uk_24
+ 15570 * uk_25
+ 2076 * uk_26
+ 34427 * uk_27
+ 38925 * uk_28
+ 15570 * uk_29
+ 173 * uk_3
+ 8100 * uk_30
+ 1080 * uk_31
+ 17910 * uk_32
+ 20250 * uk_33
+ 8100 * uk_34
+ 144 * uk_35
+ 2388 * uk_36
+ 2700 * uk_37
+ 1080 * uk_38
+ 39601 * uk_39
+ 90 * uk_4
+ 44775 * uk_40
+ 17910 * uk_41
+ 50625 * uk_42
+ 20250 * uk_43
+ 8100 * uk_44
+ 130470415844959 * uk_45
+ 141482932855 * uk_46
+ 445028134253 * uk_47
+ 231517526490 * uk_48
+ 30869003532 * uk_49
+ 12 * uk_5
+ 511910975239 * uk_50
+ 578793816225 * uk_51
+ 231517526490 * uk_52
+ 153424975 * uk_53
+ 482591285 * uk_54
+ 251059050 * uk_55
+ 33474540 * uk_56
+ 555119455 * uk_57
+ 627647625 * uk_58
+ 251059050 * uk_59
+ 199 * uk_6
+ 1517968951 * uk_60
+ 789694830 * uk_61
+ 105292644 * uk_62
+ 1746103013 * uk_63
+ 1974237075 * uk_64
+ 789694830 * uk_65
+ 410823900 * uk_66
+ 54776520 * uk_67
+ 908377290 * uk_68
+ 1027059750 * uk_69
+ 225 * uk_7
+ 410823900 * uk_70
+ 7303536 * uk_71
+ 121116972 * uk_72
+ 136941300 * uk_73
+ 54776520 * uk_74
+ 2008523119 * uk_75
+ 2270943225 * uk_76
+ 908377290 * uk_77
+ 2567649375 * uk_78
+ 1027059750 * uk_79
+ 90 * uk_8
+ 410823900 * uk_80
+ 166375 * uk_81
+ 523325 * uk_82
+ 272250 * uk_83
+ 36300 * uk_84
+ 601975 * uk_85
+ 680625 * uk_86
+ 272250 * uk_87
+ 1646095 * uk_88
+ 856350 * uk_89
+ 2572416961 * uk_9
+ 114180 * uk_90
+ 1893485 * uk_91
+ 2140875 * uk_92
+ 856350 * uk_93
+ 445500 * uk_94
+ 59400 * uk_95
+ 985050 * uk_96
+ 1113750 * uk_97
+ 445500 * uk_98
+ 7920 * uk_99,
uk_0
+ 50719 * uk_1
+ 2789545 * uk_10
+ 88440 * uk_100
+ 99000 * uk_101
+ 76120 * uk_102
+ 2222055 * uk_103
+ 2487375 * uk_104
+ 1912515 * uk_105
+ 2784375 * uk_106
+ 2140875 * uk_107
+ 1646095 * uk_108
+ 300763 * uk_109
+ 3398173 * uk_11
+ 776597 * uk_110
+ 35912 * uk_111
+ 902289 * uk_112
+ 1010025 * uk_113
+ 776597 * uk_114
+ 2005243 * uk_115
+ 92728 * uk_116
+ 2329791 * uk_117
+ 2607975 * uk_118
+ 2005243 * uk_119
+ 8774387 * uk_12
+ 4288 * uk_120
+ 107736 * uk_121
+ 120600 * uk_122
+ 92728 * uk_123
+ 2706867 * uk_124
+ 3030075 * uk_125
+ 2329791 * uk_126
+ 3391875 * uk_127
+ 2607975 * uk_128
+ 2005243 * uk_129
+ 405752 * uk_13
+ 5177717 * uk_130
+ 239432 * uk_131
+ 6015729 * uk_132
+ 6734025 * uk_133
+ 5177717 * uk_134
+ 11072 * uk_135
+ 278184 * uk_136
+ 311400 * uk_137
+ 239432 * uk_138
+ 6989373 * uk_139
+ 10194519 * uk_14
+ 7823925 * uk_140
+ 6015729 * uk_141
+ 8758125 * uk_142
+ 6734025 * uk_143
+ 5177717 * uk_144
+ 512 * uk_145
+ 12864 * uk_146
+ 14400 * uk_147
+ 11072 * uk_148
+ 323208 * uk_149
+ 11411775 * uk_15
+ 361800 * uk_150
+ 278184 * uk_151
+ 405000 * uk_152
+ 311400 * uk_153
+ 239432 * uk_154
+ 8120601 * uk_155
+ 9090225 * uk_156
+ 6989373 * uk_157
+ 10175625 * uk_158
+ 7823925 * uk_159
+ 8774387 * uk_16
+ 6015729 * uk_160
+ 11390625 * uk_161
+ 8758125 * uk_162
+ 6734025 * uk_163
+ 5177717 * uk_164
+ 3025 * uk_17
+ 3685 * uk_18
+ 9515 * uk_19
+ 55 * uk_2
+ 440 * uk_20
+ 11055 * uk_21
+ 12375 * uk_22
+ 9515 * uk_23
+ 4489 * uk_24
+ 11591 * uk_25
+ 536 * uk_26
+ 13467 * uk_27
+ 15075 * uk_28
+ 11591 * uk_29
+ 67 * uk_3
+ 29929 * uk_30
+ 1384 * uk_31
+ 34773 * uk_32
+ 38925 * uk_33
+ 29929 * uk_34
+ 64 * uk_35
+ 1608 * uk_36
+ 1800 * uk_37
+ 1384 * uk_38
+ 40401 * uk_39
+ 173 * uk_4
+ 45225 * uk_40
+ 34773 * uk_41
+ 50625 * uk_42
+ 38925 * uk_43
+ 29929 * uk_44
+ 130470415844959 * uk_45
+ 141482932855 * uk_46
+ 172351936387 * uk_47
+ 445028134253 * uk_48
+ 20579335688 * uk_49
+ 8 * uk_5
+ 517055809161 * uk_50
+ 578793816225 * uk_51
+ 445028134253 * uk_52
+ 153424975 * uk_53
+ 186899515 * uk_54
+ 482591285 * uk_55
+ 22316360 * uk_56
+ 560698545 * uk_57
+ 627647625 * uk_58
+ 482591285 * uk_59
+ 201 * uk_6
+ 227677591 * uk_60
+ 587883929 * uk_61
+ 27185384 * uk_62
+ 683032773 * uk_63
+ 764588925 * uk_64
+ 587883929 * uk_65
+ 1517968951 * uk_66
+ 70195096 * uk_67
+ 1763651787 * uk_68
+ 1974237075 * uk_69
+ 225 * uk_7
+ 1517968951 * uk_70
+ 3246016 * uk_71
+ 81556152 * uk_72
+ 91294200 * uk_73
+ 70195096 * uk_74
+ 2049098319 * uk_75
+ 2293766775 * uk_76
+ 1763651787 * uk_77
+ 2567649375 * uk_78
+ 1974237075 * uk_79
+ 173 * uk_8
+ 1517968951 * uk_80
+ 166375 * uk_81
+ 202675 * uk_82
+ 523325 * uk_83
+ 24200 * uk_84
+ 608025 * uk_85
+ 680625 * uk_86
+ 523325 * uk_87
+ 246895 * uk_88
+ 637505 * uk_89
+ 2572416961 * uk_9
+ 29480 * uk_90
+ 740685 * uk_91
+ 829125 * uk_92
+ 637505 * uk_93
+ 1646095 * uk_94
+ 76120 * uk_95
+ 1912515 * uk_96
+ 2140875 * uk_97
+ 1646095 * uk_98
+ 3520 * uk_99,
uk_0
+ 50719 * uk_1
+ | |
import math
import itertools
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import scipy.stats as ss
import scikit_posthocs as sp
from dash_table.Format import Format, Scheme
from Bio import Phylo
from ete3 import Tree
from plotly.subplots import make_subplots
# -------------------------------------------------------------------------------------
# --------------------------------------- Classes -------------------------------------
class DrawTree():
def __init__(self, newicktree, template, topology, color_map, branch_len, font_family):
    """Parse a newick file and store the plotting configuration.

    Args:
        newicktree: path (or handle) to a newick file, parsed via Bio.Phylo
        template: plotly template name; 'dark' templates switch text to white
        topology: key into color_map selecting the branch color
        color_map: dict mapping topology -> line color
        branch_len: truthy = ignore branch lengths (unit branch lengths)
        font_family: font family applied to the figure
    """
    # Note: self.newicktree holds the parsed Bio.Phylo tree, not the raw path
    self.newicktree = Phylo.read(newicktree, "newick")
    self.template = template
    self.topology = topology
    self.color_map = color_map
    self.branch_len = branch_len
    self.font_family = font_family
def create_square_tree(self):
    """Build a rectangular ("square") phylogram as a plotly Figure.

    Branches are drawn as horizontal/vertical line shapes; nodes are drawn as
    a scatter trace with tip names as labels (internal nodes get a blank
    label). The x-range and axis visibility adapt to the tree depth.

    Fixes: collapsed a redundant nested conditional when building node labels
    (the inner `else` branch was unreachable because it re-tested the same
    condition), and merged the duplicated root/non-root draw_clade branches
    that differed only in the root_clade flag.
    """
    def get_x_coordinates(tree):
        """Associates to each clade an x-coord.
        returns dict {clade: x-coord}
        """
        if self.branch_len:
            xcoords = tree.depths(unit_branch_lengths=True)
        else:
            xcoords = tree.depths()
        # tree.depths() maps clades to distance-from-root (by branch length).
        # If there are no branch lengths, assign unit branch lengths
        if not max(xcoords.values()):
            xcoords = tree.depths(unit_branch_lengths=True)
        return xcoords

    def get_y_coordinates(tree, dist=1.3):
        """
        returns dict {clade: y-coord}
        The y-coordinates are (float) multiples of integers (i*dist below);
        dist depends on the number of tree leafs
        """
        maxheight = tree.count_terminals()  # Counts the number of tree leafs.
        # Rows are defined by the tips/leafs
        ycoords = dict(
            (leaf, maxheight - i * dist)
            for i, leaf in enumerate(reversed(tree.get_terminals()))
        )

        def calc_row(clade):
            for subclade in clade:
                if subclade not in ycoords:
                    calc_row(subclade)
            # Internal nodes sit midway between their first and last children
            ycoords[clade] = (ycoords[clade.clades[0]] + ycoords[clade.clades[-1]]) / 2

        if tree.root.clades:
            calc_row(tree.root)
        return ycoords

    def get_clade_lines(
        orientation="horizontal",
        y_curr=0,
        x_start=0,
        x_curr=0,
        y_bot=0,
        y_top=0,
        line_color="white",
        line_width=2,
        root_clade=False,
    ):
        """define a shape of type 'line', for a branch"""
        branch_line = dict(
            type="line", layer="below", line=dict(color=line_color, width=line_width)
        )
        if root_clade:
            # Degenerate (zero-length) line just left of the root
            branch_line.update(x0=-0.01, y0=y_curr, x1=-0.01, y1=y_curr)
            return branch_line
        elif orientation == "horizontal":
            branch_line.update(x0=x_start, y0=y_curr, x1=x_curr, y1=y_curr)
        elif orientation == "vertical":
            branch_line.update(x0=x_curr, y0=y_bot, x1=x_curr, y1=y_top)
        else:
            raise ValueError("Line type can be 'horizontal' or 'vertical'")
        return branch_line

    def draw_clade(
        clade,
        x_start,
        line_shapes,
        line_color="white",
        line_width=2,
        x_coords=0,
        y_coords=0,
        init_clade=False,
    ):
        """Recursively draw the tree branches, down from the given clade"""
        x_curr = x_coords[clade]
        y_curr = y_coords[clade]
        # Draw a horizontal line from start to here (root gets the stub line)
        branch_line = get_clade_lines(
            orientation="horizontal",
            y_curr=y_curr,
            x_start=x_start,
            x_curr=x_curr,
            line_color=line_color,
            line_width=line_width,
            root_clade=init_clade,
        )
        line_shapes.append(branch_line)
        if clade.clades:
            # Draw a vertical line connecting all children
            y_top = y_coords[clade.clades[0]]
            y_bot = y_coords[clade.clades[-1]]
            line_shapes.append(
                get_clade_lines(
                    orientation="vertical",
                    x_curr=x_curr,
                    y_bot=y_bot,
                    y_top=y_top,
                    line_color=line_color,
                    line_width=line_width,
                )
            )
            # Draw descendants
            for child in clade:
                draw_clade(child, x_curr, line_shapes,
                           x_coords=x_coords, y_coords=y_coords,
                           line_color=line_color)

    # Text color follows the template's background
    if 'dark' in self.template:
        text_color = 'white'
    else:
        text_color = 'black'
    line_color = self.color_map[self.topology]
    tree = self.newicktree
    tree.ladderize()

    x_coords = get_x_coordinates(tree)
    y_coords = get_y_coordinates(tree)
    line_shapes = []
    draw_clade(
        tree.root,
        0,
        line_shapes,
        line_color=line_color,
        line_width=2,
        x_coords=x_coords,
        y_coords=y_coords,
        init_clade=True,
    )
    my_tree_clades = x_coords.keys()
    X = []
    Y = []
    text = []
    for cl in my_tree_clades:
        X.append(x_coords[cl])
        Y.append(y_coords[cl])
        # Tips get their name; unnamed (internal) nodes get a blank label
        text.append(cl.name if cl.name else " ")
    axis = dict(
        showline=False,
        visible=False,
        zeroline=False,
        showgrid=False,
        showticklabels=False,
        title="",  # y title
    )
    label_legend = ["Tree_1"]
    nodes = []
    for elt in label_legend:
        node = dict(
            type="scatter",
            x=X,
            y=Y,
            mode="markers+text",
            marker=dict(color=text_color, size=5),
            text=text,  # vignet information of each node
            textposition='middle right',
            textfont=dict(color=text_color, size=12),
            showlegend=False,
            name=elt,
        )
        nodes.append(node)
    # Set graph x-range: shallow trees get a visible, padded axis; deep or
    # unit-branch-length trees hide the axis and pad by 2
    if self.branch_len:
        x_range = [-0.5, (max(x_coords.values())+2)]
        show_xaxis = False
    elif max(x_coords.values()) < 0.1:
        x_range = [0, (max(x_coords.values())+(max(x_coords.values())*1.25))]
        show_xaxis = True
    elif max(x_coords.values()) < 0.5:
        x_range = [0, 0.5]
        show_xaxis = True
    elif max(x_coords.values()) < 1:
        x_range = [0, 1]
        show_xaxis = True
    elif max(x_coords.values()) == 1:
        x_range = [0, max(x_coords.values())+2]
        show_xaxis = False
    else:
        x_range = [0, max(x_coords.values())+2]
        show_xaxis = False
    layout = dict(
        autosize=True,
        showlegend=False,
        template=self.template,
        dragmode="pan",
        margin=dict(t=20, b=10, r=20, l=10),
        xaxis=dict(
            showline=True,
            zeroline=False,
            visible=show_xaxis,
            showgrid=False,
            showticklabels=True,
            range=x_range,
        ),
        yaxis=axis,
        hovermode="closest",
        shapes=line_shapes,
        font=dict(family=self.font_family, size=14),
    )
    fig = go.Figure(data=nodes, layout=layout)
    return fig
def create_angular_tree(self):
    """Build an "angular" tree layout (slanted branches) as a dict of plotly
    data + layout.

    Unlike create_square_tree, the x/y roles are derived from node order
    rather than branch length, branches connect parent to child directly,
    and the return value is a plain dict rather than a go.Figure —
    NOTE(review): consider returning go.Figure for consistency.
    """
    def get_x_coordinates(tree):
        """Associates to each clade an x-coord.
        returns dict {clade: x-coord}
        """
        # xcoords = tree.depths(unit_branch_lengths=True)
        # print("===========================")
        # nodes = [n for n in tree.find_clades()]
        # nodes = tree.get_terminals() + tree.get_nonterminals()
        # print(tree.root.clades)
        # root_xcoord = {tree.root.clades[1]:0}
        terminal_nodes = tree.get_terminals()
        internal_nodes = tree.get_nonterminals()
        terminal_xcoords = dict((leaf, i) for i, leaf in enumerate(terminal_nodes))
        # NOTE(review): range(1, len(internal_nodes)) has len-1 elements, so
        # zip drops the LAST internal node — it gets no x-coord and a lookup
        # in draw_clade could KeyError. Verify whether this path is exercised.
        internal_xcoords = dict(
            (leaf, i+0.5) for leaf, i in zip(internal_nodes, range(1, len(internal_nodes)))
        )
        xcoords = {**terminal_xcoords, **internal_xcoords}
        # print(xcoords)
        # print("===========================")
        # tree.depth() maps tree clades to depths (by branch length).
        # returns a dict {clade: depth} where clade runs over all Clade instances of the tree, and depth
        # is the distance from root to clade
        # If there are no branch lengths, assign unit branch lengths
        if not max(xcoords.values()):
            xcoords = tree.depths(unit_branch_lengths=True)
        return xcoords

    def get_y_coordinates(tree, dist=1):
        """
        returns dict {clade: y-coord}
        The y-coordinates are (float) multiple of integers (i*dist below)
        dist depends on the number of tree leafs
        """
        maxheight = tree.count_terminals()  # Counts the number of tree leafs.
        # Rows are defined by the tips/leafs
        # root_ycoord = {tree.root:maxheight}
        terminal_nodes = tree.get_terminals()
        internal_nodes = tree.get_nonterminals()
        # NOTE(review): every terminal gets y=1 here (constant), so tips share
        # a baseline; internal nodes are then averaged by calc_row below.
        terminal_ycoords = dict((leaf, 1) for _, leaf in enumerate(terminal_nodes))
        internal_ycoords = dict(
            (leaf, i) for leaf, i in zip(internal_nodes, reversed(range(1, len(internal_nodes))))
        )
        ycoords = {**terminal_ycoords, **internal_ycoords}

        def calc_row(clade):
            for subclade in clade:
                if subclade not in ycoords:
                    calc_row(subclade)
            # Midpoint of first and last child
            ycoords[clade] = (ycoords[clade.clades[0]] +
                              ycoords[clade.clades[-1]]) / 2

        if tree.root.clades:
            calc_row(tree.root)
        return ycoords

    def get_clade_lines(
        orientation="horizontal",
        y_curr=0,
        last_y_curr=0,
        x_start=0,
        x_curr=0,
        y_bot=0,
        y_top=0,
        line_color="rgb(25,25,25)",
        line_width=0.5,
        init_flag=False,
    ):
        """define a shape of type 'line', for branch
        """
        branch_line = dict(
            type="line", layer="below", line=dict(color=line_color, width=line_width)
        )
        if orientation == "horizontal":
            # init_flag draws a level line; otherwise the line slants from the
            # parent's y (last_y_curr) to this clade's y
            if init_flag:
                branch_line.update(x0=x_start, y0=y_curr,
                                   x1=x_curr, y1=y_curr)
            else:
                branch_line.update(
                    x0=x_start, y0=last_y_curr, x1=x_curr, y1=y_curr)
        elif orientation == "vertical":
            branch_line.update(x0=x_curr, y0=y_bot, x1=x_curr, y1=y_top)
        else:
            raise ValueError("Line type can be 'horizontal' or 'vertical'")
        return branch_line

    def draw_clade(
        clade,
        x_start,
        line_shapes,
        line_color="rgb(15,15,15)",
        line_width=1,
        x_coords=0,
        y_coords=0,
        last_clade_y_coord=0,
        init_flag=True,
    ):
        """Recursively draw the tree branches, down from the given clade"""
        x_curr = x_coords[clade]
        y_curr = y_coords[clade]
        # Draw a line from the parent position to here (slanted unless root)
        branch_line = get_clade_lines(
            orientation="horizontal",
            y_curr=y_curr,
            last_y_curr=last_clade_y_coord,
            x_start=x_start,
            x_curr=x_curr,
            line_color=line_color,
            line_width=line_width,
            init_flag=init_flag,
        )
        line_shapes.append(branch_line)
        if clade.clades:
            # Draw descendants
            for child in clade:
                draw_clade(child, x_curr, line_shapes, x_coords=x_coords,
                           y_coords=y_coords, last_clade_y_coord=y_coords[clade],
                           init_flag=False, line_color=line_color)

    # Text color follows the template's background
    if 'dark' in self.template:
        text_color = 'white'
    else:
        text_color = 'black'
    line_color = self.color_map[self.topology]
    # Load in Tree object and ladderize
    tree = self.newicktree
    tree.ladderize()
    # Get coordinates + put into dictionary
    # dict(keys=clade_names, values=)
    x_coords = get_x_coordinates(tree)
    y_coords = get_y_coordinates(tree)
    line_shapes = []
    draw_clade(
        tree.root,
        0,
        line_shapes,
        line_color=line_color,
        line_width=2,
        x_coords=x_coords,
        y_coords=y_coords,
    )
    #
    my_tree_clades = x_coords.keys()
    X = []
    Y = []
    text = []
    for cl in my_tree_clades:
        X.append(x_coords[cl])
        Y.append(y_coords[cl])
        # Add confidence values if internal node
        if not cl.name:
            text.append(cl.confidence)
        else:
            text.append(cl.name)
    axis = dict(
        showline=False,
        zeroline=False,
        showgrid=False,
        visible=False,
        showticklabels=False,
    )
    label_legend = ["Tree_1"]
    nodes = []
    for elt in label_legend:
        node = dict(
            type="scatter",
            x=X,
            y=Y,
            mode="markers+text",
            marker=dict(color=text_color, size=5),
            text=text,  # vignet information of each node
            # NOTE(review): plotly expects e.g. 'middle right'; bare 'right'
            # may be rejected by newer plotly versions — confirm.
            textposition='right',
            textfont=dict(color=text_color, size=25),
            showlegend=False,
            name=elt,
        )
        nodes.append(node)
    layout = dict(
        template=self.template,
        dragmode="select",
        autosize=True,
        showlegend=True,
        xaxis=dict(
            showline=True,
            zeroline=False,
            visible=False,
            showgrid=False,
            showticklabels=True,
            range=[0, (max(x_coords.values())+2)]
        ),
        yaxis=axis,
        hovermode="closest",
        shapes=line_shapes,
        legend={"x": 0, "y": 1},
        font=dict(family="Open Sans"),
    )
    fig = dict(data=nodes, layout=layout)
    return fig
def create_circular_tree(self):
def get_circular_tree_data(tree, order='level', dist=1, start_angle=0, end_angle=360, start_leaf='first'):
"""Define data needed to get the Plotly plot of a circular tree
Source code found at: https://chart-studio.plotly.com/~empet/14834.embed
"""
# tree: an instance of Bio.Phylo.Newick.Tree or Bio.Phylo.PhyloXML.Phylogeny
# order: tree traversal method to associate polar coordinates to its nodes
# dist: the vertical distance between two consecutive leafs in the associated rectangular tree layout
# start_angle: angle in degrees representing the angle of the first leaf mapped to a circle
# end_angle: angle in degrees representing the angle of the last leaf
# the list of leafs mapped in anticlockwise direction onto circles can be tree.get_terminals()
# or its reversed version tree.get_terminals()[::-1].
# start leaf: is a keyword with two possible values"
# 'first': to map the leafs in the list tree.get_terminals() onto a circle,
| |
"/location/location",
"/location/citytown",
"/user/joehughes/default_domain/transit_service_area",
"/location/dated_location",
"/location/statistical_region",
"/government/governmental_jurisdiction"
],
"id": "/en/duluth",
"name": "Duluth"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadObjDefaultPropertiesOfValueProperty(self):
"""Read the default properties ({}) of a value property.

Querying /people/person/date_of_birth with an empty object expands it
into its default type/value envelope in the response.
"""
query = """
{
"/people/person/date_of_birth": {},
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"/people/person/date_of_birth": {
"type": "/type/datetime",
"value": "1941-05-24"
},
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadWithExpectedTypes1(self):
"""Read with expected types (flat form).

Null constraints on date_of_birth and place_of_birth resolve to plain
scalar values in the response.
"""
query = """
{
"date_of_birth": null,
"type": "/people/person",
"id": "/en/bob_dylan",
"place_of_birth": null
}
"""
exp_response = """
{
"date_of_birth": "1941-05-24",
"type": "/people/person",
"id": "/en/bob_dylan",
"place_of_birth": "Duluth"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadWithExpectedTypes2(self):
"""Read with expected types (nested form).

A nested geolocation sub-query under place_of_birth resolves to
latitude/longitude float values.
"""
query = """
{
"date_of_birth": null,
"type": "/people/person",
"id": "/en/bob_dylan",
"place_of_birth": {
"geolocation": {
"latitude": null,
"longitude": null
}
}
}
"""
exp_response = """
{
"date_of_birth": "1941-05-24",
"type": "/people/person",
"id": "/en/bob_dylan",
"place_of_birth": {
"geolocation": {
"latitude": 46.783299999999997,
"longitude": -92.106399999999994
}
}
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadWithExpectedTypesOfReverseProperty(self):
"""Read with expected types of a reverse property.

/people/profession/people_with_this_profession is queried with
"limit": 2; each result carries its profession list and name.
"""
query = """
{
"/people/profession/people_with_this_profession": [
{
"limit": 2,
"profession": [],
"name": null
}
],
"id": "/en/songwriter"
}
"""
exp_response = """
{
"/people/profession/people_with_this_profession": [
{
"profession": [
"Singer",
"Songwriter",
"Record producer",
"Bassist",
"Composer",
"Keyboard player"
],
"name": "<NAME>"
},
{
"profession": [
"Songwriter"
],
"name": "<NAME>"
}
],
"id": "/en/songwriter"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadWithExpectedTypesOfReversedMasterProperty(self):
"""Read with expected types of a reversed master property.

!/people/person/profession (the "!" reverses the master property) with
"limit": 2 should return the same results as querying the reverse
property directly.
"""
query = """
{
"id": "/en/songwriter",
"!/people/person/profession": [
{
"profession": [],
"limit": 2,
"name": null
}
]
}
"""
exp_response = """
{
"id": "/en/songwriter",
"!/people/person/profession": [
{
"profession": [
"Singer",
"Songwriter",
"Record producer",
"Bassist",
"Composer",
"Keyboard player"
],
"name": "<NAME>"
},
{
"profession": [
"Songwriter"
],
"name": "<NAME>"
}
]
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadWithExpectedTypesOfReversedReverseProperty(self):
"""Read with expected types of a reversed reverse property.

!/people/profession/people_with_this_profession reverses an
already-reverse property, yielding the person's professions; note one
result has a null specialization_of.
"""
query = """
{
"!/people/profession/people_with_this_profession": [
{
"specialization_of": null,
"limit": 2,
"name": null
}
],
"type": "/people/person",
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"type": "/people/person",
"!/people/profession/people_with_this_profession": [
{
"specialization_of": "Musician",
"name": "Songwriter"
},
{
"specialization_of": null,
"name": "Writer"
}
],
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadEmptyArrayOfObjProperty(self):
"""Read an empty array ([]) of an object property.

"religion": [] expands to the full list of values, rendered by name.
"""
query = """
{
"religion": [],
"type": "/people/person",
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"religion": [
"Christianity",
"Judaism"
],
"type": "/people/person",
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadEmptyArrayOfValueProperty(self):
"""Read an empty array ([]) of a value property.

"alias": [] expands to every alias value for the topic.
"""
query = """
{
"alias": [],
"type": "/common/topic",
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"alias": [
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>"
],
"type": "/common/topic",
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadObjArrayOfObjProperty(self):
"""Read an object array of an object property.

Each element of "religion" resolves the requested null "id" field.
"""
query = """
{
"religion": [
{
"id": null
}
],
"type": "/people/person",
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"religion": [
{
"id": "/en/christianity"
},
{
"id": "/en/judaism"
}
],
"type": "/people/person",
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadObjArrayOfValueProperty(self):
"""Read an object array of a value property.

"date_of_birth" in array-of-object form yields value envelopes.
"""
query = """
{
"date_of_birth": [
{
"value": null
}
],
"type": "/people/person",
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"date_of_birth": [
{
"value": "1941-05-24"
}
],
"type": "/people/person",
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadArrayOfTextPropertyWithDefaultLang(self):
"""Read an array of a text property using the default language.

Requesting only "value" for "name" returns all language variants'
values without lang markers.
"""
query = """
{
"id": "/en/bob_dylan",
"name": [
{
"value": null
}
]
}
"""
exp_response = """
{
"id": "/en/bob_dylan",
"name": [
{
"value": "<NAME>"
},
{
"value": "\u0414\u0438\u043b\u0430\u043d, \u0411\u043e\u0431"
},
{
"value": "\u0411\u043e\u0431 \u0414\u0456\u043b\u0430\u043d"
},
{
"value": "\u30dc\u30d6\u30fb\u30c7\u30a3\u30e9\u30f3"
},
{
"value": "\u9c8d\u52c3\u00b7\u8fea\u4f26"
}
]
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadArrayOfTextPropertyWithLang(self):
"""Read an array of a text property requesting lang alongside value.

Each "name" variant is returned with its /lang/* identifier.
"""
query = """
{
"id": "/en/bob_dylan",
"name": [
{
"lang": null,
"value": null
}
]
}
"""
exp_response = """
{
"id": "/en/bob_dylan",
"name": [
{
"lang": "/lang/en",
"value": "<NAME>"
},
{
"lang": "/lang/ru",
"value": "\u0414\u0438\u043b\u0430\u043d, \u0411\u043e\u0431"
},
{
"lang": "/lang/uk",
"value": "\u0411\u043e\u0431 \u0414\u0456\u043b\u0430\u043d"
},
{
"lang": "/lang/ja",
"value": "\u30dc\u30d6\u30fb\u30c7\u30a3\u30e9\u30f3"
},
{
"lang": "/lang/zh",
"value": "\u9c8d\u52c3\u00b7\u8fea\u4f26"
}
]
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadObjArrayWithLimit(self):
"""Read an object array with "limit": 1.

Only the first religion is returned when the limit directive is set.
"""
query = """
{
"religion": [
{
"limit": 1,
"id": null
}
],
"type": "/people/person",
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"religion": [
{
"id": "/en/christianity"
}
],
"type": "/people/person",
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadEmptyObjArrayDefaultPropertiesOfObjProperty(self):
"""Read default properties via an empty-object array of an object property.

"religion": [{}] expands each linked object into its default
properties: the full type list, id, and name.
"""
query = """
{
"religion": [
{}
],
"type": "/people/person",
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"religion": [
{
"type": [
"/common/topic",
"/religion/religion",
"/broadcast/genre",
"/exhibitions/exhibition_subject",
"/book/book_subject",
"/user/tsegaran/random/taxonomy_subject",
"/people/profession",
"/base/godbase/topic",
"/base/popstra/sww_base",
"/base/popstra/religion",
"/base/popstra/topic",
"/base/christianity/topic",
"/base/argumentmaps/thing_of_disputed_value",
"/base/argumentmaps/topic"
],
"id": "/en/christianity",
"name": "Christianity"
},
{
"type": [
"/common/topic",
"/religion/religion",
"/broadcast/genre",
"/exhibitions/exhibition_subject",
"/military/military_combatant",
"/book/book_subject",
"/user/tsegaran/random/taxonomy_subject",
"/base/symbols/topic",
"/base/symbols/symbolized_concept",
"/organization/organization_sector",
"/base/godbase/topic",
"/m/05qry21",
"/education/field_of_study",
"/base/jewlib/topic",
"/base/jewlib/jewish_studies_field",
"/base/popstra/sww_base",
"/base/popstra/religion",
"/base/popstra/topic",
"/fictional_universe/ethnicity_in_fiction",
"/base/eating/practicer_of_diet"
],
"id": "/en/judaism",
"name": "Judaism"
}
],
"type": "/people/person",
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadEmptyObjArrayDefaultPropertiesOfValueProperty(self):
"""Read default properties via an empty-object array of a value property.

"alias": [{}] expands each text value into its default properties:
lang, type (/type/text), and value.
"""
query = """
{
"alias": [
{}
],
"type": "/common/topic",
"id": "/en/bob_dylan"
}
"""
exp_response = """
{
"alias": [
{
"lang": "/lang/en",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/he",
"type": "/type/text",
"value": "\u05d1\u05d5\u05d1 \u05d3\u05d9\u05dc\u05df"
},
{
"lang": "/lang/es",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/zh",
"type": "/type/text",
"value": "\u9c8d\u52c3\u00b7\u8fea\u4f26"
},
{
"lang": "/lang/fr",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/it",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/de",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/sk",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/hu",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/id",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/ro",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/tr",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/ru",
"type": "/type/text",
"value": "\u0414\u0438\u043b\u0430\u043d, \u0411\u043e\u0431"
},
{
"lang": "/lang/sr",
"type": "/type/text",
"value": "\u0411\u043e\u0431 \u0414\u0438\u043b\u0430\u043d"
},
{
"lang": "/lang/ja",
"type": "/type/text",
"value": "\u30dc\u30d6\u30fb\u30c7\u30a3\u30e9\u30f3"
},
{
"lang": "/lang/ca",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/sl",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/en",
"type": "/type/text",
"value": "Blind Boy Grunt"
},
{
"lang": "/lang/en",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/en",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/en",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/en",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/en",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/en",
"type": "/type/text",
"value": "<NAME>"
},
{
"lang": "/lang/en",
"type": "/type/text",
"value": "<NAME>"
}
],
"type": "/common/topic",
"id": "/en/bob_dylan"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeAttributionAttributed(self):
"""Read /type/attribution/attributed on a user.

Resolves the id of the object attributed to the user.
"""
query = """
{
"/type/attribution/attributed": {
"id": null
},
"id": "/user/warren"
}
"""
exp_response = """
{
"/type/attribution/attributed": {
"id": "/m/07n73yp"
},
"id": "/user/warren"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testConstrainTypeAttributionAttributed(self):
"""Constrain /type/attribution/attributed to a specific guid id.

The constraint matches, so the query echoes back unchanged.
"""
query = """
{
"/type/attribution/attributed": {
"id": "/guid/9202a8c04000641f800000000f438fb5"
},
"id": "/user/warren"
}
"""
exp_response = """
{
"/type/attribution/attributed": {
"id": "/guid/9202a8c04000641f800000000f438fb5"
},
"id": "/user/warren"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeAttributionAttributedAsBangTypeObjectAttribution(self):
"""Read via !/type/object/attribution must fail.

/type/object/attribution is an artificial property and cannot be
reversed, so the query raises MQLTypeError.
"""
query = """
{
"!/type/object/attribution": {
"id": null
},
"id": "/user/warren"
}
"""
# Expected exception (type, message) rather than a response payload.
exc_response = (
error.MQLTypeError,
"Can't reverse artificial property /type/object/attribution"
)
self.DoQuery(query, exc_response=exc_response)
def testConstrainTypeAttributionAttributedAsBangTypeObjectAttribution(self):
"""Constrain via !/type/object/attribution must fail.

As with reading, reversing the artificial property
/type/object/attribution raises MQLTypeError.
"""
query = """
{
"!/type/object/attribution": {
"id": "/guid/9202a8c04000641f800000000f438fb5"
},
"id": "/user/warren"
}
"""
# Expected exception (type, message) rather than a response payload.
exc_response = (
error.MQLTypeError,
"Can't reverse artificial property /type/object/attribution"
)
self.DoQuery(query, exc_response=exc_response)
def testReadTypeObjectKeyProperties(self):
"""Read /type/object/key properties.

Constrains the key value and resolves its namespace.
"""
query = """
{
"id": "/en/bob_dylan",
"key": {
"namespace": null,
"value": "<KEY>"
}
}
"""
exp_response = """
{
"id": "/en/bob_dylan",
"key": {
"namespace": "/authority/musicbrainz",
"value": "<KEY>"
}
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadTypeNamespaceKeysProperties(self):
"""Read /type/namespace/keys properties with "limit": 2.

Each key returns its namespace id and value.
"""
query = """
{
"/type/namespace/keys": [
{
"limit": 2,
"namespace": null,
"value": null
}
],
"id": "/authority/musicbrainz"
}
"""
exp_response = """
{
"/type/namespace/keys": [
{
"namespace": "/authority/musicbrainz/name",
"value": "name"
},
{
"namespace": "/en/extended_play",
"value": "ALBUMTYPE3"
}
],
"id": "/authority/musicbrainz"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testReadStructuredTypeKeyNamespace(self):
"""read structured /type/key/namespace."""
query = """
{
"/type/namespace/keys": [
{
"namespace": {
"guid": null,
"id": null
},
"limit": 2,
"value": null
}
],
"id": "/authority/musicbrainz"
}
"""
exp_response = """
{
"/type/namespace/keys": [
{
"namespace": {
"guid": "#9202a8c04000641f8000000001143432",
"id": "/authority/musicbrainz/name"
},
"value": "name"
},
{
"namespace": {
"guid": "#9202a8c04000641f80000000012091b6",
| |
# # PACKED-BED REACTOR MODEL
# # -------------------------
# # import packages/modules
# import math as MATH
# import numpy as np
# from library.plot import plotClass as pltc
# from scipy.integrate import solve_ivp
# # internal
# from core.errors import errGeneralClass as errGeneral
# from data.inputDataReactor import *
# from core import constants as CONST
# from core.utilities import roundNum
# from core.config import REACTION_RATE_ACCURACY
# from .rmtUtility import rmtUtilityClass as rmtUtil
# from .rmtThermo import *
# class PackedBedReactorClass:
# # def main():
# """
# Packed-bed Reactor Model
# """
# # internal data
# _internalData = []
# def __init__(self, modelInput, internalData, reactionListSorted):
# self.modelInput = modelInput
# self.internalData = internalData
# self.reactionListSorted = reactionListSorted
# @property
# def internalData(cls):
# return cls._internalData
# @internalData.setter
# def internalData(cls, value):
# cls._internalData.clear()
# cls._internalData.extend(value)
# def runM1(self):
# """
# M1 modeling case
# """
# # operating conditions
# P = self.modelInput['operating-conditions']['pressure']
# T = self.modelInput['operating-conditions']['temperature']
# # ->
# modelParameters = {
# "pressure": P,
# "temperature": T
# }
# # component list
# compList = self.modelInput['feed']['components']['shell']
# labelList = compList.copy()
# labelList.append("Flux")
# # initial values
# # -> mole fraction
# MoFri = self.modelInput['feed']['mole-fraction']
# # -> flux [kmol/m^2.s]
# MoFl = self.modelInput['feed']['molar-flux']
# IV = []
# IV.extend(MoFri)
# IV.append(MoFl)
# # print(f"IV: {IV}")
# # time span
# # t = (0.0, rea_L)
# t = np.array([0, rea_L])
# times = np.linspace(t[0], t[1], 20)
# # tSpan = np.linspace(0, rea_L, 25)
# # ode call
# sol = solve_ivp(PackedBedReactorClass.modelEquationM1,
# t, IV, method="LSODA", t_eval=times, args=(P, T))
# # ode result
# successStatus = sol.success
# dataX = sol.t
# dataYs = sol.y
# # check
# if successStatus is True:
# # plot setting
# XYList = pltc.plots2DSetXYList(dataX, dataYs)
# # -> label
# dataList = pltc.plots2DSetDataList(XYList, labelList)
# # plot result
# pltc.plots2D(dataList, "Reactor Length (m)",
# "Concentration (mol/m^3)", "1D Plug-Flow Reactor")
# else:
# XYList = []
# dataList = []
# # return
# res = {
# "XYList": XYList,
# "dataList": dataList
# }
# return res
# def runM2(self):
# """
# M2 modeling case
# """
# # operating conditions
# P = self.modelInput['operating-conditions']['pressure']
# T = self.modelInput['operating-conditions']['temperature']
# # reaction list
# reactionDict = self.modelInput['reactions']
# reactionList = rmtUtil.buildReactionList(reactionDict)
# # component list
# compList = self.modelInput['feed']['components']['shell']
# labelList = compList.copy()
# labelList.append("Flux")
# labelList.append("Temperature")
# # component no
# compNo = len(compList)
# indexFlux = compNo
# indexTemp = indexFlux + 1
# # initial values
# # -> mole fraction
# MoFri = self.modelInput['feed']['mole-fraction']
# # -> flux [kmol/m^2.s]
# MoFl = self.modelInput['feed']['molar-flux']
# IV = []
# IV.extend(MoFri)
# IV.append(MoFl)
# IV.append(T)
# print(f"IV: {IV}")
# IVSet = np.array(IV)
# # parameters
# # component data
# reactionListSorted = self.reactionListSorted
# # standard heat of reaction at 25C [kJ/kmol]
# StHeRe25 = np.array(
# list(map(calStandardEnthalpyOfReaction, reactionList)))
# # time span
# # t = (0.0, rea_L)
# t = np.array([0, rea_L])
# t_span = np.array([0, rea_L])
# times = np.linspace(t_span[0], t_span[1], 100)
# # tSpan = np.linspace(0, rea_L, 25)
# # ode call
# sol = solve_ivp(PackedBedReactorClass.modelEquationM2,
# t, IVSet, method="LSODA", t_eval=times, args=(P, compList, StHeRe25, reactionListSorted))
# # ode result
# successStatus = sol.success
# dataX = sol.t
# # all results
# dataYs = sol.y
# # concentration
# dataYs1 = sol.y[0:compNo, :]
# labelListYs1 = labelList[0:compNo]
# # flux
# dataYs2 = sol.y[indexFlux, :]
# labelListYs2 = labelList[indexFlux]
# # temperature
# dataYs3 = sol.y[indexTemp, :]
# labelListYs3 = labelList[indexTemp]
# # check
# if successStatus is True:
# # plot setting: build (x,y) series
# XYList = pltc.plots2DSetXYList(dataX, dataYs)
# # -> add label
# dataList = pltc.plots2DSetDataList(XYList, labelList)
# # datalists
# dataLists = [dataList[0:compNo],
# dataList[indexFlux], dataList[indexTemp]]
# # subplot result
# pltc.plots2DSub(dataLists, "Reactor Length (m)",
# "Concentration (mol/m^3)", "1D Plug-Flow Reactor")
# # plot result
# # pltc.plots2D(dataList[0:compNo], "Reactor Length (m)",
# # "Concentration (mol/m^3)", "1D Plug-Flow Reactor")
# # pltc.plots2D(dataList[indexFlux], "Reactor Length (m)",
# # "Flux (kmol/m^2.s)", "1D Plug-Flow Reactor")
# # pltc.plots2D(dataList[indexTemp], "Reactor Length (m)",
# # "Temperature (K)", "1D Plug-Flow Reactor")
# else:
# XYList = []
# dataList = []
# # return
# res = {
# "XYList": XYList,
# "dataList": dataList
# }
# return res
# def modelEquationM1(t, y, P, T):
# """
# M1 model
# mass balance equations
# modelParameters:
# pressure [Pa]
# temperature [K]
# """
# # operating conditions
# # P = modelParameters['pressure']
# # T = modelParameters['temperature']
# # # components
# # comp = modelParameters['components']
# # # components no
# # compNo = len(comp)
# #! loop vars
# # MoFri = np.copy(y)
# yi_H2 = y[0]
# yi_CO2 = y[1]
# yi_H2O = y[2]
# yi_CO = y[3]
# yi_CH3OH = y[4]
# yi_DME = y[5]
# # molar flux [kmol/m^2.s]
# MoFl = y[6]
# # mole fraction list
# MoFri = [yi_H2, yi_CO2, yi_H2O, yi_CO, yi_CH3OH, yi_DME]
# # kinetics
# Ri = PackedBedReactorClass.modelReactions(P, T, MoFri)
# # H2
# R_H2 = -(3*Ri[0]-Ri[1])
# # CO2
# R_CO2 = -(Ri[0]-Ri[1])
# # H2O
# R_H2O = (Ri[0]-Ri[1]+Ri[2])
# # CO
# R_CO = -(Ri[1])
# # CH3OH
# R_CH3OH = -(2*Ri[2]-Ri[0])
# # DME
# R_DME = (Ri[2])
# # total
# R_T = -(2*Ri[0])
# # mass balance equation
# # loop vars
# A1 = 1/MoFl
# B1 = 1
# # H2
# dxdt_H2 = A1*(R_H2 - y[0]*R_T)
# # CO2
# dxdt_CO2 = A1*(R_CO2 - y[1]*R_T)
# # H2O
# dxdt_H2O = A1*(R_H2O - y[2]*R_T)
# # CO
# dxdt_CO = A1*(R_CO - y[3]*R_T)
# # CH3OH
# dxdt_CH3OH = A1*(R_CH3OH - y[4]*R_T)
# # DME
# dxdt_DME = A1*(R_DME - y[5]*R_T)
# # overall
# dxdt_T = B1*R_T
# # build diff/dt
# dxdt = [dxdt_H2, dxdt_CO2, dxdt_H2O,
# dxdt_CO, dxdt_CH3OH, dxdt_DME, dxdt_T]
# return dxdt
# def modelReactions(P, T, y):
# try:
# # pressure [Pa]
# # temperature [K]
# # print("y", y)
# # parameters
# RT = CONST.R_CONST*T
# # kinetic constant
# # DME production
# # [kmol/kgcat.s.bar2]
# K1 = 35.45*MATH.exp(-1.7069e4/RT)
# # [kmol/kgcat.s.bar]
# K2 = 7.3976*MATH.exp(-2.0436e4/RT)
# # [kmol/kgcat.s.bar]
# K3 = 8.2894e4*MATH.exp(-5.2940e4/RT)
# # adsorption constant [1/bar]
# KH2 = 0.249*MATH.exp(3.4394e4/RT)
# KCO2 = 1.02e-7*MATH.exp(6.74e4/RT)
# KCO = 7.99e-7*MATH.exp(5.81e4/RT)
# # equilibrium constant
# Ln_KP1 = 4213/T - 5.752 * \
# MATH.log(T) - 1.707e-3*T + 2.682e-6 * \
# (MATH.pow(T, 2)) - 7.232e-10*(MATH.pow(T, 3)) + 17.6
# KP1 = MATH.exp(Ln_KP1)
# log_KP2 = 2167/T - 0.5194 * \
# MATH.log10(T) + 1.037e-3*T - 2.331e-7*(MATH.pow(T, 2)) - 1.2777
# KP2 = MATH.pow(10, log_KP2)
# Ln_KP3 = 4019/T + 3.707 * \
# MATH.log(T) - 2.783e-3*T + 3.8e-7 * \
# (MATH.pow(T, 2)) - 6.56e-4/(MATH.pow(T, 3)) - 26.64
# KP3 = MATH.exp(Ln_KP3)
# # total concentration
# # Ct = y(1) + y(2) + y(3) + y(4) + y(5) + y(6);
# # mole fraction
# yi_H2 = y[0]
# yi_CO2 = y[1]
# yi_H2O = y[2]
# yi_CO = y[3]
# yi_CH3OH = y[4]
# yi_DME = y[5]
# # partial pressure of H2 [bar]
# PH2 = P*(yi_H2)*1e-5
# # partial pressure of CO2 [bar]
# PCO2 = P*(yi_CO2)*1e-5
# # partial pressure of H2O [bar]
# PH2O = P*(yi_H2O)*1e-5
# # partial pressure of CO [bar]
# PCO = P*(yi_CO)*1e-5
# # partial pressure of CH3OH [bar]
# PCH3OH = P*(yi_CH3OH)*1e-5
# # partial pressure of CH3OCH3 [bar]
# PCH3OCH3 = P*(yi_DME)*1e-5
# # reaction rate expression [kmol/m3.s]
# ra1 = PCO2*PH2
# ra2 = 1 + (KCO2*PCO2) + (KCO*PCO) + MATH.sqrt(KH2*PH2)
# ra3 = (1/KP1)*((PH2O*PCH3OH)/(PCO2*(MATH.pow(PH2, 3))))
# r1 = K1*(ra1/(MATH.pow(ra2, 3)))*(1-ra3)*bulk_rho
# ra4 = PH2O - (1/KP2)*((PCO2*PH2)/PCO)
# r2 = K2*(1/ra2)*ra4*bulk_rho
# ra5 = (MATH.pow(PCH3OH, 2)/PH2O)-(PCH3OCH3/KP3)
# r3 = K3*ra5*bulk_rho
# # result
# # r = roundNum([r1, r2, r3], REACTION_RATE_ACCURACY)
# r = [r1, r2, r3]
# return r
# except Exception as e:
# print(e)
# raise
# def modelEquationM2(t, y, P, comList, StHeRe25, reactionListSorted):
# """
# M2 model
# mass and energy balance equations
# modelParameters:
# pressure [Pa]
# compList: component list
# StHeRe25: standard heat of reaction at 25C
# reactionListSorted: reaction list
# """
# # REVIEW
# # t
# # print(f"t: {t}")
# # components no
# # y: component mole fraction, molar flux, temperature
# compNo = len(y[:-2])
# indexMoFl = compNo
# indexT = indexMoFl + 1
# #! loop vars
# # MoFri = np.copy(y)
# # yi_H2 = y[0]
# # yi_CO2 = y[1]
# # yi_H2O = y[2]
# # yi_CO = y[3]
# # yi_CH3OH = y[4]
# # yi_DME = y[5]
# # mole fraction list
# # MoFri = [yi_H2, yi_CO2, yi_H2O, yi_CO, yi_CH3OH, yi_DME]
# MoFri = y[:-2]
# # molar flux [kmol/m^2.s]
# MoFl = y[indexMoFl]
# # temperature [K]
# T = y[indexT]
# # kinetics
# Ri = np.array(PackedBedReactorClass.modelReactions(P, T, MoFri))
# # H2
# R_H2 = -3*Ri[0] + Ri[1]
# # CO2
# R_CO2 = -Ri[0] + Ri[1]
# # H2O
# R_H2O = Ri[0] - Ri[1] + Ri[2]
# # CO
# R_CO = -Ri[1]
# # CH3OH
# R_CH3OH = -2*Ri[2] + Ri[0]
# # DME
# R_DME = Ri[2]
# # total
# R_T = -2*Ri[0]
# # enthalpy
# # heat capacity at constant pressure of mixture Cp [kJ/kmol.K]
# # Cp mean list
# CpMeanList = calMeanHeatCapacityAtConstantPressure(comList, T)
# # print(f"Cp mean list: {CpMeanList}")
# # Cp mixture
# CpMeanMixture = calMixtureHeatCapacityAtConstantPressure(
# MoFri, CpMeanList)
# # print(f"Cp mean mixture: {CpMeanMixture}")
# # enthalpy change from Tref to T [kJ/kmol]
# # enthalpy change
# EnChList = np.array(calEnthalpyChangeOfReaction(reactionListSorted, T))
# # heat of reaction at T [kJ/kmol]
# HeReT = np.array(EnChList + StHeRe25)
# # overall heat of reaction
# OvHeReT = np.dot(Ri, HeReT)
# # cooling temperature
# Tm = 523
# # overall heat | |
# Repository: MechMaster48/RainbowSixSiege-Python-API
"""
Copyright (c) 2016-2020 jackywathy
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
loadouts_const = [
{
"id": "1MobOPbsFZoVpLWDUUgmeg",
"name": "dp27",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7LoT7yAe0LK7bDOeq6MZZM/33995bc704667674af1b73fe962d4c7c/Primary_gun_DP27.png"
},
{
"id": "3iisbOg3JC9epuJDdrMcAk",
"name": "9x19vsn",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/42gH96xTTYaTZsfXI3c0wL/a7edbf11af97091ee884b68e59fe6a4f/9x19VSN.png"
},
{
"id": "4EmVfbHbYqwRNnK02lU79C",
"name": "pmm",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3y4LIwwm8YNQHAv8oOkWCK/a2375901cee34e68fa39c976d85de8aa/PMM.png"
},
{
"id": "3Ch5Pac0IKVBJe5oYZzIol",
"name": "gsh-18",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5s5Q33j3MNcXf9lwfxfd7m/4eb3a6af1d431481b6ddcec44fbc7602/GSh-18.png"
},
{
"id": "3WoO6qQpm6SkD2ceFlpIVq",
"name": "barbed-wire",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7igaibxuCcSpWDkZensEJS/bfa2cef52f3d860b7a06c2b4d7a6340e/Barbed_wire.png"
},
{
"id": "5QtTa00eoscVRzAfGy44y6",
"name": "proximity-alarm",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2TsFLmb2O6LRZpbxzkZDck/c9146913388a9567500b704c95600621/Proximity_alarm.png"
},
{
"id": "4rJKd9S4S3Edu84n3jaWbq",
"name": "shumikha-launcher",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/37wX75QnY7XA6KbjM4aF5n/0ab116d398cf71463e11d43913818ec1/Shumikha-Launcher.png"
},
{
"id": "1LVSwzrXIEAd1O3vntSQMs",
"name": "p10-roni",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7K86OBjL3zmYWt0ZvUcCLj/16a947334e39f27da177d787773593e4/r6-operator-weapon-smg-p10roni.png"
},
{
"id": "6xDz1HSwIn3ZcV9nKIeKUN",
"name": "mk-14-ebr",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6KIMqp5dA95z1RI3PrG9jv/eb939638169811a3fa858a44e6e5d97e/Mk_14_EBR.png"
},
{
"id": "5mI0sCcUxKW3Imv5ZMBBeL",
"name": "prb92",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/dl28J1HsE7mzhj66pmd5D/b8d8fc48d2dde13154047de94abbd8ca/PRB92.png"
},
{
"id": "7eb4vAG3ycZGuIRAoRl58a",
"name": "surya-gate",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4hLJAAVKrf50wosG0471od/cde1867daf863c03754969f159ac00de/r6s-operator-ability-aruni.png"
},
{
"id": "2Mh1URS57n4Yuc2vHOojl7",
"name": "super-90",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1TLWSu0xHJlAsfEfafeC9X/f9647e70a18962bf1627095c8b46832e/Super_90.png"
},
{
"id": "4pZ8kx4SSqhhLJ1iaSyEAU",
"name": "9mm-c1",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/60sbThKtOpNOwKu3OP0oGV/672fd9263f7786402a0d855273473a6f/9mm_C1.png"
},
{
"id": "2cDP1BjKw2UkKkDJhYLZAU",
"name": "mk1-9mm",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3tWoNeF3jQYs3w4EOydQYs/434409c96693df1fd3e969d778e70795/Mk1_9mm_BI.png"
},
{
"id": "6Urz2FjkmefuoCPGDoVZCm",
"name": "ita12s",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5G4DroaSdqHzJWCe7qqbHZ/5dd2e03f853182c78a1e7fcbc642f0cf/ITA12S.png"
},
{
"id": "7pAPyONkaR3xGR47gvXwSz",
"name": "bulletproof-camera",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/gZuOXvuTu2i8hQX0B6auy/259f379a6283bae618443d722a896f1a/Bulletproof_camera.png"
},
{
"id": "6ZPm8q8dyQXt1my5OHZWic",
"name": "deployable-shield",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/W0WE0X2VQlxwWIAFCJ6Jm/523650a39de5a23dd9520d7299c9e25a/Deployable_Shield.png"
},
{
"id": "2yKP1QdTJfIMQN9d7ZeTmU",
"name": "welcome-mate",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/xsIzH7XCAqvn7F3tEfAPe/c41e59a9d7f2ed7ee38b16ed0a882351/Welcome-Mate.png"
},
{
"id": "zs4Rebj67KAk06ASjJPxO",
"name": "spas-12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7Hp6Fbss6uI59OT4nZNB6e/a4d09954803cb2580353cfa03e8c778b/SPAS-12.png"
},
{
"id": "41pnpfTTAjzKYvpEBmNKdD",
"name": "t-5-smg",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1Ne8bvX8BdCALevWKMllQN/4baa3e79d323de134dd182e0272b9c3b/T-5_SMG.png"
},
{
"id": "1fj1XX5YxggVcr5mU1OPy3",
"name": "bailiff-410",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/N8FLbo4fsNyBe8msKgRhT/8f403dc0b58087bcafab786dd95ba33f/Bailiff_410.png"
},
{
"id": "4HiVAhAUQe5BEXgBNg2ECe",
"name": "usp40",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7FxemzWRtlpAhK9MyKp1Gp/817cc25b6b7c3575dc1ba53a6a8170a9/USP40.png"
},
{
"id": "6kF8p8NlbGPvjRz42YxYYE",
"name": "remah-dash",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3dM2B3qCdU0woydIbiy2xn/55aa99443002ad794d3f78dada26d035/r6s-operator-ability-oryx.png"
},
{
"id": "eR3JkIxE5GyWvNpybHCRr",
"name": "mp5",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/60YbOvSBQt6ZUlu8YDXoZm/51ef3857b2986de700262432e8433714/MP5.png"
},
{
"id": "5QeGmJGqn3gZxACxzF4kbR",
"name": "rg15",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2LNSsp7B7wUnnPUweir7Jm/9f66d53be7a63a17a55253a0bea6eec1/RG15.png"
},
{
"id": "7zuAWr4kVRJFYVj33Ltfex",
"name": "impact-grenade",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7iJK9B1Vr3PDO3rGftU00l/c3d8edc5564a80580e4ac2f9a4fc3937/Impact_Grenade.png"
},
{
"id": "4Lnu4kaDPzUIxgCStqfrbR",
"name": "nitro-cell",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4pBSTw9U6l9GRnT12G6Xln/e0991bc03b48d217f510af8b611c8828/Nitro_Cell.png"
},
{
"id": "5CkPFHPPJB3909Fff9BYBs",
"name": "banshee",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/49ixqWhGgjvHu0Ay8JzeSH/c6a3fe584847850186e15c7fb4244385/r6s-operator-ability-melusi.png"
},
{
"id": "wfzQPegCiVkDRgsx6MOjZ",
"name": "c8-sfw",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1itXpz2GnvdwwRyhX1SYa2/b58ff71048fa3bb5ed09d5d935dc90f4/C8-SFW.png"
},
{
"id": "5b6dGdkffoVKAyDycG5hjg",
"name": "camrs",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4dBzqVVmnpv1DZi91LAnEN/e374b4ea289fc992280b943cdbb94d60/CAMRS.png"
},
{
"id": "myf6Hy39exE9Cot5zDEis",
"name": "claymore",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4T4H5EJgUxorucGVtU2pkm/74fef324b89c220ce6426e8097f915b9/Claymore.png"
},
{
"id": "2NNtCVZhqQykqVEtze4fxJ",
"name": "stun-grenade",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3XnK8s1iQJQu5cfr6UyQfK/429480b96d6d6d6b830c32c75d2608f5/Stun_Grenade.png"
},
{
"id": "XNjuIs9nL1RQNnWOMNfC9",
"name": "skeleton-key",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2w8EQtN4FFtEMa9lBYyWGg/36bbc6d819761c11418c868d2e483991/Skeleton-Key.png"
},
{
"id": "65tbXPRuQxAV8RUaMoCYJh",
"name": "lmg-e",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7JVJIew6t3iKwgByvrFXyi/7ba44dfda28b525506633e453104a604/LMG-E.png"
},
{
"id": "2dvLoMLwWSRwyK70gARboS",
"name": "m762",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4oWAgi7tgQP1Tq0HooRtye/9109a74921ee17610d4bd85a61582823/M762.png"
},
{
"id": "75HMflo54bNBGSUyX2je5s",
"name": "breach-charge",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1OgTMhyF1FBsSAo4njk26m/9881001e9db03a4806b2eea6007e4a1a/Breach_Charge.png"
},
{
"id": "5tTXUrm4TLdSBHtsJ1p9d8",
"name": "ks79-lifeline",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1elqIEWJ6XsXKAbMNd0Cai/0b4c0591bad284d957e652cdae0b706b/KS79-Lifeline.png"
},
{
"id": "HCADlLBkfNlDRRvlq3iPo",
"name": "mp5sd",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5HaMldwFltBwiiyDDfkPpD/6de3aa9aaa17458e7f6186ba59b8deff/MP5SD.png"
},
{
"id": "67fxpqXp4gjOQixPHPaQMB",
"name": "supernova",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2tpjCRFLcc3hogjJGbKDsi/5ad0ab63b7245022aca5c1c1fb42d473/SuperNova.png"
},
{
"id": "4mbLbnjsLEQ27BEXQ1vqGs",
"name": "p229",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/76ja0RxqzHW9PpvWgpG7Sk/cb753b50b20fe67deaef54d8b2a46b54/P229.png"
},
{
"id": "7sblClEPf57IKm77UCqFSj",
"name": "bearing-9",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4mdftEOh5Vu9KhhpgKLKrT/abedcc75868774018295ec2a08a7b3de/Bearing_9.png"
},
{
"id": "5FUiujmYYXsvq1zQ0lZlVx",
"name": "yokai",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/TdDZyrKpjt9EQo8tHpIJk/d987db4da22046a0663be8be82dcda88/Yokai.png"
},
{
"id": "5cZ1wkLzuOHnnYjma40PwQ",
"name": "bosg-12-2",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2ZjVndetsX8WEn5ZfyUQa0/e3a781be7eab22876d25f748e8fd0f5a/BOSG.12.2.png"
},
{
"id": "2zEjl6sxdsxgVBmAsDZxcq",
"name": "c75-auto",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3wUuefwPjU705mZkTdJ9UH/8ccb11884cfa34c176ac5500af139177/C75_Auto.png"
},
{
"id": "4W61sh5pt9Ghkw4g7Muvee",
"name": "smg-12",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/EwJgB7KdgOb6dDm7ro33u/b73f0890f992c1a365210f08efcc6db5/SMG-12.png"
},
{
"id": "7aslgBcBTFi4XKqlAkvvrc",
"name": "smoke-grenade",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3LaxoSLC49T5vgKnUAlTLT/c47c4636845a04478432c48be8c29aee/Smoke_Grenade.png"
},
{
"id": "5o5qaxqMxosu04407U4sBL",
"name": "logic-bomb",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5ej2g1iCMHdfjn8h8qgfmU/bf07fef4b063a46389ca650ed02b292a/Logic-Bomb.png"
},
{
"id": "kzR6vfRLXm9f1EvoK9dBP",
"name": "m12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4FxqA5pa8JY9QQ7FEcjwPw/ffc779fcde5b970e7b95db6653637dab/M12.png"
},
{
"id": "5gcX8x7LiBHg2LA1JIdEHp",
"name": "spas-15",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/CyofBgipHq4RTafvPFWd4/bc3d0ecc871b70e57735855f852efacf/SPAS-15.png"
},
{
"id": "1Y7hJmAXWWqh1MIkXqUbKw",
"name": "luison",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5cSDFUWb8P1BAQUgnTozeM/fd3a3348f42c95d6afa9f105ae23f2e5/Luison.png"
},
{
"id": "1ojdoiQ8AbqFX3FB7Neqmk",
"name": "silent-step",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6PTsBBBGTT5oixxzvYv1Y4/18e31c74ba1ca73ed2694134acd9c078/Silent-Step.png"
},
{
"id": "6Il345pPRhv4Xx4qzTFpmA",
"name": "aug-a2",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1eO39zRe8XxJXH1KZiIWhM/02049ced0fbfa630833e8b0d3c03de07/AUG_A2.png"
},
{
"id": "3Xq4lwAY8Sc1Z687gD9mnD",
"name": "mp5k",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1pk8nOI7ybQjYOSI4fuzOm/fcd78df0f729be545e75c09aae85c360/MP5K.png"
},
{
"id": "4PHq1TcVzAqQp11Ve7CFFC",
"name": "d-40",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4niSMDCeiryoMBXJZq60Vv/48339331d05e289868cf4050c49b1b2b/D-40.png"
},
{
"id": "5Y36nPWZ6lXp37GDupoLRV",
"name": "p12",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2mpM7rah7rwEW0bViIirUC/ed9caa4db58421519fa4db390b1aa164/P12.png"
},
{
"id": "6L5PL3qOQjjpNUdA9l0WLD",
"name": "mag-net-system",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1IKNZzLv63AJd9vlbXj3Bo/883371432ffb22e5bf35bc82dd706384/Mag-net_System.png"
},
{
"id": "3ePDML7HMucggZaNG2nL0a",
"name": "t-95-lsw",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/23HCxaNTRUHBlFAvCTMZQm/fe319cc164fac034a29e9b114ae7d5cb/T-95_LSW.png"
},
{
"id": "6pPXSrzgAKEyTiiyrs1Qbn",
"name": "six12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2v6MwsHwjOZ5Muid53lyfN/e5f1c4997db93abfe3ac356fce23376c/SIX12.png"
},
{
"id": "3ECycrhAlLH7str0T4F2hp",
"name": "q-929",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2fRVszR5yGDHbV0AL8muso/0838dac90b66aa810daa49d36382fb64/Q-929.png"
},
{
"id": "31sOhkze6zBhWkkM8HR44n",
"name": "secondary-breacher",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3OvnVPWY1UyrDE913kU0a1/eae4b2a1584234ea2ff4ad6481239f3b/SecondaryBreacher.png"
},
{
"id": "168akpqtP52LsTYlMIqeHX",
"name": "candela",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4vpN9vu5wD9dyb2knMosTy/430796de3c0c2a5c2eb2ac6f4217eba0/Candela.png"
},
{
"id": "5hAVF2eVv7NyeJPAJL07sg",
"name": "ak-74m",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1j5HiQP8aFphTe65fqDdg0/23eecb5c603c5ba9f59fc6cbc5e4a531/AK-74M.png"
},
{
"id": "Q4Q9LtkAztMdeUT53C39j",
"name": "arx200",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6VgkPBsr1WApI3rWc9kcM0/b18b8e25f3e951e8e722213f2ee59eb0/ARX200.png"
},
{
"id": "2NcOcqzfy4HnaHNUITnUYN",
"name": "-44-mag-semi-auto",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6W3Jz0YcQzbZ6BOPr7VVel/4c67f342964132a652f7d5821e887050/.44_Mag_Semi-Auto.png"
},
{
"id": "6dV0styTnHMeqh4effTNF8",
"name": "airjab-launcher",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6d0LN1QWzviEkcYu3mTn6v/e49511a479756f71224f14225ad9cbd8/Airjab-Launcher.png"
},
{
"id": "4jhzD37iXaCsWyBAo1PQ5J",
"name": "p90",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4nGrNspOvII2oS3lEMkg5x/2398a493c298bc654f97c58767aa40f3/P90.png"
},
{
"id": "43jUNG843Bn0knjA3tXwXo",
"name": "sg-cqb",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5JoL3b36Fsztt9Q2XYmrbJ/dacec96948d3f8fe92914a69b9aac593/SG-CQB.png"
},
{
"id": "ElQvUTqCd5JbW2PIJ0lTS",
"name": "p9",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6Fd1cl17KA0CtgodEiiY6v/d0f145ea72f2aacbd04260ba7d8f1c74/P9.png"
},
{
"id": "55BZj1JeqRvuczMpa04gRU",
"name": "lfp586",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1zc7UtdBfCZakwbiYqBvSz/1fd3f1584de38ca7c9315d498f094276/LFP586.png"
},
{
"id": "6XCPWiyRqIM6rfCYnSRFKg",
"name": "armor-pack",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/MeoKw7iPY6EFYvjS07CRg/b2d7eba623f3c63d6b7097a8f2253954/Armor-Pack.png"
},
{
"id": "3ATrltpsW24BFhZMHNmhfI",
"name": "fmg-9",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/0oneJNsBR06QjuowxwtHG/bd3b391c6eec2bd615f2ed83197a13ac/FMG-9.png"
},
{
"id": "4ggSgqX4ixVHJZwhnenHC1",
"name": "six12-sd",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1GTua079Xbtkpjhx96sRsW/079ed1a71a0d12b5e48e1b0d40b87110/SIX12_SD.png"
},
{
"id": "01zcYOKDgxP24MPkEaswD7",
"name": "5.7-usg",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/tkYcSAJSe5yGkeUhzZqBO/e81feb86df4a7eb6951052bec26b6ed7/5.7_USG.png"
},
{
"id": "7GKyGyCXQ9vVZ0kCdSPJl4",
"name": "d-50",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/6mMQRDsrComRFa7bC6cNkG/8cd17e545e3d28dcc11a040d000cfa16/D-50.png"
},
{
"id": "1p5ZdYWvISi4qDV0S2fDP4",
"name": "frag-grenade",
"weapon_type": "gadget",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4GZsPHbm9H0k5EWz7TMkwO/33b9007bc6ee03dab15cfa15eb69e096/Frag_Grenade.png"
},
{
"id": "6GQ8on95B9PMLjMDrZjXgD",
"name": "hel-presence-reduction",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/57miqbOn8xWBh7ne7za3CV/35364108d49380a0ed33998f970e104f/HEL-Presence-Reduction.png"
},
{
"id": "56o4y5mOsXlFhnzWlq9xMJ",
"name": "commando-9",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4P9dpUph5w3MSsLNnW6be/04baba24990fcb75a9c0bcfd01b7d190/Commando_9.png"
},
{
"id": "J5YsiIB8uvpeZgrWXrhlA",
"name": "sdp-9mm",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/Tgsdyz3XEqmgUYi9aZZgb/6755f4da7af7a7179ffab92acf8d477e/SDP_9mm.png"
},
{
"id": "6m6vEqsps3Mhn6cOHo9yKS",
"name": "pest-launcher",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5L0fFKVOwozKMcmJoenfef/56e4efdf77363556b35a76fd4e0e60f6/Pest-Launcher.png"
},
{
"id": "5AVE3Ok87dbmTwuI5K5fZg",
"name": "le-rock-shield",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1bmXJOakdA6SOrGxBKA70T/1e489e366d6db287f475963df2040d3d/Extendable-Shield.png"
},
{
"id": "7J9icaPnaxguoiBWfdqomb",
"name": "le-rock-shield",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1bmXJOakdA6SOrGxBKA70T/1e489e366d6db287f475963df2040d3d/Extendable-Shield.png"
},
{
"id": "64NDkY7SFav037M3uh6KRD",
"name": "vector-45-acp",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7D1cDf13FqUhoLihzvuPln/068aa7e507155598449c58c0a49a90d6/Vector_.45_ACP.png"
},
{
"id": "1M88HlyLX6jD774vkptDLV",
"name": "ita12l",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4Y6ziRzm9RiPii83fm8BV1/1f472744d2c2dec8d9206f4d8733d92c/ITA12L.png"
},
{
"id": "6X2RibCre3jpetmCoFZaUu",
"name": "black-mirror",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1a1w8epOhWE8VtzvvCJG9d/b20cbb221f7d45e5838f839ce042f409/Black-mirror.png"
},
{
"id": "2xDl4cDXX48FuUiMPApZHo",
"name": "ar-15-50",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4lGGEGZLkbldz114Wl5hCo/78a04c46654f80fae03e730bd79f3563/AR-15.50.png"
},
{
"id": "4zn5v7GdQhRyojYT6qAwwM",
"name": "m4",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3jhi90ycmuc8mAiuSXFoCi/bcf354459e7becd6ede52ee97917c832/M4.png"
},
{
"id": "4GfGPq4g6TDwHvQEJII9ee",
"name": "1911-tacops",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/189UukZ6fVnvQR6LJtLYry/6eec29603d5b7b0ca8cab6ac0ef083ac/1911_TACOPS.png"
},
{
"id": "3ECK2BieW8MOShqE0XJVwd",
"name": "breaching-torch",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4rPBvxDKsKiQCMjt7GxJMw/09e45c68bbc41c1721acbbe0257e2465/Breaching-Torch.png"
},
{
"id": "4F64StqLivWX4lHm7iNgqG",
"name": "v308",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5YBZe76NUDO32eF66wW90g/488c315743d59230962a4d67618223d6/V308.png"
},
{
"id": "7MC9QIlZkFL8AAqkAfGIbV",
"name": "417",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5djkS4YtAtOF0vBmg0T60x/ea2b1ff7e5367e66c99bc7ad7e95bfe3/417.png"
},
{
"id": "zRjInzDWpoahREdiE2RDM",
"name": "ee-one-d",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7fRknnWl2K2qjKle1t79j/0506d25798aeb0691c8a576665050f7d/EE-ONE-D.png"
},
{
"id": "7tItrCBHMWLbtvlBxYDWfS",
"name": "g36c",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2SZoqSXKoNPvZFIJsFsDE5/cb109885bf19c8697abf832f10cfd9a6/G36C.png"
},
{
"id": "4FYdLQfxYBnKCfY2cZ9flD",
"name": "gemini-replicator",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/K8E4EHWbD8wTjVqro6wVl/62339b2fbe1d3a2319dcd320f7a0b070/r6s-operator-ability-iana.png"
},
{
"id": "E0pKBweJkY0ok4BvfDSxv",
"name": "csrx-300",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7tUB9ZNXJhdN6ejAkCEeFQ/99691bcc19f641cf872925905d08a539/CSRX_300.png"
},
{
"id": "5EraRZbq9P8VR8Sd0Sarh9",
"name": "spsmg9",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/5EtwSgylXckBNg4n6gDR9J/bc6fc6c5c12ae11da59aee95828ebd76/SPSMG9.png"
},
{
"id": "2Eik88OMmWOse0qBVegpjG",
"name": "lv-explosive-lance",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/75eebt48ELO4eGGdIMVMpY/9533c7dc8f36651f5b5ad50c8ccb6c5a/LV_Explosive_Lance.png"
},
{
"id": "3rjbxjDZx9mwvN5xHkZDWp",
"name": "aug-a3",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3W9XJdMOgpHSw55HfwRSAv/cf8f220678d503e6c3e535c00b2e636a/AUG_A3.png"
},
{
"id": "3vCxcPpLsOovwCKEuXLJrN",
"name": "tcsg12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2NDbY7BTBJ9R09LUilTlRf/3728337cd3ba14ed6ab9de0c22e879af/TCSG12.png"
},
{
"id": "4hAJAIXdGAU0uCmTcwLoha",
"name": "rtila-electroclaw",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7rUOk2LhYIUjvYLot7GT8Y/94b72bfbbfdf50c2c807cdbf9f5b276e/Rtila-Electroclaw.png"
},
{
"id": "7gAppJYmlz1A8xXgPt0a5m",
"name": "m870",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2rkU6g4Rlg0e0U4rczWGTV/a51589a54c43f476d8eb984c0ea881e9/M870.png"
},
{
"id": "2Hre4GaBWs92I37LII1O8M",
"name": "416-c-carbine",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/2I86r2a2QD8EHTZVZnxcxy/2913450ba952a16c29fac1f5ce58ba1a/416-C_Carbine.png"
},
{
"id": "17DSq6qMxwSmARSjIDwDKE",
"name": "active-defense-system",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1YCujceutAcJ7F10yhHC41/c5f870e7789b6396c9997ed45ccd3beb/Active-Defense-System.png"
},
{
"id": "2BpqLwwDeSr7QpNZqsLvBt",
"name": "f90",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/62tE3th2ThcGHlrcqWkmEX/d69c9de199542e25fa55f6d293f15671/r6-operator-weapon-ar-f90.png"
},
{
"id": "3zbsuyeTh78X5KHV3M7Ctt",
"name": "m249-saw",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/3p0oG7GsLIoHaRullf7xsF/e2a9e135af63e8897355023cd34538c4/M249_SAW.png"
},
{
"id": "WliOiho6hjQFZ7BiJT7uV",
"name": "super-shorty",
"weapon_type": "secondary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7Dq8LDmIxAveRqXM17orUW/cbd96b47cd8ca74a7827b16ef73fe7cf/r6-operator-weapon-sa-supershorty.png"
},
{
"id": "2IZvSVScGT9SAKL7oedtlN",
"name": "trax-stingers",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/QGVvmZeZ91FC2X4mvMzgn/601fa45e635872aea31f15ffebb9c366/Trax-Stingers.png"
},
{
"id": "5nXwwDj4qtPaGvCKrTqdpC",
"name": "ots-03",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/4fXznwDtLt61VCF8QIF4N3/34e2e6d6c33d4c504c945bdd13c322f6/OTs-03.png"
},
{
"id": "5JiIaIiidLpM5wZGRmbZxO",
"name": "flip-sight",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/73bNPGhlIuhlWvi497sYqE/b68414436088f62f9da44cd42f702df7/Flip-Sight.png"
},
{
"id": "3seBqopkUJQZwKAodylxXj",
"name": "volcan-shield",
"weapon_type": "unique_ability",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1JqlRdbaVA73jDq8y46vX4/82e89f39c479526ace294ba246d0b085/Volcan-Shield.png"
},
{
"id": "bz7Z7LsOpGGFaLxmNl5nY",
"name": "ak-12",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/7KAZZgnpqD07y47jVVXEuh/e0d7e67101f8f966aa6e1c59e835454f/AK-12.png"
},
{
"id": "4t1fOF2T7bKerBD9VJA5HH",
"name": "6p41",
"weapon_type": "primary",
"weapon_image_url": "https://staticctf.akamaized.net/J3yJr34U2pZ2Ieem48Dwy9uqj5PNUQTn/1wxS2HOCvoPAfnJEDFWjfw/7feddb98582ec37b500243d3f3e19eca/6P41.png"
},
{
"id": "2dRlzAkeuYgN8yAw3538qs",
"name": "ballistic-shield",
| |
grep LISTEN | grep usbredir | awk '{print $4}' | awk -F ':' '{ print $4 }'")
if r != 0:
rsp.success = False
rsp.error = "unable to get started usb server port"
return jsonobject.dumps(rsp)
existPort = o.split("\n")
for value in cmd.portList:
uuid = str(value).split(":")[0]
port = str(value).split(":")[1]
if port not in existPort:
rsp.uuids.append(uuid)
continue
existPort.remove(port)
# kill stale usb server
for port in existPort:
bash_r("systemctl stop usbredir-%s" % port)
return jsonobject.dumps(rsp)
    @kvmagent.replyerror
    @in_bash
    def get_usb_devices(self, req):
        """Enumerate the host's USB devices and return them as one packed string.

        Strategy: 'lsusb.py -U' yields vendor:product IDs; each unique ID is
        then expanded with 'lsusb -v -d ID'.  Every device is serialized by
        UsbDeviceInfo.toString() and the records are concatenated into
        rsp.usbDevicesInfo.  Any shell failure or incomplete device record
        aborts with success=False.
        """
        class UsbDeviceInfo(object):
            # Plain record of the fields parsed out of lsusb output.
            def __init__(self):
                self.busNum = ""
                self.devNum = ""
                self.idVendor = ""
                self.idProduct = ""
                self.iManufacturer = ""
                self.iProduct = ""
                self.iSerial = ""
                self.usbVersion = ""

            def toString(self):
                # Colon-separated fields; ';' terminates the record so several
                # devices can be concatenated into one string.
                return self.busNum + ':' + self.devNum + ':' + self.idVendor + ':' + self.idProduct + ':' + self.iManufacturer + ':' + self.iProduct + ':' + self.iSerial + ':' + self.usbVersion + ";"

        # use 'lsusb.py -U' to get device ID, like '0751:9842'
        rsp = GetUsbDevicesRsp()
        cmd = jsonobject.loads(req[http.REQUEST_BODY])  # parsed but not used below
        r, o, e = bash_roe("timeout 5 lsusb.py -U")
        if r != 0:
            rsp.success = False
            rsp.error = "%s %s" % (e, o)
            return jsonobject.dumps(rsp)

        # Collect the unique vendor:product IDs (second whitespace column).
        idSet = set()
        usbDevicesInfo = ''
        for line in o.split('\n'):
            line = line.split()
            if len(line) < 2:
                continue
            idSet.add(line[1])

        for devId in idSet:
            # use 'lsusb -v -d ID' to get device info[s]
            r, o, e = bash_roe("lsusb -v -d %s" % devId)
            if r != 0:
                rsp.success = False
                rsp.error = "%s %s" % (e, o)
                return jsonobject.dumps(rsp)

            info = UsbDeviceInfo()
            for line in o.split('\n'):
                line = line.strip().split()
                if len(line) < 2:
                    continue
                if line[0] == 'Bus' and len(line) > 3:
                    # Header line, e.g. "Bus 001 Device 004: ..." — bus/device numbers.
                    info.idVendor, info.idProduct = devId.split(':')
                    info.busNum = line[1]
                    info.devNum = line[3].rsplit(':')[0]
                elif line[0] == 'idVendor':
                    # Columns beyond the hex value hold the vendor name, if any.
                    info.iManufacturer = ' '.join(line[2:]) if len(line) > 2 else ""
                elif line[0] == 'idProduct':
                    info.iProduct = ' '.join(line[2:]) if len(line) > 2 else ""
                elif line[0] == 'bcdUSB':
                    info.usbVersion = line[1]
                    # special case: USB2.0 with speed 1.5MBit/s or 12MBit/s should be attached to USB1.1 Controller
                    rst = bash_r("lsusb.py | grep -v 'grep' | grep '%s' | grep -E '1.5MBit/s|12MBit/s'" % devId)
                    info.usbVersion = info.usbVersion if rst != 0 else '1.1'
                elif line[0] == 'iManufacturer' and len(line) > 2:
                    # Overrides the idVendor-derived name when a string descriptor exists.
                    info.iManufacturer = ' '.join(line[2:])
                elif line[0] == 'iProduct' and len(line) > 2:
                    info.iProduct = ' '.join(line[2:])
                elif line[0] == 'iSerial':
                    info.iSerial = ' '.join(line[2:]) if len(line) > 2 else ""

            # A record missing core fields (or with lsusb '(error)' markers)
            # means we could not read the device reliably — fail the request.
            if info.busNum == '' or info.devNum == '' or info.idVendor == '' \
                    or info.idProduct == '' or '(error)' in info.iManufacturer or '(error)' in info.iProduct:
                rsp.success = False
                rsp.error = "cannot get enough info of usb device"
                return jsonobject.dumps(rsp)
            else:
                usbDevicesInfo += info.toString()

        rsp.usbDevicesInfo = usbDevicesInfo
        return jsonobject.dumps(rsp)
    @lock.file_lock('/run/usb_rules.lock')
    def handle_usb_device_events(self):
        """Install a udev hook that reports USB plug/unplug events to the management node.

        Writes a small Python 2 reporter script (it uses urllib2) to
        /usr/bin/_report_device_event.py, then drops a udev rule that runs it
        on every USB add/remove event.  Guarded by a file lock so concurrent
        agents do not interleave their writes.
        """
        # NOTE: despite the name 'bash_str', this is an embedded Python 2
        # script; the two %s placeholders are filled with the host UUID and
        # the management-node callback URL from the agent config.
        bash_str = """#!/usr/bin/env python
import urllib2
def post_msg(data, post_url):
    headers = {"content-type": "application/json", "commandpath": "/host/reportdeviceevent"}
    req = urllib2.Request(post_url, data, headers)
    response = urllib2.urlopen(req)
    response.close()

if __name__ == "__main__":
    post_msg("{'hostUuid':'%s'}", '%s')
""" % (self.config.get(kvmagent.HOST_UUID), self.config.get(kvmagent.SEND_COMMAND_URL))

        event_report_script = '/usr/bin/_report_device_event.py'
        with open(event_report_script, 'w') as f:
            f.write(bash_str)
        # Script must be executable for udev's RUN key to invoke it.
        os.chmod(event_report_script, 0o755)

        # udev rule: fire the reporter on every USB add/remove event.
        rule_str = 'ACTION=="add|remove", SUBSYSTEM=="usb", RUN="%s"' % event_report_script
        rule_path = '/etc/udev/rules.d/'
        rule_file = os.path.join(rule_path, 'usb.rules')
        if not os.path.exists(rule_path):
            os.makedirs(rule_path)
        with open(rule_file, 'w') as f:
            f.write(rule_str)
@kvmagent.replyerror
@in_bash
def update_os(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
exclude = "--exclude=" + cmd.excludePackages if cmd.excludePackages else ""
updates = cmd.updatePackages if cmd.updatePackages else ""
releasever = cmd.releaseVersion if cmd.releaseVersion else kvmagent.get_host_yum_release()
yum_cmd = "export YUM0={};yum --enablerepo=* clean all && yum --disablerepo=* --enablerepo=zstack-mn,qemu-kvm-ev-mn{} {} update {} -y"
yum_cmd = yum_cmd.format(releasever, ',zstack-experimental-mn' if cmd.enableExpRepo else '', exclude, updates)
rsp = UpdateHostOSRsp()
if shell.run("which yum") != 0:
rsp.success = False
rsp.error = "no yum command found, cannot update host os"
elif shell.run("export YUM0={};yum --disablerepo=* --enablerepo=zstack-mn repoinfo".format(releasever)) != 0:
rsp.success = False
rsp.error = "no zstack-mn repo found, cannot update host os"
elif shell.run("export YUM0={};yum --disablerepo=* --enablerepo=qemu-kvm-ev-mn repoinfo".format(releasever)) != 0:
rsp.success = False
rsp.error = "no qemu-kvm-ev-mn repo found, cannot update host os"
elif shell.run(yum_cmd) != 0:
rsp.success = False
rsp.error = "failed to update host os using zstack-mn,qemu-kvm-ev-mn repo"
else:
logger.debug("successfully run: %s" % yum_cmd)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
@in_bash
def init_host_moc(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
if cmd.mode not in ["iohub", "mocbr"]:
rsp.success = False
rsp.error = "unexpected mode: " + cmd.mode
else:
bash_r("/usr/local/bin/iohub_mocbr.sh %s start >> /var/log/iohubmocbr.log 2>&1" % cmd.mode)
if cmd.mode == 'mocbr':
bash_r("ip link set dev {} master {}".format(cmd.masterVethName, cmd.bridgeName))
return jsonobject.dumps(rsp)
@kvmagent.replyerror
@in_bash
def update_dependency(self, req):
rsp = UpdateDependencyRsp()
if self.IS_YUM:
releasever = kvmagent.get_host_yum_release()
yum_cmd = "export YUM0={};yum --enablerepo=* clean all && yum --disablerepo=* --enablerepo=zstack-mn,qemu-kvm-ev-mn install `cat /var/lib/zstack/dependencies` -y".format(releasever)
if shell.run("export YUM0={};yum --disablerepo=* --enablerepo=zstack-mn repoinfo".format(releasever)) != 0:
rsp.success = False
rsp.error = "no zstack-mn repo found, cannot update kvmagent dependencies"
elif shell.run("export YUM0={};yum --disablerepo=* --enablerepo=qemu-kvm-ev-mn repoinfo".format(releasever)) != 0:
rsp.success = False
rsp.error = "no qemu-kvm-ev-mn repo found, cannot update kvmagent dependencies"
elif shell.run(yum_cmd) != 0:
rsp.success = False
rsp.error = "failed to update kvmagent dependencies using zstack-mn,qemu-kvm-ev-mn repo"
else :
logger.debug("successfully run: {}".format(yum_cmd))
elif self.IS_APT:
apt_cmd = "apt-get clean && apt-get -y --allow-unauthenticated install `cat /var/lib/zstack/dependencies`"
if shell.run(apt_cmd) != 0:
rsp.success = False
rsp.error = "failed to update kvmagent dependencies by {}.".format(apt_cmd)
else :
logger.debug("successfully run: {}".format(apt_cmd))
else :
rsp.success = False
rsp.error = "no yum or apt found, cannot update kvmagent dependencies"
return jsonobject.dumps(rsp)
    @kvmagent.replyerror
    @in_bash
    def get_xfs_frag_data(self, req):
        """Report XFS fragmentation for the root filesystem and per-volume extent counts.

        Only meaningful when the first 'df' entry is an xfs filesystem;
        otherwise just the filesystem type is returned.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetXfsFragDataRsp()
        # NOTE(review): this reads the *first* data row of 'df -hlT'
        # (device, fstype) — presumably that row is the root filesystem;
        # confirm this holds on hosts with multiple mounts.
        o = bash_o("df -hlT | awk 'NR==2 {print $1,$2}'")
        o = str(o).strip().split(" ")
        if len(o) != 2:
            rsp.success = False
            rsp.error = "failed to get root path and file system type"
            return jsonobject.dumps(rsp)

        root_path = o[0]
        fs_type = o[1]
        rsp.fsType = fs_type
        # Fragmentation data only applies to xfs; anything else returns early.
        if fs_type != "xfs":
            return jsonobject.dumps(rsp)
        if root_path is None:
            rsp.error = "failed to find root device"
            rsp.success = False
            return jsonobject.dumps(rsp)

        # 'xfs_db -c frag' prints e.g. "fragmentation factor 0.31%"; field 7
        # is the percentage, which must end in '%'.
        frag_percent = bash_o("xfs_db -c frag -r %s | awk '/fragmentation factor/{print $7}'" % root_path, True)
        if not str(frag_percent).strip().endswith("%"):
            rsp.error = "error format %s" % frag_percent
            rsp.success = False
            return jsonobject.dumps(rsp)
        else:
            # Strip the trailing '%' sign.
            rsp.hostFrag = frag_percent.strip()[:-1]

        # Per-volume: count the extents listed by xfs_bmap (minus the header line).
        volume_path_dict = cmd.volumePathMap.__dict__
        if volume_path_dict is not None:
            for key, value in volume_path_dict.items():
                r, o = bash_ro("xfs_bmap %s | wc -l" % value, True)
                if r == 0:
                    o = o.strip()
                    rsp.volumeFragMap[key] = int(o) - 1
        return jsonobject.dumps(rsp)
    def shutdown_host(self, req):
        """HTTP handler: schedule an asynchronous host shutdown and reply immediately.

        The actual power-off runs on a background thread (do_shutdown_host)
        so this response can reach the caller before the host goes down.
        """
        self.do_shutdown_host()
        return jsonobject.dumps(kvmagent.AgentResponse())
    @thread.AsyncThread
    def do_shutdown_host(self):
        """Power the host off via 'init 0' after a one-second grace delay.

        Runs asynchronously; the delay gives the shutdown_host HTTP
        response time to be flushed to the caller.
        """
        logger.debug("It is going to shutdown host after 1 sec")
        time.sleep(1)
        shell.call("sudo init 0")
@kvmagent.replyerror
@in_bash
def disable_hugepage(self, req):
rsp = DisableHugePageRsp()
return_code, stdout = self._close_hugepage()
if return_code != 0 or "Error" in stdout:
rsp.success = False
rsp.error = stdout
return jsonobject.dumps(rsp)
def _close_hugepage(self):
disable_hugepage_script = '''#!/bin/sh
grubs=("/boot/grub2/grub.cfg" "/boot/grub/grub.cfg" "/etc/grub2-efi.cfg" "/etc/grub-efi.cfg", "/boot/efi/EFI/centos/grub.cfg", "/boot/efi/EFI/kylin/grub.cfg")
# config nr_hugepages
sysctl -w vm.nr_hugepages=0
# enable nr_hugepages
sysctl vm.nr_hugepages=0
# config default grub
sed -i '/GRUB_CMDLINE_LINUX=/s/[[:blank:]]*hugepagesz[[:blank:]]*=[[:blank:]]*[[:graph:]]*//g' /etc/default/grub
sed -i '/GRUB_CMDLINE_LINUX=/s/[[:blank:]]*hugepages[[:blank:]]*=[[:blank:]]*[[:graph:]]*//g' /etc/default/grub
sed -i '/GRUB_CMDLINE_LINUX=/s/[[:blank:]]*transparent_hugepage[[:blank:]]*=[[:blank:]]*[[:graph:]]*//g' /etc/default/grub
line=`cat /etc/default/grub | grep GRUB_CMDLINE_LINUX`
result=$(echo $line | grep '\"$')
if [ ! -n "$result" ]; then
sed -i '/GRUB_CMDLINE_LINUX/s/$/\"/g' /etc/default/grub
fi
#clean boot grub config
for var in ${grubs[@]}
do
if [ -f $var ]; then
sed -i '/^[[:space:]]*linux/s/[[:blank:]]*hugepagesz[[:blank:]]*=[[:blank:]]*[[:graph:]]*//g' $var
sed -i '/^[[:space:]]*linux/s/[[:blank:]]*hugepages[[:blank:]]*=[[:blank:]]*[[:graph:]]*//g' $var
sed -i '/^[[:space:]]*linux/s/[[:blank:]]*transparent_hugepage[[:blank:]]*=[[:blank:]]*[[:graph:]]*//g' $var
fi
done
'''
fd, disable_hugepage_script_path = tempfile.mkstemp()
with open(disable_hugepage_script_path, 'w') as f:
f.write(disable_hugepage_script)
logger.info('close_hugepage_script_path is: %s' % disable_hugepage_script_path)
cmd = shell.ShellCmd('bash %s' % disable_hugepage_script_path)
cmd(False)
os.remove(disable_hugepage_script_path)
return cmd.return_code, cmd.stdout
@kvmagent.replyerror
@in_bash
def enable_hugepage(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = EnableHugePageRsp()
# clean old hugepage config
return_code, stdout = self._close_hugepage()
if return_code != 0 or "Error" in stdout:
rsp.success = False
rsp.error = stdout
return jsonobject.dumps(rsp)
pageSize = cmd.pageSize
reserveSize = cmd.reserveSize
enable_hugepage_script = '''#!/bin/sh
grubs=("/boot/grub2/grub.cfg" "/boot/grub/grub.cfg" "/etc/grub2-efi.cfg" "/etc/grub-efi.cfg", "/boot/efi/EFI/centos/grub.cfg", "/boot/efi/EFI/kylin/grub.cfg")
# byte to mib
let "reserveSize=%s/1024/1024"
pageSize=%s
memSize=`free -m | awk '/:/ {print $2;exit}'`
let "pageNum=(memSize-reserveSize)/pageSize"
if [ $memSize -lt $reserveSize ]
then
echo "Error:reserve size is bigger than system memory size"
exit 1
fi
#drop cache
echo 3 > /proc/sys/vm/drop_caches
# enable Transparent HugePages
echo always > /sys/kernel/mm/transparent_hugepage/enabled
# config grub
sed -i '/GRUB_CMDLINE_LINUX=/s/\"$/ transparent_hugepage=always default_hugepagesz=\'\"$pageSize\"\'M hugepagesz=\'\"$pageSize\"\'M hugepages=\'\"$pageNum\"\'\"/g' /etc/default/grub
#config boot grub
for var in ${grubs[@]}
do
if [ -f $var ]; then
sed -i '/^[[:space:]]*linux/s/$/ transparent_hugepage=always default_hugepagesz=\'\"$pageSize\"\'M hugepagesz=\'\"$pageSize\"\'M hugepages=\'\"$pageNum\"\'/g' $var
fi
done
''' % (reserveSize, pageSize)
fd, enable_hugepage_script_path = tempfile.mkstemp()
with open(enable_hugepage_script_path, 'w') as f:
f.write(enable_hugepage_script)
logger.info('enable_hugepage_script_path is: %s' % enable_hugepage_script_path)
cmd = shell.ShellCmd('bash | |
<filename>ndmaze/src/ndmaze.py
#! /usr/bin/env python3
import numpy as np
from enum import Enum
from random import randint
import argparse
import sys
def matrix_to_graph(matrix):
    """Build an adjacency dict for the open cells of a flattened maze.

    Maps each open cell's flat index (value == 0) to its orthogonal
    neighbors.  Fixes over the previous version: the neighbor lookup now
    passes the cell index (getOrthogonalNeighbors requires one), and the
    graph is actually returned instead of being discarded.

    :param matrix: iterable of cell values that also exposes
        getOrthogonalNeighbors(index) — presumably a maze/matrix instance;
        TODO confirm intended argument type (this helper appears unused).
    :return: dict mapping open-cell index -> neighbor list
    """
    graph = dict()
    for i, cell in enumerate(matrix):
        if cell == 0:
            graph[i] = matrix.getOrthogonalNeighbors(i)
    return graph
class nd_maze():
    """Base class describing an n-dimensional maze's geometry.

    Subclasses provide the storage (see `matrix`) and drawing; this class
    only records the shape and derived direction counts.
    """

    def __init__(self, dimensions):
        """Record the maze shape and precompute direction counts.

        :param dimensions: sequence of per-axis sizes, e.g. (w, h) or (w, h, d)
        """
        self.dimensionality = len(dimensions)
        # 2 orthogonal moves per axis
        self.orth_directions = 2 * self.dimensionality
        # all neighbors in the 3^n hypercube around a cell, minus the cell itself
        self.diag_directions = 3 ** self.dimensionality - 1
        self.dimensions = dimensions

    def draw(self):
        """Render the maze; no-op here, overridden by subclasses."""
        pass

    def generate(self, algo):
        """Run a maze-generation algorithm on this maze.

        Fixes the previous version, which ignored `algo` and called the
        nonexistent attribute `self.algo()`.  `algo` is a callable taking
        the maze as its single argument (e.g. an unbound method such as
        matrix.primMaze).
        """
        algo(self)
class matrix(nd_maze):
def __init__(self, dimensions, init=None):
super().__init__(dimensions)
if init == None:
self.maze = np.ones(dimensions)
else:
self.maze = init
    def draw(self):
        """Print the maze to stdout (2-D and 3-D only).

        Cell codes: 1 -> '#' (wall), 2 -> 'S' (start), 3 -> 'F' (finish),
        anything else -> '.' (path).  3-D mazes are printed one z-slice at
        a time.  Other dimensionalities just print an explanatory message.
        NOTE(review): indexing mixes `self.maze[i,j,k]` and
        `self.maze[i][j][k]` — equivalent for ndarrays, but worth unifying.
        """
        if self.dimensionality == 3:
            # one 2-D slab per z ("height") level
            for k in range(self.dimensions[2]):
                print("\nheight {}".format(k))
                for i in range(self.dimensions[0]):
                    for j in range(self.dimensions[1]):
                        if (self.maze[i,j,k] == 1):
                            print("#", end="")
                        elif (self.maze[i][j][k] == 2):
                            print("S", end="")
                        elif (self.maze[i][j][k] == 3):
                            print("F", end="")
                        else:
                            print(".", end="")
                    print("")
        elif self.dimensionality != 2:
            print("Requested {}d maze. Can only display 2d and 3d mazes in terminal.".format(self.dimensionality))
        else:
            print("")
            for i in range(self.dimensions[0]):
                for j in range(self.dimensions[1]):
                    if (self.maze[i][j] == 1):
                        print("#", end="")
                    elif (self.maze[i][j] == 2):
                        print("S", end="")
                    elif (self.maze[i][j] == 3):
                        print("F", end="")
                    else:
                        print(".", end="")
                print("")
def getAllCells(maze):
result = []
for index, val in np.ndenumerate(maze.maze):
result.append(index)
return result
def fillBorders(maze):
for index, val in np.ndenumerate(maze.maze):
for i, dim in enumerate(index):
if dim == maze.dimensions[i] - 1 or dim == 0 :
maze.maze[index] = 1
def addEntrance(maze):
c = maze.rndBorderCell()
while (maze.maze[c] == 2 or maze.maze[c] == 3):
c = maze.rndBorderCell()
maze.maze[c] = 2
def addExit(maze):
c = maze.rndBorderCell()
while (maze.maze[c] == 2 or maze.maze[c] == 3):
c = maze.rndBorderCell()
maze.maze[c] = 3
    def addEntranceAndExit(self):
        """Place a random entrance (2) and exit (3) on the maze border."""
        self.addEntrance()
        self.addExit()
    def primMaze(maze):
        """Carve passages in-place using randomized Prim's algorithm.

        Start from a random odd cell near the border, then repeatedly pick a
        random frontier wall and open it when exactly one of its orthogonal
        neighbors is already visited.  Mutates maze.maze (1 = wall, 0 = path).
        NOTE(review): the bare print() calls look like leftover debug output.
        """
        walls = []
        visited = []
        start = maze.rndBorderBorderCell()
        print(start)
        maze.maze[start] = 0
        visited.append(start)
        neighbor_walls = maze.getOrthogonalNeighbors(start)
        walls += neighbor_walls
        # Pick a cell, mark it as part of the maze. Add the walls of the cell to the wall list.
        while len(walls) > 0:
            # print("")
            # print(str(len(walls))+" walls")
            # Pick a random wall from the list.
            wall_num = randint(0,len(walls)-1)
            rnd_wall = walls[wall_num]
            neighbor_walls = maze.getOrthogonalNeighbors(rnd_wall)
            only_path = maze.only1OrthVisited(neighbor_walls, visited)
            # If only one of the two cells that the wall divides is visited, then:
            if (only_path != -1 and rnd_wall not in maze.getAllBorderCells()):
                # print("Only 1 visited path!")
                # print("wall: "+str(rnd_wall))
                # Make the wall a passage and mark the unvisited cell as part of the maze.
                passage = maze.getPassage(only_path, rnd_wall)
                # if (passage == None):
                #     print("Passage is null!")
                #     del walls[wall_num]
                #     continue
                # print("Passage NOT null!")
                if (passage != None):
                    visited.append(passage)
                    maze.maze[passage] = 0
                    # Add the neighboring walls of the cell to the wall list.
                    neighbor_walls = maze.getOrthogonalNeighbors(passage)
                    # set difference keeps a wall from being queued twice
                    unique_walls = set(neighbor_walls) - set(walls)
                    # print("unique_walls length: "+str(len(unique_walls)))
                    walls += list(unique_walls)
                visited.append(rnd_wall)
                maze.maze[rnd_wall] = 0
            # Remove the wall from the list.
            del walls[wall_num]
    def getPassage(maze, path, wall):
        """Return the cell on the far side of `wall` as seen from `path`.

        `path` and `wall` are adjacent index tuples; the passage is two steps
        from `path` along the axis where they differ.  Returns None when that
        cell is on the maze border or outside the grid entirely.
        NOTE(review): `neighbors` below is computed but never used — it looks
        removable, but getOrthogonalNeighbors' body is defined elsewhere, so
        confirm it has no side effects before deleting the call.
        """
        result = None
        neighbors = maze.getOrthogonalNeighbors(wall)
        diff_dim = -1
        direction = 0
        # find the (single) axis along which path and wall differ
        for i in range(maze.dimensionality):
            if path[i] != wall[i]:
                diff_dim = i
                direction = wall[i] - path[i]
        result = list(path)
        # step two cells from `path` through the wall along that axis
        result[diff_dim] = result[diff_dim]+(direction*2)
        # print("passage: " + str(result))
        result = tuple(result)
        if (result in maze.getAllBorderCells()) or (result not in maze.getAllCells()):
        # if (result not in maze.getAllCells()):
            # print("passage rejected")
            return None
        return result
def only1OrthVisited(maze, neighbors, visited):
matches = 0
the_path = []
for index1 in neighbors:
# if (c1.x == 0 or c1.y == 0 or c1.x == length - 1 or c1.y == width - 1) return false
for index2 in visited:
if (index1 == index2):
matches += 1
the_path = index1
if matches > 1:
break
if matches == 1:
return the_path
return -1
def densityIsland(self, complexity=.5, density=.95): #TODO fix for n dimensions
# Assumes odd shape
# Adjust complexity and density relative to maze size
complexity = int(complexity * (5 * (sum(self.dimensions))))
product = 1
for dim in self.dimensions:
product *= (dim // 2)
density = int(density * product)
# Build actual maze
self.maze = np.zeros(self.dimensions)
# Fill borders
self.fillBorders()
# Make aisles
for i in range(density):
variables = []
for x in reversed(self.dimensions):
variables.append(randint(0, x // 2) * 2)
print(variables)
print(self.maze)
# x, y = randint(0, shape[1] // 2) * 2, randint(0, shape[0] // 2) * 2
self.maze[list(reversed(variables))] = 1
print(self.maze)
# self.maze[y,x] = 1
for j in range(complexity):
neighbors = []
for index, k in enumerate(variables):
if k > 1:
temp = list(reversed(variables))
temp[self.dimensionality-1-index] = temp[self.dimensionality-1-index]-2
neighbors.append(temp)
if k < self.dimensions[self.dimensionality-1-index] - 2:
temp = list(reversed(variables))
temp[self.dimensionality-1-index] = temp[self.dimensionality-1-index]+2
neighbors.append(temp)
if len(neighbors):
print(neighbors)
variables2 = neighbors[randint(0, len(neighbors) - 1)]
print(variables2)
print(self.maze)
print(self.maze[variables2])
if (self.maze[variables2] == 0):
self.maze[variables2] = 1
# self.maze[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1
variables3 = []
for index, x in enumerate(variables):
variables3[index] = variables2[index] + (variables[self.dimensionality-1-index] - variables2[index]) // 2
self.maze[variables3] = 1
variables = reversed(variables2)
self.addEntranceAndExit()
    def densityIsland2D(self, complexity=.5, density=.95):
        """2-D wall-island generator (boolean grid variant).

        Rounds the shape down to odd sizes, fills the border, then plants
        `density`-many wall seeds at random even coordinates and grows each
        into a wall run of up to `complexity` segments, always stepping two
        cells and filling the cell in between.  Unlike the other generators
        this one stores the maze as a bool array and does not place an
        entrance/exit.
        """
        # Only odd shapes
        shape = ((self.dimensions[0] // 2) * 2 + 1, (self.dimensions[1] // 2) * 2 + 1)
        # Adjust complexity and density relative to maze size
        complexity = int(complexity * (5 * (shape[0] + shape[1])))
        density = int(density * ((shape[0] // 2) * (shape[1] // 2)))
        # Build actual maze
        self.maze = np.zeros(shape, dtype=bool)
        # Fill borders
        self.maze[0, :] = self.maze[-1, :] = 1
        self.maze[:, 0] = self.maze[:, -1] = 1
        # Make aisles
        for i in range(density):
            # seed a wall at a random even (y, x) coordinate
            x, y = randint(0, shape[1] // 2) * 2, randint(0, shape[0] // 2) * 2
            self.maze[y, x] = 1
            for j in range(complexity):
                # in-bounds even-grid neighbors two cells away
                neighbours = []
                if x > 1:
                    neighbours.append((y, x - 2))
                if x < shape[1] - 2:
                    neighbours.append((y, x + 2))
                if y > 1:
                    neighbours.append((y - 2, x))
                if y < shape[0] - 2:
                    neighbours.append((y + 2, x))
                if len(neighbours):
                    y_,x_ = neighbours[randint(0, len(neighbours) - 1)]
                    if self.maze[y_, x_] == 0:
                        self.maze[y_, x_] = 1
                        # wall the midpoint so the run is contiguous
                        self.maze[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1
                        x, y = x_, y_
def monteCarloMaze(self, density=.1):
for index, x in np.ndenumerate(self.maze):
rnd = randint(0,0)
if rnd < 0*density:
self.maze[index] = 1
else:
self.maze[index] = 0
def getAllBorderCells(self):
border_cells = []
for index, val in np.ndenumerate(self.maze):
for i, dim in enumerate(index):
if dim == self.dimensions[i] - 1 or dim == 0 :
border_cells.append(index)
return border_cells
def getAllOddBorderBorderCells(self):
border_cells = []
for index, val in np.ndenumerate(self.maze):
good = True
for i, dim in enumerate(index):
if (dim % 2 == 0):
good == False
break
if good == False:
continue
for i, dim in enumerate(index):
if (dim == self.dimensions[i] - 2 or dim == 1) and (0 not in index and self.dimensions[i] - 1 not in index):
border_cells.append(index)
return border_cells
def excludeCorners(self, indexes):
result = []
for cell_index in indexes:
cell_is_corner = True
for i, index_num in enumerate(cell_index):
if index_num != 0 and index_num != self.dimensions[i] - 1:
cell_is_corner = False
break
if cell_is_corner == False:
result.append(cell_index)
return result
def rndBorderCell(self):
border_cells = self.getAllBorderCells()
no_corners = self.excludeCorners(border_cells)
rnd = randint(0,len(no_corners)-1)
return no_corners[rnd]
def rndBorderBorderCell(self):
border_border_cells = self.getAllOddBorderBorderCells()
rnd = randint(0,len(border_border_cells)-1)
return border_border_cells[rnd]
def checkerboardMaze(self):
for index, x in np.ndenumerate(self.maze):
if sum(index) % 2 == 0:
self.maze[index] = 1
else:
self.maze[index] = 0
def getAllCoordsOf(self, types):
result = []
for index, val in np.ndenumerate(self.maze):
if self.maze[index] in types:
result.append(index)
return result
def bruteForceStep(self, low_threshold=0, high_threshold=1, paths_connect = True, walls_connect = True):
self.checkerboardMaze()
self.fillBorders()
self.addEntranceAndExit()
cell_indexes = set(self.getAllCoordsOf([0,1])) - set(self.getAllBorderCells())
cell_indexes = list(cell_indexes)
while self.isSolvable() == False: #or (self.getDensity() < low_threshold or self.getDensity() > high_threshold) or self.allAreConnected("path") == False or self.allAreConnected("wall") == False:
rnd = randint(0, len(cell_indexes)-1)
if self.maze[cell_indexes[rnd]] == 1:
self.maze[cell_indexes[rnd]] = 0
else:
self.maze[cell_indexes[rnd]] = 1
def reset(self, fill=1):
if fill==1:
self.maze = np.ones(dimensions)
else:
self.maze = np.zeros(dimensions)
def excludeExteriors(self, indexes):
result = []
for cell_index in indexes:
cell_is_exterior = False
for i, index_num in enumerate(cell_index):
if index_num < 0 or index_num >= self.dimensions[i]:
cell_is_exterior = True
break
if cell_is_exterior == False:
result.append(cell_index)
return result
def getOrthogonalNeighbors(self, cell_index):
orthogonal_directions = []
| |
Input: height = [4,2,0,3,2,5]
Output: 9
"""
def trap(self, height: List[int]) -> int:
l, r = 0, len(height) - 1
l_max = r_max = area = 0
while l < r:
if height[l] < height[r]:
if height[l] < l_max:
area += l_max - height[l]
else:
l_max = height[l]
l += 1
else:
if height[r] < r_max:
area += r_max - height[r]
else:
r_max = height[r]
r -= 1
return area
class _200:
"""
# - Number of Islands -
# https://leetcode.com/problems/number-of-islands/
Given an m x n 2D binary grid grid which represents a map of '1's (land)
and '0's (water), return the number of islands.
An island is surrounded by water and is formed by connecting adjacent lands
horizontally or vertically. You may assume all four edges of the grid are
all surrounded by water.
Example 1:
Input: grid = [
["1","1","1","1","0"],
["1","1","0","1","0"],
["1","1","0","0","0"],
["0","0","0","0","0"]
]
Output: 1
Example 2:
Input: grid = [
["1","1","0","0","0"],
["1","1","0","0","0"],
["0","0","1","0","0"],
["0","0","0","1","1"]
]
Output: 3
"""
def numIslands(self, grid: List[List[str]]) -> int:
# DFS method, O(mn) time and space
def fn(i, j, grid: List[List[str]]) -> None:
"""Flood island with 0s."""
if (
i < 0
or j < 0
or i == len(grid)
or j == len(grid[0])
or grid[i][j] == "0"
):
return
else:
grid[i][j] = "0"
fn(i, j + 1, grid)
fn(i, j - 1, grid)
fn(i + 1, j, grid)
fn(i - 1, j, grid)
islands = 0
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == "1":
islands += 1
fn(i, j, grid)
return islands
"""
index = i * n + j
1 1
1 0
0,0 0,1
1,0 1,1
0*2+0 0*2+1 0 1
1*2+0 1*2+1 2 3
"""
def numIslands_(self, grid: List[List[str]]) -> int:
# UnionFind method, O(mn) time and space
# but each operation takes O(inverse Ackermann function) time
# which is essentially O(1), thus superior to DFS
if len(grid) == 0:
return 0
m, n = len(grid), len(grid[0])
count = sum(grid[i][j] == "1" for i in range(m) for j in range(n))
uf = UnionFind(m * n, count)
for i in range(m):
for j in range(n):
if grid[i][j] == "0":
continue
index = i * n + j
if j < n - 1 and grid[i][j + 1] == "1":
uf.union(index, index + 1)
if i < m - 1 and grid[i + 1][j] == "1":
uf.union(index, index + n)
return uf.count
class _4:
"""
# - Median of Two Sorted Arrays -
# https://leetcode.com/problems/median-of-two-sorted-arrays/
Given two sorted arrays nums1 and nums2 of size m and n respectively,
return the median of the two sorted arrays.
The overall run time complexity should be O(log (m+n)).
Example 1:
Input: nums1 = [1,3], nums2 = [2]
Output: 2.00000
Explanation: merged array = [1,2,3] and median is 2.
Example 2:
Input: nums1 = [1,2], nums2 = [3,4]
Output: 2.50000
Explanation: merged array = [1,2,3,4] and median is (2 + 3) / 2 = 2.5.
"""
# https://www.youtube.com/watch?v=LPFhl65R7ww
# https://github.com/mission-peace/interview/blob/master/src/com/interview/binarysearch/MedianOfTwoSortedArrayOfDifferentLength.java
def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
"""O(log(min(m,n)) time, O(m+n) space"""
x, y = sorted((nums1, nums2), key=len)
low, high = 0, len(x)
while low <= high:
x_partition = (low + high) // 2
y_partition = (len(x) + len(y) + 1) // 2 - x_partition
# four points of interest about the two partitions
max_l_x = x[x_partition - 1] if x_partition > 0 else -inf
max_l_y = y[y_partition - 1] if y_partition > 0 else -inf
min_r_x = x[x_partition] if x_partition < len(x) else inf
min_r_y = y[y_partition] if y_partition < len(y) else inf
if max_l_x <= min_r_y and max_l_y <= min_r_x: # valid partition
if (len(x) + len(y)) % 2 == 0: # even
return (max(max_l_x, max_l_y) + min(min_r_x, min_r_y)) / 2
else: # odd
return max(max_l_x, max_l_y)
# binary search
elif max_l_x > min_r_y:
high = x_partition - 1
else:
low = x_partition + 1
raise RuntimeError("Couldn't find median of two sorted arrays!")
class _121:
"""
# - Best Time to Buy and Sell Stock -
# https://leetcode.com/problems/best-time-to-buy-and-sell-stock/
You are given an array prices where prices[i] is the price of a given stock
on the ith day.
You want to maximize your profit by choosing a single day to buy one stock
and choosing a different day in the future to sell that stock.
Return the maximum profit you can achieve from this transaction. If you
cannot achieve any profit, return 0.
Example 1:
Input: prices = [7,1,5,3,6,4]
Output: 5
Explanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6),
profit = 6-1 = 5.
Note that buying on day 2 and selling on day 1 is not allowed because you
must buy before you sell.
Example 2:
Input: prices = [7,6,4,3,1]
Output: 0
Explanation: In this case, no transactions are done and the max profit = 0.
"""
def maxProfit(self, prices: List[int]) -> int:
"""O(n) time, O(1) space"""
buy, profit = prices[0], 0
for price in prices:
buy = min(buy, price)
profit = max(profit, price - buy)
return profit
class _1249:
"""
# - Minimum Remove to Make Valid Parentheses -
# https://leetcode.com/problems/minimum-remove-to-make-valid-parentheses/
Given a string s of '(' , ')' and lowercase English characters.
Your task is to remove the minimum number of parentheses ('(' or ')', in
any positions) so that the resulting parentheses string is valid and
return any valid string.
Formally, a parentheses string is valid if and only if:
- It is the empty string, contains only lowercase characters, or
- It can be written as AB (A concatenated with B), where A and B are
valid strings, or
- It can be written as (A), where A is a valid string.
Example 1:
Input: s = "lee(t(c)o)de)"
Output: "lee(t(c)o)de"
Explanation: "lee(t(co)de)" , "lee(t(c)ode)" would also be accepted.
Example 2:
Input: s = "a)b(c)d"
Output: "ab(c)d"
Example 3:
Input: s = "))(("
Output: ""
Explanation: An empty string is also valid.
"""
def minRemoveToMakeValid(self, s: str) -> str:
"""O(n) time and space"""
list_s = list(s)
stack: List[int] = []
for i, c in enumerate(list_s):
if c == "(":
stack.append(i)
elif c == ")":
if stack: # matching
stack.pop()
else: # extra
list_s[i] = ""
while stack:
list_s[stack.pop()] = ""
return "".join(list_s)
class _20:
"""
# - Valid Parentheses -
# https://leetcode.com/problems/valid-parentheses/
Given a string s containing just the characters '(', ')', '{', '}',
'[' and ']', determine if the input string is valid.
An input string is valid if:
Open brackets must be closed by the same type of brackets.
Open brackets must be closed in the correct order.
Example 1:
Input: s = "()"
Output: true
Example 2:
Input: s = "()[]{}"
Output: true
Example 3:
Input: s = "(]"
Output: false
"""
def isValid(self, s: str) -> bool:
"""O(n) time and space"""
stack: List[str] = []
match = {"(": ")", "[": "]", "{": "}"}
for x in s:
if x in match:
stack.append(x)
elif not stack or match[stack.pop()] != x:
return False
return not stack
class _53:
"""
# - Maximum Subarray -
# https://leetcode.com/problems/maximum-subarray/
Given an integer array nums, find the contiguous subarray (containing at
least one number) which has the largest sum and return its sum.
A subarray is a contiguous part of an array.
Example 1:
Input: nums = [-2,1,-3,4,-1,2,1,-5,4]
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6.
Example 2:
Input: nums = [1]
Output: 1
Example 3:
Input: nums = [5,4,-1,7,8]
Output: 23
"""
def maxSubArray(self, nums: List[int]) -> int:
"""O(n) time, O(1) space"""
cur = ans = nums[0]
for num in nums[1:]:
cur = max(num, cur + num)
ans = max(ans, cur)
return ans
class _68:
"""
# - Text Justification -
# https://leetcode.com/problems/text-justification/
Given an array of strings words and a width maxWidth, format the text
such that each line has exactly maxWidth | |
'┠'
rb = '┨'
anim1 = ['⣾', '⣽', '⣻', '⢿', '⡿', '⣟', '⣯', '⣷']
anim2 = ['⣷', '⣯', '⣟', '⡿', '⢿', '⣻', '⣽', '⣾']
bs = len(ls) + len(anim1[0]) + len(lb) + len(rb) + len(anim2[0])
maxlen = 0
f = min(len(anim1), len(anim2))
barlength = None
minpctsz = len("─1%─┤")
cd = lambda x: colored(x, 'yellow')
def eraseLine(flush: bool = False):
cprint(cd(' ' * maxlen), end = '\r', flush = flush)
i = 0
pct = 0.0
while pct < 1.0:
pct = waitFunc()
assert pct <= 1.0
newbarlength = get_terminal_size(fallback = (72, 24)).columns - bs
if barlength and barlength != newbarlength:
eraseLine(flush = True)
barlength = newbarlength
c = int(pct * barlength)
if c == 0:
arrow = ""
elif c == 1:
arrow = '┤'
elif c < minpctsz:
arrow = (c - 1) * '─' + '┤'
else:
p100 = str(int(100 * pct)) + '%'
rmdr = c - len(p100) - 1 # 1 for arrowhead
left = rmdr >> 1
right = rmdr - left
arrow = left * '─' + p100 + right * '─' + '┤'
s = ls + anim1[i % f] + lb + arrow + ' ' * (barlength - c) + \
rb + anim2[i % f]
maxlen = max(maxlen, len(s))
print(cd(s), end='\r', flush=True)
if pct == 1.0:
# When we finish, erase all traces of progress meter
eraseLine(flush = False)
return
i += 1
time.sleep(0.25)
def spinWaitTest():
    """Drive spinWait() through five runs of randomly increasing progress,
    growing the per-tick increment cap by 1.5x each run."""
    progress = 0.0
    def advance(cap: float) -> float:
        # bump progress by a random amount, clamped to the 1.0 terminal value
        nonlocal progress
        progress = min(1.0, progress + random.uniform(0.0, cap))
        return progress
    cap = 0.01
    for _ in range(1, 6):
        spinWait(lambda: advance(cap))
        progress = 0.0
        cap *= 1.5
# A class for recording ssh tunnels
class Tunnel:
    """Own a background `ssh -N -L` local port-forward through a bastion host.

    The forwarding ssh process is spawned in the constructor and terminated
    in __del__, so the tunnel lives exactly as long as the Tunnel object.
    """
    def __init__(self, shortname: str, bastionIp: ipaddress.IPv4Address,
            lPort: int, rAddr: str, rPort: int):
        """Forward local port lPort to rAddr:rPort via ubuntu@bastionIp."""
        self.shortname = shortname
        self.bastion = bastionIp
        self.lport = lPort
        self.raddr = rAddr
        self.rport = rPort
        # Set to None first so __del__/__str__ are safe even if Popen raises.
        self.p = None
        self.command = "ssh -N -L{p}:{a}:{k} ubuntu@{b}".format(p = lPort, a =
                rAddr, k = rPort, b = bastionIp)
        print(self.command)
        self.p = subprocess.Popen(self.command.split())
        announce("Created tunnel " + str(self))

    def __del__(self):
        """Kill the forwarding ssh process when the object is collected."""
        announce("Terminating tunnel " + str(self))
        # ns is presumably the parsed CLI argument namespace — confirm
        if ns.summarise_ssh_tunnels:
            print(self.command)
        if self.p != None:
            self.p.terminate()

    def __str__(self):
        tgtname = self.shortname
        # Short targets (likely bare IPs) get the name and address shown
        if len(self.raddr) < 16:
            tgtname = "[{n}]{h}".format(n = self.shortname, h = self.raddr)
        return "{l} -> {ra}:{rp} (PID {p})".format(l = self.lport, ra =
                tgtname, rp = self.rport, p = self.p.pid if self.p else "?")
# Input dictionary is the output variables from Terraform.
def establishBastionTunnel(env: dict) -> list[Tunnel]:
    """Open the ssh tunnels needed to reach the private cluster.

    Refreshes the bastion's host key in the ssh known-hosts file, starts a
    tunnel to the Kubernetes API server, points the local kubeconfig at it,
    waits for the API server to answer, and (when LDAP authentication is
    enabled) adds a second tunnel to the LDAP server.  Returns every Tunnel
    created — the caller must keep them referenced or they terminate.

    :param env: Terraform output variables; reads "bastion_address" and
        "k8s_api_server".
    """
    # The new bastion server will have a new host key. Delete the old one we
    # have and grab the new one.
    announce(f"Replacing bastion host keys in {knownhosts}")
    try:
        runStdout("ssh-keygen -q -R {b}".format(b =
            env["bastion_address"]).split())
    except CalledProcessError as e:
        # best-effort: a missing old key is fine
        print("Unable to remove host key for {b} in {k}. Is file "
                "missing?".format(b = env["bastion_address"], k = knownhosts))
    cmd = "ssh-keyscan -4 -p22 -H {b}".format(b = env["bastion_address"])
    f = lambda: run(cmd.split(), check = True, verbose = False)
    print(cmd)
    # the freshly booted host may not accept connections yet, so retry
    cp = retryRun(f, 3, cmd)
    if cp.returncode != 0:
        sys.exit("Unable, after repeated attempts, to contact new host "
                "{}".format(env["bastion_address"]))
    hostkeys = cp.stdout.strip()
    print("Adding {n} host keys from bastion to {k}".format(n =
        len(hostkeys.splitlines()), k = knownhosts))
    appendToFile(knownhosts, hostkeys)
    tuns = []
    # Start up the tunnel to the Kubernetes API server
    tun = Tunnel("k8s-apiserver", env["bastion_address"],
            getLclPort("apiserv"), env["k8s_api_server"],
            getRmtPort("apiserv"))
    tuns.append(tun)
    # Now that the tunnel is in place, update our kubecfg with the address to
    # the tunnel, keeping everything else in place
    updateKubeConfig()
    # Ensure that we can talk to the api server
    announce("Waiting for api server to respond")
    spinWait(waitUntilApiServerResponding)
    # Start up the tunnel to the LDAP server
    if authnldap:
        assert tlsenabled()
        tun = Tunnel("ldaps", env["bastion_address"], getLclPort("ldaps"),
                ldapfqdn, getRmtPort("ldaps"))
        tuns.append(tun)
    # Copy my private RSA key over to the bastion.
    # FIXME Yes, that is slightly dangerous.
    if upstreamSG:
        announce("Copying over private RSA key to bastion")
        runTry("scp {r} ubuntu@{h}:/home/ubuntu/.ssh/id_rsa".format(r = rsa,
            h = env["bastion_address"]).split())
    return tuns
def addAwsAuthConfigMap(workerIamRoleArn: str) -> None:
    """Install the EKS aws-auth configmap so worker nodes can join (idempotent).

    If the configmap already exists this returns immediately; otherwise the
    template is parameterised with the node role ARN and applied.

    :param workerIamRoleArn: IAM role ARN of the worker nodes.
    """
    # If we've already got an aws auth config map, we're done
    r = runTry(f"{kube} describe configmap -n kube-system aws-auth".split())
    if r.returncode == 0:
        announce("aws-auth configmap already installed")
        return
    # Parameterise the aws auth config map template with the node role arn
    changed, yamltmp = parameteriseTemplate(awsauthcm, tfdir, {"NodeRoleARN":
        workerIamRoleArn}, {'EC2PrivateDNSName'})
    # Nodes should start joining after this
    announce("Adding aws-auth configmap to cluster")
    runStdout(f"{kube} apply -f {yamltmp}".split())
def ensureClusterIsStarted(skipClusterStart: bool) -> \
        tuple[list[Tunnel], dict]:
    """Bring the Kubernetes cluster up and make it reachable locally.

    Parameterises the Terraform variables template from module-level
    settings, runs `terraform init`/`apply` (unless skipClusterStart),
    opens ssh tunnels via the bastion, installs the aws-auth configmap on
    AWS, and waits until all nodes and kube-system pods are ready.

    :param skipClusterStart: skip the terraform run and just reconnect.
    :return: (tunnels to keep alive, Terraform output variables)
    """
    # merge static template variables with this run's settings (3.9+ dict |)
    env = myvars | {
            "BastionLaunchScript": bastlaunchf,
            "BucketName": bucket,
            "ClusterName": clustname,
            "DownstreamSG": downstreamSG,
            "DbInstanceType": dbInstanceType,
            "DBName": dbschema,
            "DBNameEventLogger": dbevtlog,
            "DBPassword": <PASSWORD>,  # NOTE(review): redaction placeholder — not valid Python; restore the real credential variable
            "DBUser": dbuser,
            "InstanceType": instanceType,
            "LdapLaunchScript": ldaplaunchf,
            "MaxPodsPerNode": maxpodpnode,
            "MyCIDR": mySubnetCidr,
            "MyPublicIP": getMyPublicIp(),
            "NodeCount": nodeCount,
            "SmallInstanceType": smallInstanceType,
            "SshPublicKey": getSshPublicKey(),
            "Region": region,
            "ShortName": shortname,
            "UpstrBastion": upstrBastion,
            "UpstreamSG": upstreamSG,
            "UserName": username,
            "Zone": zone
            }
    assert target in clouds
    # cloud-specific extras
    if target == "az":
        env["ResourceGroup"] = resourcegrp
        env["StorageAccount"] = storageacct
    elif target == "gcp":
        env["GcpProjectId"] = gcpproject
        env["GcpAccount"] = gcpaccount
    parameteriseTemplate(tfvars, tfdir, env)
    # The terraform run. Perform an init, then an apply.
    if not skipClusterStart:
        announce("Starting terraform run")
        t = time.time()
        runStdout(f"{tf} init -upgrade -input=false".split())
        runStdout(f"{tf} apply -auto-approve -input=false".split())
        announce("terraform run completed in " + time.strftime("%Hh%Mm%Ss",
            time.gmtime(time.time() - t)))
    # Get variables returned from terraform run
    env = getOutputVars()
    # Start up ssh tunnels via the bastion, so we can run kubectl and ldap
    # locally from the workstation
    tuns = establishBastionTunnel(env)
    # For AWS, the nodes will not join until we have added the node role ARN to
    # the aws-auth-map-cn.yaml.
    if target == "aws":
        addAwsAuthConfigMap(env["worker_iam_role_arn"])
    # Don't continue until all nodes are ready
    announce("Waiting for nodes to come online")
    spinWait(lambda: waitUntilNodesReady(nodeCount))
    # Don't continue until all K8S system pods are ready
    announce("Waiting for K8S system pods to come online")
    spinWait(lambda: waitUntilPodsReady(nodeCount*2, "kube-system"))
    return tuns, env
# Starburst pods sometimes get stuck in Terminating phase after a helm upgrade.
# Kill these off immediately to save time and they will restart quickly.
def killAllTerminatingPods() -> None:
    """Force-delete every pod in the namespace whose status is Terminating."""
    for line in runCollect(f"{kubens} get pods --no-headers".split()).splitlines():
        fields = line.split()
        # kubectl columns: NAME READY STATUS RESTARTS AGE
        name, status = fields[0], fields[2]
        if status != "Terminating":
            continue
        result = runTry(f"{kubens} delete pod {name} --force "
                "--grace-period=0".split())
        if result.returncode == 0:
            print(f"Terminated pod {name}")
# TODO Azure and GCP allow static IPs to be specified for LBs, so we rely on
# that to set up LDAP during our Terraform run with those IPs. AWS doesn't
# allow IPs to be explicitly set for load balancers, so we have to take a
# different approach post-Terraform, which is to create a CNAME in Route 53
# that references the (classic) LBs that AWS sets up.
def setRoute53Cname(lbs: dict[str, str], route53ZoneId: str,
        delete: bool = False) -> None:
    """Create (UPSERT) or delete Route 53 CNAMEs for the service load balancers.

    Writes a change batch to a temp JSON file and submits it with the aws
    CLI.  Each service name in `lbs` gets a CNAME `<name>.<domain>` pointing
    at its load-balancer hostname.

    :param lbs: service name -> load balancer hostname
    :param route53ZoneId: hosted zone to modify
    :param delete: when True, remove the records instead of upserting them
    """
    announce("{v} route53 entries for {s}".format(v = "Deleting" if delete else
        "Creating", s = ", ".join(services)))
    batchf = f"{tmpdir}/crrs_batch.json"
    batch: Dict[str, Any] = {
            "Comment": "DNS CNAME records for starburst and ranger.",
            "Changes": []
            }
    action = "DELETE" if delete else "UPSERT"
    for name, host in lbs.items():
        assert name in services
        # If multi-cloud Stargate is enabled, then we want each worker to point
        # to the bastion address, so we can pipe it to the next Starburst
        # instance rather than to itself. We are actually pointing the
        # Starburst FQDN to the bastion FQDN via a CNAME, then the bastion FQDN
        # has an A record that points to the internal bastion IP.
        if upstreamSG and name == "starburst":
            host = bastionfqdn
        batch["Changes"].append({
            "Action": action,
            "ResourceRecordSet": {
                "Name": f"{name}.{domain}",
                "Type": "CNAME",
                "TTL": 300,
                "ResourceRecords": [{ "Value": host }]}})
    replaceFile(batchf, json.dumps(batch))
    cmd = "aws route53 change-resource-record-sets --hosted-zone-id " \
            f"{route53ZoneId} --change-batch file://{batchf}"
    runCollect(cmd.split())
def startPortForwardToLBs(bastionIp: str,
route53ZoneId: str = None) -> list[Tunnel]:
tuns: list[Tunnel] = []
# should be nodeCount - 1 workers with 1 container each, 2 containers for
# the coordinator, and 2 containers each for Hive and Ranger
announce("Waiting for pods to be ready")
spinWait(lambda: waitUntilPodsReady(nodeCount + 5, namespace))
# coordinator, worker, hive, ranger, 1 replica each = 4 | |
of corruption to use
corruption_level = T.scalar('corruption')
# momentum rate to use
momentum = T.scalar('momentum')
assert method in ['cm','adagrad','adagrad_momentum']
# begining of a batch, given `index`
batch_begin = index * batch_size
# ending of a batch given `index`
batch_end = batch_begin + batch_size
pretrain_fns = []
for dA in self.dA_layers:
# get the cost and the updates list
cost, updates = dA.get_cost_gparams(corruption_level,learning_rate)
# apply the updates in accordnace with the SGD method
if method == 'cm':
mod_updates = self.sgd_cm(learning_rate, momentum, updates)
input_list = [index,momentum,theano.Param(corruption_level, default=0.25)]
elif method == 'adagrad':
mod_updates = self.sgd_adagrad(learning_rate, updates)
input_list = [index,theano.Param(corruption_level, default=0.25)]
else:
mod_updates = self.sgd_adagrad_momentum(momentum, learning_rate, updates)
input_list = [index,momentum,theano.Param(corruption_level, default=0.25)]
# compile the theano function
fn = theano.function(inputs=input_list,
outputs=cost,
updates=mod_updates,
givens={self.x: train_set_x[batch_begin:
batch_end]})
# append `fn` to the list of functions
pretrain_fns.append(fn)
return pretrain_fns
    def build_finetune_limited_reconstruction(self, train_set_x, batch_size, learning_rate, method='cm'):
        ''' Generates a list of theano functions, each of them implementing one
        step in hybrid pretraining. Hybrid pretraining is traning to minimize the
        reconstruction error of the data against the representation produced using
        two or more layers of the SdA.
        N.B: learning_rate should be a theano.shared variable declared in the
        code driving the (pre)training of this SdA.
        :type train_set_x: theano.tensor.TensorType
        :param train_set_x: Shared variable that contains all datapoints used
                            for training the dA
        :type batch_size: int
        :param batch_size: size of a [mini]batch
        :type learning_rate: theano.tensor.shared
        :param learning_rate: the learning rate for pretraining
        :type method: string
        :param method: specifies the flavour of SGD used to train each dA layer. Accepted values are 'cm', 'adagrad', 'adagrad_momentum' '''
        # index to a minibatch
        index = T.lscalar('index')
        # momentum rate to use
        momentum = T.scalar('momentum')
        # weight decay to use
        weight_decay = T.scalar('weight_decay')
        # begining of a batch, given `index`
        batch_begin = index * batch_size
        # ending of a batch given `index`
        batch_end = batch_begin + batch_size
        # sanity check on number of layers: limited reconstruction only makes
        # sense with more than two dA layers
        assert 2 < len(self.dA_layers)
        # Check on SGD method
        assert method in ['cm','adagrad','adagrad_momentum','cm_wd','adagrad_momentum_wd']
        hybrid_train_fns = []
        # one training function per depth i, reconstructing through layers 0..i
        for i in xrange(2,len(self.dA_layers)):
            # get the subset of model params involved in the limited reconstruction
            # assumes each dA layer contributes 3 shared params (W, b, b') to
            # self.params — TODO confirm against the SdA constructor
            limited_params = self.params[:i*3]
            # compute the gradients with respect to the partial model parameters
            gparams = T.grad(self.reconstruction_error_limited(self.x, i), limited_params)
            # Ensure that gparams has same size as limited_params
            assert len(gparams) == len(limited_params)
            # apply the updates in accordnace with the SGD method
            if method == 'cm':
                mod_updates = self.sgd_cm(learning_rate, momentum, zip(limited_params,gparams))
                input_list = [index,momentum]
            elif method == 'adagrad':
                mod_updates = self.sgd_adagrad(learning_rate, zip(limited_params,gparams))
                input_list = [index]
            elif method == 'adagrad_momentum':
                mod_updates = self.sgd_adagrad_momentum(momentum, learning_rate, zip(limited_params,gparams))
                input_list = [index,momentum]
            elif method == 'cm_wd':
                mod_updates = self.sgd_cm_wd(learning_rate, momentum, weight_decay, zip(limited_params,gparams))
                input_list = [index,momentum,weight_decay]
            else:
                mod_updates = self.sgd_adagrad_momentum_wd(momentum, learning_rate, weight_decay, zip(limited_params,gparams))
                input_list = [index,momentum,weight_decay]
            # the hybrid pre-training function now takes into account the update algorithm and proper input
            fn = theano.function(inputs=input_list,
                                 outputs=self.reconstruction_error_limited(self.x, i),
                                 updates=mod_updates,
                                 givens={self.x: train_set_x[batch_begin:
                                                             batch_end]})
            # append `fn` to the list of functions
            hybrid_train_fns.append(fn)
        return hybrid_train_fns
def build_finetune_full_reconstruction(self, datasets, batch_size, learning_rate, method='cm'):
    '''
    Generates a function `train` that implements one step of
    finetuning, a function `validate` that computes the reconstruction
    error on a batch from the validation set
    :type datasets: tuple of theano.tensor.TensorType
    :param datasets: A tuple of two datasets;
                     `train`, `valid` in this order, each
                     one is a T.dmatrix of datapoints
    :type batch_size: int
    :param batch_size: size of a minibatch
    :type learning_rate: theano.tensor.shared
    :param learning_rate: learning rate used during finetune stage
    :type method: string
    :param method: specifies the flavour of SGD used to finetune the model.
                   Accepted values are 'cm', 'adagrad', 'adagrad_momentum',
                   'cm_wd', 'adagrad_momentum_wd'
    :returns: (train_fn, valid_score) where train_fn performs one SGD step on
              the minibatch selected by its `index` input and valid_score()
              returns the per-batch validation errors over the whole set
    '''
    (train_set_x, valid_set_x) = datasets
    # compute number of minibatches for training, validation and testing
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
    # NOTE(review): integer floor division under Python 2 (`xrange` below
    # implies py2); any trailing partial validation batch is dropped.
    n_valid_batches /= batch_size
    index = T.lscalar('index')  # index to a [mini]batch
    # compute the gradients with respect to the model parameters
    gparams = T.grad(self.finetune_cost, self.params)
    # momentum rate to use
    momentum = T.scalar('momentum')
    # weight decay value to use
    weight_decay = T.scalar('weight_decay')
    assert method in ['cm','adagrad','adagrad_momentum','cm_wd','adagrad_momentum_wd']
    # apply the updates in accordance with the SGD method; each branch also
    # selects which symbolic inputs the compiled function will require
    if method == 'cm':
        mod_updates = self.sgd_cm(learning_rate, momentum, zip(self.params,gparams))
        input_list = [index,momentum]
    elif method == 'adagrad':
        mod_updates = self.sgd_adagrad(learning_rate, zip(self.params,gparams))
        input_list = [index]
    elif method == 'adagrad_momentum':
        mod_updates = self.sgd_adagrad_momentum(momentum, learning_rate, zip(self.params,gparams))
        input_list = [index,momentum]
    elif method == 'cm_wd':
        mod_updates = self.sgd_cm_wd(learning_rate, momentum, weight_decay, zip(self.params,gparams))
        input_list = [index,momentum,weight_decay]
    else:
        # 'adagrad_momentum_wd' — guaranteed by the assert above
        mod_updates = self.sgd_adagrad_momentum_wd(momentum, learning_rate, weight_decay, zip(self.params,gparams))
        input_list = [index,momentum,weight_decay]
    # compile the fine-tuning theano function, taking into account the update algorithm
    train_fn = theano.function(inputs=input_list,
                               outputs=self.finetune_cost,
                               updates=mod_updates,
                               givens={
                                   self.x: train_set_x[index * batch_size:
                                                       (index + 1) * batch_size]})
    # per-batch validation score; self.errors is the symbolic validation measure
    valid_score_i = theano.function([index], self.errors,
                                    givens={
                                        self.x: valid_set_x[index * batch_size:
                                                            (index + 1) * batch_size]})
    # Create a function that scans the entire validation set
    def valid_score():
        return [valid_score_i(i) for i in xrange(n_valid_batches)]
    return train_fn, valid_score
def build_encoding_functions(self, dataset):
    ''' Build and return a theano function that feeds a slice of
    `dataset` forward through the stacked layers, yielding the
    lower-dimensional top-layer representation.
    :type dataset: theano.tensor.TensorType
    :param dataset: A T.dmatrix of datapoints to be fed through the SdA
    :returns: a compiled function taking (start, end) row indices
    '''
    slice_begin = T.lscalar('start')
    slice_end = T.lscalar('end')
    # substitute the requested dataset slice for the symbolic input
    return theano.function(inputs=[slice_begin, slice_end],
                           outputs=self.output,
                           givens={self.x: dataset[slice_begin:slice_end]})
def test_gradient(self,dataset,index=1,batch_size=1):
    ''' Return a Theano function that will evaluate
    the gradient wrt some points sampled from the provided dataset)
    Example provided by http://deeplearning.net/software/theano/tutorial/gradients.html#tutcomputinggrads
    x = T.dmatrix('x')
    s = T.sum(1 / (1 + T.exp(-x)))
    gs = T.grad(s, x)
    dlogistic = function([x], gs)
    dlogistic([[0, 1], [-1, -2]])
    :type dataset: theano.tensor.TensorType
    :param dataset: A T.dmatrix of datapoints, should be a shared variable.
    :type index: int
    :param index: identifies the start of the gradient test batch of data, a subset of dataset.
                  NOTE(review): this parameter is never read below — the
                  returned function takes its own symbolic index argument;
                  confirm with callers before removing.
    :type batch_size: int
    :param batch_size: size of the test batch.
    :returns: compiled function mapping a batch index to the list of
              gradients of finetune_cost w.r.t. each model parameter
    '''
    index_val = T.lscalar('gtestindex') # index to a [mini]batch
    # compute the gradients with respect to the model parameters
    gparams = T.grad(self.finetune_cost, self.params)
    # create a function to evaluate the gradient on the batch at index
    eval_grad = theano.function(inputs=[index_val], outputs=gparams, givens= {self.x: dataset[index_val * batch_size: (index_val + 1) * batch_size]})
    return eval_grad
##################### Pickling functions ###############################
def __getstate__(self):
    """ Pickle this SdA by tupling up the layers, output size, dA param lists, corruption levels and layer types. """
    # pull the (W, bhid, bvis) triple out of every dA layer once,
    # then strip the numeric values out of each shared variable
    triples = [layer.get_params() for layer in self.dA_layers]
    W_list = [W.get_value(borrow=True) for (W, _, _) in triples]
    bhid_list = [bh.get_value(borrow=True) for (_, bh, _) in triples]
    bvis_list = [bv.get_value(borrow=True) for (_, _, bv) in triples]
    return (self.n_layers, self.n_outs, W_list, bhid_list, bvis_list,
            self.corruption_levels, self.layer_types, self.use_loss,
            self.dropout_rates, self.opt_method)
def __setstate__(self, state):
""" Unpickle an SdA model by restoring the list of dA layers.
The input should be provided to the initial layer, and the input of layer i+1 is set to the output of layer i.
Fill up the self.params from the dA params lists. """
(layers, n_outs, dA_W_list, dA_bhid_list, dA_bvis_list, corruption_levels, layer_types, use_loss, dropout_rates, opt_method) = state
self.n_layers = layers
self.n_outs = n_outs
self.corruption_levels = corruption_levels
self.layer_types = layer_types
self.dA_layers = []
self.use_loss = use_loss
self.opt_method = opt_method
self.params = []
self.x = T.matrix('x') # symbolic input for the training data
self.x_prime = T.matrix('X_prime') # symbolic output for the top layer dA
numpy_rng = np.random.RandomState(123)
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# Set the dropout rates
if dropout_rates is not None:
self.dropout_rates = dropout_rates
else:
self.dropout_rates = [1.0 for i in xrange(self.n_layers)]
# build each layer dynamically
layer_classes = {'gaussian': GaussianAutoEncoder, 'bernoulli': BernoulliAutoEncoder, 'relu': ReluAutoEncoder}
for i in xrange(self.n_layers):
# the input to this layer is either the activation of the hidden
# layer below or the | |
18.1043],
[6002500, 2, 0, 4, 4, 19.5111],
[6021120, 13, 1, 1, 2, 16.7469],
[6048000, 8, 3, 3, 1, 16.1748],
[6050520, 3, 2, 1, 5, 22.1156],
[6075000, 3, 5, 5, 0, 17.0911],
[6096384, 9, 5, 0, 2, 16.3736],
[6123600, 4, 7, 2, 1, 16.1739],
[6125000, 3, 0, 6, 2, 18.9867],
[6144000, 14, 1, 3, 0, 17.1476],
[6146560, 9, 0, 1, 4, 18.9854],
[6174000, 4, 2, 3, 3, 19.79],
[6193152, 15, 3, 0, 1, 14.0185],
[6200145, 0, 11, 1, 1, 19.5937],
[6220800, 10, 5, 2, 0, 15.1014],
[6223392, 5, 4, 0, 4, 16.742],
[6250000, 4, 0, 8, 0, 17.3484],
[6251175, 0, 6, 2, 3, 21.0156],
[6272000, 10, 0, 3, 2, 16.9272],
[6291456, 21, 1, 0, 0, 15.7291],
[6298560, 6, 9, 1, 0, 15.7422],
[6300000, 5, 2, 5, 1, 18.3778],
[6302625, 0, 1, 3, 5, 23.8994],
[6322176, 11, 2, 0, 3, 17.6407],
[6328125, 0, 4, 7, 0, 19.3691],
[6350400, 6, 4, 2, 2, 15.2941],
[6353046, 1, 3, 0, 6, 23.3314],
[6377292, 2, 13, 0, 0, 16.5339],
[6378750, 1, 6, 4, 1, 21.2589],
[6400000, 11, 0, 5, 0, 15.5825],
[6422528, 17, 0, 0, 2, 15.5618],
[6429780, 2, 8, 1, 2, 19.8161],
[6431250, 1, 1, 5, 3, 25.1026],
[6451200, 12, 2, 2, 1, 16.0292],
[6453888, 7, 1, 0, 5, 20.5149],
[6480000, 7, 4, 4, 0, 13.8726],
[6482700, 2, 3, 2, 4, 21.3675],
[6531840, 8, 6, 1, 1, 18.4664],
[6553600, 18, 0, 2, 0, 13.9588],
[6561000, 3, 8, 3, 0, 16.9924],
[6562500, 2, 1, 7, 1, 22.614],
[6585600, 8, 1, 2, 3, 19.819],
[6588344, 3, 0, 0, 7, 23.023],
[6613488, 4, 10, 0, 1, 18.6663],
[6615000, 3, 3, 4, 2, 19.3855],
[6635520, 14, 4, 1, 0, 14.9667],
[6667920, 4, 5, 1, 3, 21.5419],
[6718464, 10, 8, 0, 0, 14.4053],
[6720000, 9, 1, 4, 1, 19.9709],
[6722800, 4, 0, 2, 5, 20.9469],
[6750000, 4, 3, 6, 0, 19.1538],
[6751269, 0, 9, 0, 3, 21.6657],
[6773760, 10, 3, 1, 2, 19.2277],
[6804000, 5, 5, 3, 1, 19.0909],
[6806835, 0, 4, 1, 5, 24.6768],
[6834375, 0, 7, 5, 0, 20.79],
[6858432, 6, 7, 0, 2, 16.9715],
[6860000, 5, 0, 4, 3, 19.3809],
[6881280, 16, 1, 1, 1, 19.1423],
[6889050, 1, 9, 2, 1, 22.5203],
[6890625, 0, 2, 6, 2, 24.561],
[6912000, 11, 3, 3, 0, 17.3241],
[6914880, 6, 2, 1, 4, 21.503],
[6945750, 1, 4, 3, 3, 25.0084],
[6967296, 12, 5, 0, 1, 19.1726],
[6998400, 7, 7, 2, 0, 15.4125],
[7000000, 6, 0, 6, 1, 19.6882],
[7001316, 2, 6, 0, 4, 23.3009],
[7024640, 12, 0, 1, 3, 19.4782],
[7031250, 1, 2, 8, 0, 25.6055],
[7056000, 7, 2, 3, 2, 19.3792],
[7058940, 2, 1, 1, 6, 28.1096],
[7077888, 18, 3, 0, 0, 14.7314],
[7085880, 3, 11, 1, 0, 19.6427],
[7087500, 2, 4, 5, 1, 21.6441],
[7112448, 8, 4, 0, 3, 18.5072],
[7144200, 3, 6, 2, 2, 21.3672],
[7168000, 13, 0, 3, 1, 18.5396],
[7200000, 8, 2, 5, 0, 18.5713],
[7203000, 3, 1, 3, 4, 25.421],
[7225344, 14, 2, 0, 2, 16.8964],
[7257600, 9, 4, 2, 1, 18.6249],
[7260624, 4, 3, 0, 5, 23.0966],
[7290000, 4, 6, 4, 0, 19.0828],
[7340032, 20, 0, 0, 1, 15.6188],
[7348320, 5, 8, 1, 1, 19.5004],
[7350000, 4, 1, 5, 2, 23.3534],
[7372800, 15, 2, 2, 0, 16.2988],
[7375872, 10, 1, 0, 4, 22.7221],
[7381125, 0, 10, 3, 0, 24.3076],
[7408800, 5, 3, 2, 3, 21.4507],
[7411887, 0, 2, 0, 7, 28.0701],
[7440174, 1, 12, 0, 1, 22.5729],
[7441875, 0, 5, 4, 2, 23.3703],
[7464960, 11, 6, 1, 0, 19.3943],
[7500000, 5, 1, 7, 0, 21.634],
[7501410, 1, 7, 1, 3, 27.6552],
[7503125, 0, 0, 5, 4, 26.0661],
[7526400, 11, 1, 2, 2, 20.8854],
[7529536, 6, 0, 0, 6, 22.153],
[7558272, 7, 10, 0, 0, 18.0857],
[7560000, 6, 3, 4, 1, 19.6511],
[7563150, 1, 2, 2, 5, 29.4575],
[7593750, 1, 5, 6, 0, 27.0084],
[7620480, 7, 5, 1, 2, 21.0763],
[7654500, 2, 7, 3, 1, 23.7918],
[7656250, 1, 0, 7, 2, 27.1514],
[7680000, 12, 1, 4, 0, 19.069],
[7683200, 7, 0, 2, 4, 20.5499],
[7715736, 3, 9, 0, 2, 21.902],
[7717500, 2, 2, 4, 3, 26.6196],
[7741440, 13, 3, 1, 1, 20.0735],
[7776000, 8, 5, 3, 0, 19.6432],
[7779240, 3, 4, 1, 4, 25.0938],
[7812500, 2, 0, 9, 0, 24.5237],
[7838208, 9, 7, 0, 1, 20.5854],
[7840000, 8, 0, 4, 2, 19.8058],
[7864320, 19, 1, 1, 0, 19.7581],
[7873200, 4, 9, 2, 0, 19.583],
[7875000, 3, 2, 6, 1, 26.233],
[7902720, 9, 2, 1, 3, 25.8356],
[7938000, 4, 4, 3, 2, 22.0025],
[7962624, 15, 5, 0, 0, 21.8659],
[7971615, 0, 13, 1, 0, 23.4199],
[8000000, 9, 0, 6, 0, 22.0011],
[8001504, 5, 6, 0, 3, 23.6534],
[8028160, 15, 0, 1, 2, 19.6705],
[8037225, 0, 8, 2, 2, 23.76],
[8064000, 10, 2, 3, 1, 23.279],
[8067360, 5, 1, 1, 5, 29.0941],
[8100000, 5, 4, 5, 0, 20.0103],
[8103375, 0, 3, 3, 4, 28.7766],
[8128512, 11, 4, 0, 2, 19.242],
[8164800, 6, 6, 2, 1, 21.6946],
[8168202, 1, 5, 0, 5, 30.9186],
[8192000, 16, 0, 3, 0, 23.9381],
[8201250, 1, 8, 4, 0, 24.183],
[8203125, 0, 1, 8, 1, 30.6303],
[8232000, 6, 1, 3, 3, 26.3667],
[8235430, 1, 0, 1, 7, 33.6397],
[8257536, 17, 2, 0, 1, 20.4696],
[8266860, 2, 10, 1, 1, 26.8346],
[8268750, 1, 3, 5, 2, 29.0819],
[8294400, 12, 4, 2, 0, 17.4199],
[8297856, 7, 3, 0, 4, 22.61],
[8334900, 2, 5, 2, 3, 27.113],
[8388608, 23, 0, 0, 0, 16.8528],
[8398080, 8, 8, 1, 0, 19.6904],
[8400000, 7, 1, 5, 1, 24.6334],
[8403500, 2, 0, 3, 5, 29.5985],
[8429568, 13, 1, 0, 3, 23.5547],
[8437500, 2, 3, 7, 0, 25.5434],
[8467200, 8, 3, 2, 2, 21.734],
[8470728, 3, 2, 0, 6, 29.4173],
[8503056, 4, 12, 0, 0, 18.8457],
[8505000, 3, 5, 4, 1, 25.1658],
[8573040, 4, 7, 1, 2, 25.0764],
[8575000, 3, 0, 5, 3, 27.7504],
[8601600, 14, 1, 2, 1, 22.2381],
[8605184, 9, 0, 0, 5, 26.5756],
[8640000, 9, 3, 4, 0, 21.8341],
[8643600, 4, 2, 2, 4, 26.7111],
[8680203, 0, 11, 0, 2, 26.0915],
[8709120, 10, 5, 1, 1, 25.5533],
[8748000, 5, 7, 3, 0, 22.4791],
[8750000, 4, 0, 7, 1, 25.2778],
[8751645, 0, 6, 1, 4, 31.6318],
[8780800, 10, 0, 2, 3, 24.5712],
[8817984, 6, 9, 0, 1, 22.2453],
[8820000, 5, 2, 4, 2, 24.7991],
[8823675, 0, 1, 2, 6, 33.2565],
[8847360, 16, 3, 1, 0, 20.9543],
[8857350, 1, 11, 2, 0, 26.6772],
[8859375, 0, 4, 6, 1, 30.5567],
[8890560, 6, 4, 1, 3, 25.7703],
[8930250, 1, 6, 3, 2, 31.8759],
[8957952, 12, 7, 0, 0, 19.4119],
[8960000, 11, 0, 4, 1, 22.307],
[9000000, 6, 2, 6, 0, 25.394],
[9001692, 2, 8, 0, 3, 27.9377],
[9003750, 1, 1, 4, 4, 35.1365],
[9031680, 12, 2, 1, 2, 24.83],
[9072000, 7, 4, 3, 1, 22.9636],
[9075780, 2, 3, 1, 5, 34.1216],
[9112500, 2, 6, 5, 0, 28.0062],
[9144576, 8, 6, 0, 2, 24.1841],
[9175040, 18, 0, 1, 1, 23.2175],
[9185400, 3, 8, 2, 1, 25.0048],
[9187500, 2, 1, 6, 2, 32.9428],
[9216000, 13, 2, 3, 0, 23.9052],
[9219840, 8, 1, 1, 4, 30.2537],
[9261000, 3, 3, 3, 3, 30.6697],
[9289728, 14, 4, 0, 1, 21.2122],
[9331200, 9, 6, 2, 0, 24.2426],
[9335088, 4, 5, 0, 4, 28.0774],
[9375000, 3, 1, 8, 0, 30.8311],
[9408000, 9, 1, 3, 2, 29.6678],
[9411920, 4, 0, 1, 6, 31.9982],
[9437184, 20, 2, 0, 0, 20.1543],
[9447840, 5, 10, 1, 0, 26.6009],
[9450000, 4, 3, 5, 1, 27.9534],
[9483264, 10, 3, 0, 3, 27.078],
[9525600, 5, 5, 2, 2, 25.9155],
[9529569, 0, 4, 0, 6, 33.013],
[9565938, 1, 14, 0, 0, 30.8028],
[9568125, 0, 7, 4, 1, 29.7648],
[9600000, 10, 1, 5, 0, 27.4797],
[9604000, 5, 0, 3, 4, 28.7858],
[9633792, 16, 1, 0, 2, 26.3533],
[9644670, 1, 9, 1, 2, 34.2707],
[9646875, 0, 2, 5, 3, 35.3857],
[9676800, 11, 3, 2, 1, 24.6522],
[9680832, 6, 2, 0, 5, 30.3152],
[9720000, 6, 5, 4, 0, 23.5056],
[9724050, 1, 4, 2, 4, 33.7934],
[9765625, 0, 0, 10, 0, 32.1253],
[9797760, 7, 7, 1, 1, 26.3584],
[9800000, 6, 0, 5, 2, 26.6116],
[9830400, 17, 1, 2, 0, 25.5513],
[9834496, 12, 0, 0, 4, 25.0081],
[9841500, 2, 9, 3, 0, 28.4763],
[9843750, 1, 2, 7, 1, 36.5723],
[9878400, 7, 2, 2, 3, 28.2355],
[9882516, 2, 1, 0, 7, 39.693],
[9920232, 3, 11, 0, 1, 27.6579],
[9922500, 2, 4, 4, 2, 29.9309],
[9953280, 13, 5, 1, 0, 26.7729],
[10000000, 7, 0, 7, 0, 24.5461],
[10001880, 3, 6, 1, 3, 34.7747],
[10035200, 13, 0, 2, 2, 23.0579],
[10077696, 9, 9, 0, 0, 24.7336],
[10080000, 8, 2, 4, 1, 27.303],
[10084200, 3, 1, 2, 5, 36.9996],
[10125000, 3, 4, 6, 0, 29.7806],
[10160640, 9, 4, 1, 2, 28.9428],
[10206000, 4, 6, 3, 1, 30.7749],
[10240000, 14, 0, 4, 0, 21.7403],
[10287648, 5, 8, 0, 2, 25.4201],
[10290000, 4, 1, 4, 3, 33.8313],
[10321920, 15, 2, 1, 1, 27.6406],
[10333575, 0, 10, 2, 1, 33.5788],
[10368000, 10, 4, 3, 0, 26.2262],
[10372320, 5, 3, 1, 4, 32.9228],
[10418625, 0, 5, 3, 3, 36.8096],
[10450944, 11, 6, 0, 1, 27.3702],
[10485760, 21, 0, 1, 0, 26.3614],
[10497600, 6, 8, 2, 0, 22.714],
[10500000, 5, 1, 6, 1, 34.423],
[10501974, 1, 7, 0, 4, 36.9656],
[10504375, 0, 0, 4, 5, 37.8089],
[10536960, 11, 1, 1, 3, 34.6491],
[10546875, 0, 3, 8, 0, 36.5675],
[10584000, 6, 3, 3, 2, 29.3069],
[10588410, 1, 2, 1, 6, 43.6987],
[10616832, 17, 4, 0, 0, 26.1758],
[10628820, 2, 12, 1, 0, 30.6068],
[10631250, 1, 5, 5, 1, | |
# src/keras/tests/keras/engine/test_topology.py
import pytest
import json
import numpy as np
from keras.layers import Dense, Dropout, Conv2D, InputLayer
from keras import layers
from keras.engine import Input, Layer, saving, get_source_inputs
from keras.models import Model, Sequential
from keras import backend as K
from keras.models import model_from_json, model_from_yaml
from keras.initializers import Constant
# Marker for tests that require the TensorFlow backend with at least one
# visible GPU; such tests are skipped on other backends or CPU-only hosts.
skipif_no_tf_gpu = pytest.mark.skipif(
    (K.backend() != 'tensorflow' or
     not K.tensorflow_backend._get_available_gpus()),
    reason='Requires TensorFlow backend and a GPU')
def test_get_updates_for():
    """Updates registered for a specific input are returned only for that
    input; unconditional updates are returned for `None`."""
    inp = Input(shape=(2,))
    layer = Dense(1)
    # one update tied to `inp`, one unconditional update
    layer.add_update(0, inputs=inp)
    layer.add_update(1, inputs=None)
    assert layer.get_updates_for(inp) == [0]
    assert layer.get_updates_for(None) == [1]
def test_get_losses_for():
    """Losses registered for a specific input are returned only for that
    input; unconditional losses are returned for `None`."""
    inp = Input(shape=(2,))
    layer = Dense(1)
    # one loss tied to `inp`, one unconditional loss
    layer.add_loss(0, inputs=inp)
    layer.add_loss(1, inputs=None)
    assert layer.get_losses_for(inp) == [0]
    assert layer.get_losses_for(None) == [1]
def test_trainable_weights():
    """Toggling `trainable` on a model or on an individual layer moves its
    weights between `trainable_weights` and `non_trainable_weights`,
    for both functional and sequential models."""
    # functional model with a single Dense layer
    a = Input(shape=(2,))
    b = Dense(1)(a)
    model = Model(a, b)
    weights = model.weights
    assert model.trainable_weights == weights
    assert model.non_trainable_weights == []
    # freezing the whole model moves every weight to non-trainable
    model.trainable = False
    assert model.trainable_weights == []
    assert model.non_trainable_weights == weights
    # ...and unfreezing restores the original split
    model.trainable = True
    assert model.trainable_weights == weights
    assert model.non_trainable_weights == []
    # freezing the single weighted layer has the same effect as freezing the model
    model.layers[1].trainable = False
    assert model.trainable_weights == []
    assert model.non_trainable_weights == weights
    # sequential model
    model = Sequential()
    model.add(Dense(1, input_dim=2))
    weights = model.weights
    assert model.trainable_weights == weights
    assert model.non_trainable_weights == []
    model.trainable = False
    assert model.trainable_weights == []
    assert model.non_trainable_weights == weights
    model.trainable = True
    assert model.trainable_weights == weights
    assert model.non_trainable_weights == []
    # note: layers[0] is the Dense layer in the Sequential case
    model.layers[0].trainable = False
    assert model.trainable_weights == []
    assert model.non_trainable_weights == weights
def test_valid_compute_mask():
    """A Dense layer supports masking and passes an incoming mask through."""
    model = Sequential()
    model.add(Dense(1, input_dim=2))
    first_layer = model.layers[0]
    assert first_layer.supports_masking is True
    assert first_layer.compute_mask([model.input], [0., 1.]) == [0., 1.]
def test_invalid_compute_mask():
    """Conv2D does not support masking: a None mask passes through as None,
    while a real mask raises TypeError in either accepted form."""
    model = Sequential()
    model.add(Conv2D(1, [2, 2], input_shape=[3, 3, 1]))
    conv = model.layers[0]
    assert conv.supports_masking is False
    assert conv.compute_mask([model.input], [None]) is None
    mask = np.array([[0., 1.], [1., 0.]])
    # both a list-wrapped mask and a bare mask must be rejected
    for bad_mask in ([mask], mask):
        with pytest.raises(TypeError):
            conv.compute_mask([model.input], bad_mask)
def test_get_layer():
    """`get_layer` raises ValueError for an out-of-range index, a missing
    index, and an unknown layer name."""
    model = Sequential()
    model.add(Dense(1, input_dim=2))
    for bad_kwargs in ({'index': 5}, {'index': None}, {'name': 'conv'}):
        with pytest.raises(ValueError):
            model.get_layer(**bad_kwargs)
def test_learning_phase():
    """Dropout makes its output depend on the learning phase, and that
    dependency propagates through merges and nested model calls; running
    the graph confirms dropout only fires in the training phase.
    (Removed a stray debug `print(model.input_spec)` left over from
    development — it added noise to the test output and asserted nothing.)"""
    a = Input(shape=(32,), name='input_a')
    b = Input(shape=(32,), name='input_b')
    a_2 = Dense(16, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)
    # only the dropout branch depends on the learning phase
    assert not a_2._uses_learning_phase
    assert b_2._uses_learning_phase
    # test merge: the dependency is contagious
    m = layers.concatenate([a_2, b_2])
    assert m._uses_learning_phase
    # Test recursion: a model containing a phase-dependent layer is itself
    # phase-dependent, as is any application of it to new inputs
    model = Model([a, b], [a_2, b_2])
    assert model.uses_learning_phase
    c = Input(shape=(32,), name='input_c')
    d = Input(shape=(32,), name='input_d')
    c_2, b_2 = model([c, d])
    assert c_2._uses_learning_phase
    assert b_2._uses_learning_phase
    # try actually running graph
    fn = K.function(model.inputs + [K.learning_phase()], model.outputs)
    input_a_np = np.random.random((10, 32))
    input_b_np = np.random.random((10, 32))
    fn_outputs_no_dp = fn([input_a_np, input_b_np, 0])
    fn_outputs_dp = fn([input_a_np, input_b_np, 1])
    # output a: nothing changes
    assert fn_outputs_no_dp[0].sum() == fn_outputs_dp[0].sum()
    # output b: dropout applied
    assert fn_outputs_no_dp[1].sum() != fn_outputs_dp[1].sum()
def test_layer_call_arguments():
    """Arguments passed to a layer's `call` (e.g. `training=True`) are
    retained when the model is reapplied and survive serialization."""
    net_in = layers.Input(shape=(2,))
    hidden = layers.Dense(3)(net_in)
    # forcing training=True removes the dependency on the learning phase
    out = layers.Dropout(0.5)(hidden, training=True)
    model = Model(net_in, out)
    assert not model.uses_learning_phase
    # the call argument sticks when the model is applied to a new input
    fresh_in = layers.Input(shape=(2,))
    assert not model(fresh_in)._uses_learning_phase
    # ...and survives a config round-trip
    rebuilt = Model.from_config(model.get_config())
    assert not rebuilt.uses_learning_phase
def test_node_construction():
    """Applying layers to tensors records the full inbound/outbound node
    topology and exposes it through the layer properties and the
    node-indexed accessors."""
    ####################################################
    # test basics
    a = Input(shape=(32,), name='input_a')
    b = Input(shape=(32,), name='input_b')
    assert a._keras_shape == (None, 32)
    a_layer, a_node_index, a_tensor_index = a._keras_history
    b_layer, b_node_index, b_tensor_index = b._keras_history
    assert len(a_layer._inbound_nodes) == 1
    # BUG FIX: was `a_tensor_index is 0` — identity comparison with an int
    # literal relies on CPython small-int interning and emits a
    # SyntaxWarning on Python 3.8+; use equality.
    assert a_tensor_index == 0
    node = a_layer._inbound_nodes[a_node_index]
    assert node.outbound_layer == a_layer
    # an InputLayer node has no inbound layers; its input equals its output
    assert isinstance(node.inbound_layers, list)
    assert node.inbound_layers == []
    assert isinstance(node.input_tensors, list)
    assert node.input_tensors == [a]
    assert isinstance(node.input_masks, list)
    assert node.input_masks == [None]
    assert isinstance(node.input_shapes, list)
    assert node.input_shapes == [(None, 32)]
    assert isinstance(node.output_tensors, list)
    assert node.output_tensors == [a]
    assert isinstance(node.output_shapes, list)
    assert node.output_shapes == [(None, 32)]
    assert isinstance(node.output_masks, list)
    assert node.output_masks == [None]
    # a shared layer gets one inbound node per application
    dense = Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)
    assert len(dense._inbound_nodes) == 2
    assert len(dense._outbound_nodes) == 0
    assert dense._inbound_nodes[0].inbound_layers == [a_layer]
    assert dense._inbound_nodes[0].outbound_layer == dense
    assert dense._inbound_nodes[1].inbound_layers == [b_layer]
    assert dense._inbound_nodes[1].outbound_layer == dense
    assert dense._inbound_nodes[0].input_tensors == [a]
    assert dense._inbound_nodes[1].input_tensors == [b]
    assert dense._inbound_nodes[0].get_config()['inbound_layers'] == ['input_a']
    assert dense._inbound_nodes[1].get_config()['inbound_layers'] == ['input_b']
    # test layer properties
    test_layer = Dense(16, name='test_layer')
    a_test = test_layer(a)
    assert K.int_shape(test_layer.kernel) == (32, 16)
    assert test_layer.input == a
    assert test_layer.output == a_test
    assert test_layer.input_mask is None
    assert test_layer.output_mask is None
    assert test_layer.input_shape == (None, 32)
    assert test_layer.output_shape == (None, 16)
    # the shared `dense` layer has two nodes, so the singular
    # input/output properties are ambiguous and must raise
    with pytest.raises(AttributeError):
        dense.input
    with pytest.raises(AttributeError):
        dense.output
    with pytest.raises(AttributeError):
        dense.input_mask
    with pytest.raises(AttributeError):
        dense.output_mask
    # ...but the node-indexed accessors disambiguate
    assert dense.get_input_at(0) == a
    assert dense.get_input_at(1) == b
    assert dense.get_output_at(0) == a_2
    assert dense.get_output_at(1) == b_2
    assert dense.get_input_shape_at(0) == (None, 32)
    assert dense.get_input_shape_at(1) == (None, 32)
    assert dense.get_output_shape_at(0) == (None, 16)
    assert dense.get_output_shape_at(1) == (None, 16)
    assert dense.get_input_mask_at(0) is None
    assert dense.get_input_mask_at(1) is None
    assert dense.get_output_mask_at(0) is None
    assert dense.get_output_mask_at(1) is None
def test_multi_input_layer():
    """A concatenate layer with two inbound tensors records a single
    two-input node; the surrounding model computes shapes/masks correctly,
    runs, and survives JSON serialization."""
    ####################################################
    # test multi-input layer
    a = Input(shape=(32,), name='input_a')
    b = Input(shape=(32,), name='input_b')
    # one Dense layer shared across both inputs
    dense = Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)
    merged = layers.concatenate([a_2, b_2], name='merge')
    assert merged._keras_shape == (None, 16 * 2)
    merge_layer, merge_node_index, merge_tensor_index = merged._keras_history
    assert merge_node_index == 0
    assert merge_tensor_index == 0
    # a single node carrying both input tensors/layers
    assert len(merge_layer._inbound_nodes) == 1
    assert len(merge_layer._outbound_nodes) == 0
    assert len(merge_layer._inbound_nodes[0].input_tensors) == 2
    assert len(merge_layer._inbound_nodes[0].inbound_layers) == 2
    c = Dense(64, name='dense_2')(merged)
    d = Dense(5, name='dense_3')(c)
    model = Model(inputs=[a, b], outputs=[c, d], name='model')
    assert len(model.layers) == 6
    expected_shapes = [(None, 64), (None, 5)]
    assert model.compute_output_shape([(None, 32), (None, 32)]) == expected_shapes
    assert model.compute_mask([a, b], [None, None]) == [None, None]
    # NOTE(review): deliberately repeated — presumably checks that the
    # cached shape computation returns the same answer; confirm.
    assert model.compute_output_shape([(None, 32), (None, 32)]) == expected_shapes
    # we don't check names of first 2 layers (inputs) because
    # ordering of same-level layers is not fixed
    expected_names = ['dense_1', 'merge', 'dense_2', 'dense_3']
    assert [l.name for l in model.layers][2:] == expected_names
    assert [l.name for l in model._input_layers] == ['input_a', 'input_b']
    assert [l.name for l in model._output_layers] == ['dense_2', 'dense_3']
    # actually run model
    fn = K.function(model.inputs, model.outputs)
    input_a_np = np.random.random((10, 32))
    input_b_np = np.random.random((10, 32))
    fn_outputs = fn([input_a_np, input_b_np])
    assert [x.shape for x in fn_outputs] == [(10, 64), (10, 5)]
    # test get_source_inputs
    assert get_source_inputs(c) == [a, b]
    # serialization / deserialization
    json_config = model.to_json()
    recreated_model = model_from_json(json_config)
    recreated_model.compile('rmsprop', 'mse')
    assert [l.name for l in recreated_model.layers][2:] == expected_names
    assert [l.name for l in recreated_model._input_layers] == ['input_a', 'input_b']
    assert [l.name for l in recreated_model._output_layers] == ['dense_2', 'dense_3']
    # the reloaded model produces outputs of the same shapes
    fn = K.function(recreated_model.inputs, recreated_model.outputs)
    input_a_np = np.random.random((10, 32))
    input_b_np = np.random.random((10, 32))
    fn_outputs = fn([input_a_np, input_b_np])
    assert [x.shape for x in fn_outputs] == [(10, 64), (10, 5)]
def test_recursion():
####################################################
# test recursion
a = Input(shape=(32,), name='input_a')
b = Input(shape=(32,), name='input_b')
dense = Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = layers.concatenate([a_2, b_2], name='merge')
c = Dense(64, name='dense_2')(merged)
d = Dense(5, name='dense_3')(c)
model = Model(inputs=[a, b], outputs=[c, d], name='model')
e = Input(shape=(32,), name='input_e')
f = Input(shape=(32,), name='input_f')
g, h = model([e, f])
# g2, h2 = model([e, f])
assert g._keras_shape == c._keras_shape
assert h._keras_shape == d._keras_shape
# test separate manipulation of different layer outputs
i = Dense(7, name='dense_4')(h)
final_model = Model(inputs=[e, f], outputs=[i, g], name='final')
assert len(final_model.inputs) == 2
assert len(final_model.outputs) == 2
assert len(final_model.layers) == 4
# we don't check names of first 2 layers (inputs) because
# ordering of same-level layers is not fixed
expected_shapes = [(10, 7), (10, 64)]
assert [layer.name for layer in final_model.layers][2:] == ['model', 'dense_4']
assert model.compute_mask([e, f], [None, None]) == [None, None]
assert final_model.compute_output_shape([(10, 32), (10, 32)]) == expected_shapes
# run recursive model
fn = K.function(final_model.inputs, final_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
assert [x.shape for x in fn_outputs] == [(10, 7), (10, 64)]
# test serialization
model_config = final_model.get_config()
print(json.dumps(model_config, indent=4))
recreated_model = Model.from_config(model_config)
fn = K.function(recreated_model.inputs, recreated_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
assert [x.shape for x in fn_outputs] == [(10, 7), (10, 64)]
####################################################
# test multi-input multi-output
j = Input(shape=(32,), name='input_j')
k = Input(shape=(32,), name='input_k')
m, n = model([j, k])
o = Input(shape=(32,), name='input_o')
p = Input(shape=(32,), name='input_p')
q, r = model([o, p])
assert n._keras_shape == (None, 5)
assert q._keras_shape == (None, 64)
s = layers.concatenate([n, q], name='merge_nq')
assert s._keras_shape == (None, 64 + 5)
# test with single output as 1-elem list
multi_io_model = Model([j, k, o, p], [s])
fn = K.function(multi_io_model.inputs, multi_io_model.outputs)
fn_outputs = fn([np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))])
assert [x.shape for x in fn_outputs] == [(10, 69)]
# test with single output as tensor
multi_io_model = Model([j, k, o, p], s)
fn | |
# gh_stars: 10-100
import os
import sys
import numpy as np
import tensorflow as tf
sys.path.append('util')
import vis_primitive
import vis_pointcloud
from data_loader import *
from encoder import *
from decoder import *
from loss_function import *
# ---------------------------------------------------------------------------
# Command-line configuration (TF1 `tf.app.flags`): data/log paths, training
# schedule, per-phase cuboid counts and the loss-term weights.
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_string('log_dir', 'log/initial_training/PGen_xxxx/0_16_8_4_airplane_0',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_string('train_data',
                           'data/airplane_octree_points_d5_train.tfrecords',
                           """Train data location.""")
tf.app.flags.DEFINE_string('test_data',
                           'data/airplane_octree_points_d5_test_100.tfrecords',
                           """Test data location.""")
tf.app.flags.DEFINE_integer('train_batch_size', 32,
                            """Mini-batch size for the training.""")
tf.app.flags.DEFINE_integer('test_batch_size', 1,
                            """Mini-batch size for the testing.""")
tf.app.flags.DEFINE_float('learning_rate', 0.001,
                          """Initial learning rate.""")
tf.app.flags.DEFINE_integer('max_iter', 100010,
                            """Maximum training iterations.""")
tf.app.flags.DEFINE_integer('test_every_n_steps', 1000,
                            """Test model every n training steps.""")
tf.app.flags.DEFINE_integer('test_iter', 100,
                            """Test steps in testing phase.""")
tf.app.flags.DEFINE_integer('disp_every_n_steps', 5000,
                            """Generate mesh every n training steps.""")
# per-phase cuboid counts (coarse-to-fine: 16 -> 8 -> 4 by default)
tf.app.flags.DEFINE_integer('n_part_1', 16,
                            """Number of cuboids to generate.""")
tf.app.flags.DEFINE_integer('n_part_2', 8,
                            """Number of cuboids to generate in phase two.""")
tf.app.flags.DEFINE_integer('n_part_3', 4,
                            """Number of cuboids to generate in phase three.""")
# relative weights of the individual loss terms
tf.app.flags.DEFINE_float('coverage_weight', 1,
                          """Weight of coverage loss""")
tf.app.flags.DEFINE_float('consistency_weight', 1,
                          """Weight of consistency loss""")
tf.app.flags.DEFINE_float('mutex_weight', 1,
                          """Weight of mutex loss""")
tf.app.flags.DEFINE_float('aligning_weight', 0.001,
                          """Weight of aligning loss""")
tf.app.flags.DEFINE_float('symmetry_weight', 0.1,
                          """Weight of symmetry loss""")
tf.app.flags.DEFINE_float('area_average_weight', 5,
                          """Weight of cube surface area average loss""")
tf.app.flags.DEFINE_float('shape_bias_1', 0.01, """phase one shape bias""")
tf.app.flags.DEFINE_float('shape_bias_2', 0.005, """phase two shape bias""")
tf.app.flags.DEFINE_float('shape_bias_3', 0.001, """phase three shape bias""")
tf.app.flags.DEFINE_string('cache_folder', 'test',
                           """Directory where to dump immediate data.""")
tf.app.flags.DEFINE_string('ckpt', 'None',
                           """Restore weights from checkpoint file.""")
tf.app.flags.DEFINE_boolean('test', False, """Test only flags.""")
tf.app.flags.DEFINE_string('gpu', '0', """GPU id.""")
tf.app.flags.DEFINE_integer('num_points_in_points_file', 5000,
                            """Number of points sampled on original shape surface.""")
FLAGS = tf.app.flags.FLAGS
# silence TensorFlow C++ logging below ERROR and restrict to the chosen GPU
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
# short module-level aliases for frequently used flag values
max_iter = FLAGS.max_iter
test_iter = FLAGS.test_iter
n_part_1 = FLAGS.n_part_1
n_part_2 = FLAGS.n_part_2
n_part_3 = FLAGS.n_part_3
shape_bias_1 = FLAGS.shape_bias_1
shape_bias_2 = FLAGS.shape_bias_2
shape_bias_3 = FLAGS.shape_bias_3
n_points = FLAGS.num_points_in_points_file
# echo the full configuration once at startup for reproducibility
for key, value in tf.app.flags.FLAGS.flag_values_dict().items():
  print('{}: {}'.format(key, value))
print('====')
sys.stdout.flush()
def initial_loss_function(cube_params_1, cube_params_2, cube_params_3,
                          node_position):
  """Assemble the three-phase initial-training losses.

  Phase one fits `n_part_1` cuboids directly to the point cloud; phases
  two and three each merge the previous phase's cuboids into fewer ones.
  Every phase loss is a weighted sum of its distance terms (phase one
  additionally includes the surface-area-average term).

  Returns the 22 tensors in the fixed order the training code unpacks:
  for each phase, the total loss followed by its individual terms.
  """
  with tf.name_scope('initial_loss_function'):
    # --- phase one: direct fit of n_part_1 cuboids -------------------------
    (coverage_distance_1, cube_volume_1, consistency_distance_1,
     mutex_distance_1, aligning_distance_1, symmetry_distance_1,
     cube_area_average_distance_1) = compute_loss_phase_one(
        cube_params_1, node_position)
    loss_1 = (coverage_distance_1 * FLAGS.coverage_weight +
              consistency_distance_1 * FLAGS.consistency_weight +
              mutex_distance_1 * FLAGS.mutex_weight +
              aligning_distance_1 * FLAGS.aligning_weight +
              symmetry_distance_1 * FLAGS.symmetry_weight +
              cube_area_average_distance_1 * FLAGS.area_average_weight)
    # --- phase two: merge phase-one cuboids into n_part_2 ------------------
    (coverage_distance_2, cube_volume_2, consistency_distance_2,
     mutex_distance_2, aligning_distance_2,
     symmetry_distance_2) = compute_loss_phase_merge(
        cube_params_1, cube_params_2, n_part_1, node_position, phase='two')
    loss_2 = (coverage_distance_2 * FLAGS.coverage_weight +
              consistency_distance_2 * FLAGS.consistency_weight +
              mutex_distance_2 * FLAGS.mutex_weight +
              aligning_distance_2 * FLAGS.aligning_weight +
              symmetry_distance_2 * FLAGS.symmetry_weight)
    # --- phase three: merge phase-two cuboids into n_part_3 ----------------
    (coverage_distance_3, cube_volume_3, consistency_distance_3,
     mutex_distance_3, aligning_distance_3,
     symmetry_distance_3) = compute_loss_phase_merge(
        cube_params_2, cube_params_3, n_part_2, node_position, phase='three')
    loss_3 = (coverage_distance_3 * FLAGS.coverage_weight +
              consistency_distance_3 * FLAGS.consistency_weight +
              mutex_distance_3 * FLAGS.mutex_weight +
              aligning_distance_3 * FLAGS.aligning_weight +
              symmetry_distance_3 * FLAGS.symmetry_weight)
    return [loss_1,
            coverage_distance_1, cube_volume_1, consistency_distance_1,
            mutex_distance_1, aligning_distance_1, symmetry_distance_1,
            cube_area_average_distance_1,
            loss_2,
            coverage_distance_2, cube_volume_2, consistency_distance_2,
            mutex_distance_2, aligning_distance_2, symmetry_distance_2,
            loss_3,
            coverage_distance_3, cube_volume_3, consistency_distance_3,
            mutex_distance_3, aligning_distance_3, symmetry_distance_3]
def train_network():
  """Build the training graph: three-phase losses, an Adam step, and
  merged scalar summaries.

  Returns:
    (train_merged, solver): the merged summary op and the Adam train op.
  """
  data, octree, node_position = data_loader(FLAGS.train_data,
                                            FLAGS.train_batch_size, n_points)
  latent_code = encoder(data, octree, is_training=True, reuse=False)
  cube_params_1 = decoder(latent_code, n_part_1, shape_bias_1,
                          name='decoder_phase_one', is_training=True,
                          reuse=False)
  cube_params_2 = decoder(latent_code, n_part_2, shape_bias_2,
                          name='decoder_phase_two', is_training=True,
                          reuse=False)
  cube_params_3 = decoder(latent_code, n_part_3, shape_bias_3,
                          name='decoder_phase_three', is_training=True,
                          reuse=False)
  # outputs layout: [loss_1, <7 phase-1 terms>, loss_2, <6 phase-2 terms>,
  #                  loss_3, <6 phase-3 terms>]
  outputs = initial_loss_function(cube_params_1, cube_params_2,
                                  cube_params_3, node_position)
  train_loss = outputs[0] + outputs[8] + outputs[15]
  with tf.name_scope('train_summary'):
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
      tvars = tf.trainable_variables()
      # Encoder variables first, then the three decoder phases.
      var_list = []
      for tag in ('encoder', 'phase_one', 'phase_two', 'phase_three'):
        var_list += [var for var in tvars if tag in var.name]
      optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
      solver = optimizer.minimize(train_loss, var_list=var_list)
      lr = optimizer._lr  # NOTE: reads a private attribute of AdamOptimizer
      summaries = [tf.summary.scalar('train_loss', train_loss),
                   tf.summary.scalar('learning_rate', lr)]
      # One scalar summary per distance term, suffixed with its phase.
      term_names = ['coverage_distance', 'cube_volume',
                    'consistency_distance', 'mutex_distance',
                    'aligning_distance', 'symmetry_distance']
      phases = [('1', term_names + ['cube_area_average_distance'],
                 outputs[1:8]),
                ('2', term_names, outputs[9:15]),
                ('3', term_names, outputs[16:22])]
      for suffix, names, tensors in phases:
        for base, tensor in zip(names, tensors):
          summaries.append(
              tf.summary.scalar('%s_%s' % (base, suffix), tensor))
      train_merged = tf.summary.merge(summaries)
  return train_merged, solver
def test_network():
  """Build the evaluation graph (weights shared with training via reuse).

  Returns:
    [test_merged, average_test_loss placeholder, test_loss, node_position,
     latent_code, cube_params_1, cube_params_2, cube_params_3]
  """
  data, octree, node_position = data_loader(FLAGS.test_data,
                                            FLAGS.test_batch_size, n_points,
                                            test=True)
  latent_code = encoder(data, octree, is_training=False, reuse=True)
  phase_specs = ((n_part_1, shape_bias_1, 'one'),
                 (n_part_2, shape_bias_2, 'two'),
                 (n_part_3, shape_bias_3, 'three'))
  cubes = [decoder(latent_code, n_part, bias,
                   name='decoder_phase_' + tag, is_training=False,
                   reuse=True)
           for n_part, bias, tag in phase_specs]
  cube_params_1, cube_params_2, cube_params_3 = cubes
  losses = initial_loss_function(cube_params_1, cube_params_2, cube_params_3,
                                 node_position)
  # Positions 0, 8 and 15 hold the per-phase total losses.
  test_loss = losses[0] + losses[8] + losses[15]
  with tf.name_scope('test_summary'):
    # The reported loss is an average computed outside the graph and fed
    # back through this placeholder.
    average_test_loss = tf.placeholder(tf.float32)
    test_merged = tf.summary.merge(
        [tf.summary.scalar('test_loss', average_test_loss)])
  return [test_merged,
          average_test_loss, test_loss,
          node_position,
          latent_code,
          cube_params_1, cube_params_2, cube_params_3]
def main(argv=None):
train_summary, solver = train_network()
[test_summary,
average_test_loss, test_loss,
test_node_position,
test_latent_code,
cube_params_1, cube_params_2, cube_params_3
] = test_network()
# checkpoint
ckpt = tf.train.latest_checkpoint(FLAGS.ckpt)
start_iters = 0 if not ckpt else int(ckpt[ckpt.find('iter') + 4:-5]) + 1
# saver
tvars = tf.trainable_variables()
encoder_vars = [var for var in tvars if 'encoder' in var.name]
decoder_1_vars = [var for var in tvars if 'phase_one' in var.name]
decoder_2_vars = [var for var in tvars if 'phase_two' in var.name]
decoder_3_vars = [var for var in tvars if 'phase_three' in var.name]
restore_vars = encoder_vars + decoder_1_vars + decoder_2_vars + decoder_3_vars
save_vars = encoder_vars + decoder_1_vars + decoder_2_vars + decoder_3_vars
tf_saver = tf.train.Saver(var_list=save_vars, max_to_keep=100)
if ckpt:
assert(os.path.exists(FLAGS.ckpt))
tf_restore_saver = tf.train.Saver(var_list=restore_vars, max_to_keep=100)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
# tf summary
summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
# initialize
init = tf.global_variables_initializer()
sess.run(init)
if ckpt:
tf_restore_saver.restore(sess, ckpt)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
dump_dir = os.path.join('dump', FLAGS.cache_folder)
if not os.path.exists(dump_dir): os.makedirs(dump_dir)
obj_dir = os.path.join('obj', FLAGS.cache_folder)
if not os.path.exists(obj_dir): os.makedirs(obj_dir)
if FLAGS.test:
for it in range(test_iter):
[test_loss_total_value,
cube_params_1_value, cube_params_2_value, cube_params_3_value,
node_position_value,
latent_code_value
] = sess.run([
test_loss,
cube_params_1, cube_params_2, cube_params_3,
test_node_position,
test_latent_code
])
print('Iter {} loss: {}'.format(it, test_loss_total_value))
with open(os.path.join(dump_dir, 'cube_1_{:04d}.txt'.format(it)), 'w') as f:
z = np.reshape(cube_params_1_value[0], [n_part_1, 3])
q = np.reshape(cube_params_1_value[1], [n_part_1, 4])
t = np.reshape(cube_params_1_value[2], [n_part_1, 3])
for j in range(n_part_1):
f.write('{} {} {} '.format(z[j][0], z[j][1], z[j][2]))
f.write('{} {} {} {} '.format(q[j][0], q[j][1], q[j][2], q[j][3]))
f.write('{} {} {}\n'.format(t[j][0], t[j][1], t[j][2]))
cube_params = np.loadtxt(os.path.join(dump_dir, 'cube_1_{:04d}.txt'.format(it)))
obj_filename = os.path.join(obj_dir, 'cube_1_{:04d}.obj'.format(it))
vis_primitive.save_parts(cube_params, obj_filename, level='1')
with open(os.path.join(dump_dir, 'cube_2_{:04d}.txt'.format(it)), 'w') as f:
z = np.reshape(cube_params_2_value[0], [n_part_2, 3])
q = np.reshape(cube_params_2_value[1], [n_part_2, 4])
t = np.reshape(cube_params_2_value[2], [n_part_2, 3])
for j in range(n_part_2):
f.write('{} {} {} '.format(z[j][0], z[j][1], z[j][2]))
f.write('{} {} {} {} '.format(q[j][0], q[j][1], q[j][2], q[j][3]))
f.write('{} {} {}\n'.format(t[j][0], t[j][1], t[j][2]))
cube_params = np.loadtxt(os.path.join(dump_dir, 'cube_2_{:04d}.txt'.format(it)))
obj_filename = os.path.join(obj_dir, 'cube_2_{:04d}.obj'.format(it))
vis_primitive.save_parts(cube_params, obj_filename, level='2')
with open(os.path.join(dump_dir, 'cube_3_{:04d}.txt'.format(it)), 'w') as f:
z = np.reshape(cube_params_3_value[0], [n_part_3, 3])
q = np.reshape(cube_params_3_value[1], [n_part_3, 4])
t = np.reshape(cube_params_3_value[2], [n_part_3, 3])
for j in range(n_part_3):
f.write('{} {} {} '.format(z[j][0], z[j][1], z[j][2]))
f.write('{} {} {} {} '.format(q[j][0], q[j][1], q[j][2], q[j][3]))
f.write('{} {} {}\n'.format(t[j][0], t[j][1], t[j][2]))
cube_params = np.loadtxt(os.path.join(dump_dir, 'cube_3_{:04d}.txt'.format(it)))
obj_filename = os.path.join(obj_dir, 'cube_3_{:04d}.obj'.format(it))
vis_primitive.save_parts(cube_params, obj_filename, level='3')
np.savetxt(os.path.join(dump_dir, 'node_position_{:04d}.txt'.format(it)), node_position_value)
np.savetxt(os.path.join(dump_dir, 'latent_code_{:04d}.txt'.format(it)), np.reshape(latent_code_value, [-1]))
# pc_filename = os.path.join(obj_dir, 'pc_{:04d}.obj'.format(it))
# vis_pointcloud.save_points(np.transpose(node_position_value),
# pc_filename, depth=6)
else:
# start training
for i in range(start_iters, max_iter):
if coord.should_stop():
break
if i % FLAGS.test_every_n_steps == 0:
avg_test_loss = 0
for it in range(test_iter):
[test_loss_total_value,
cube_params_1_value, cube_params_2_value, cube_params_3_value,
node_position_value
] = sess.run([
test_loss,
cube_params_1, cube_params_2, cube_params_3,
test_node_position])
avg_test_loss += test_loss_total_value
if i % FLAGS.disp_every_n_steps == 0:
with open(os.path.join(dump_dir, 'cube_1_{:06d}_{:04d}.txt'.format(i, it)), 'w') as f:
z = np.reshape(cube_params_1_value[0], [n_part_1, 3])
q = np.reshape(cube_params_1_value[1], [n_part_1, 4])
t = np.reshape(cube_params_1_value[2], [n_part_1, 3])
for j in range(n_part_1):
f.write('{} {} {} '.format(z[j][0], z[j][1], z[j][2]))
f.write('{} {} {} {} '.format(q[j][0], q[j][1], q[j][2], q[j][3]))
f.write('{} {} {}\n'.format(t[j][0], t[j][1], t[j][2]))
cube_params = np.loadtxt(os.path.join(dump_dir, 'cube_1_{:06d}_{:04d}.txt'.format(i, it)))
obj_filename = os.path.join(obj_dir, 'cube_1_{:06d}_{:04d}.obj'.format(i, it))
vis_primitive.save_parts(cube_params, obj_filename, level='1')
with open(os.path.join(dump_dir, 'cube_2_{:06d}_{:04d}.txt'.format(i, it)), 'w') as f:
z = np.reshape(cube_params_2_value[0], [n_part_2, 3])
q = np.reshape(cube_params_2_value[1], [n_part_2, 4])
t = np.reshape(cube_params_2_value[2], [n_part_2, 3])
for j in range(n_part_2):
f.write('{} {} {} '.format(z[j][0], z[j][1], z[j][2]))
f.write('{} {} {} {} '.format(q[j][0], q[j][1], q[j][2], q[j][3]))
f.write('{} {} {}\n'.format(t[j][0], t[j][1], t[j][2]))
cube_params = np.loadtxt(os.path.join(dump_dir, 'cube_2_{:06d}_{:04d}.txt'.format(i, it)))
obj_filename = os.path.join(obj_dir, 'cube_2_{:06d}_{:04d}.obj'.format(i, it))
vis_primitive.save_parts(cube_params, obj_filename, level='2')
with open(os.path.join(dump_dir, 'cube_3_{:06d}_{:04d}.txt'.format(i, it)), 'w') as f:
z = np.reshape(cube_params_3_value[0], [n_part_3, 3])
q = np.reshape(cube_params_3_value[1], [n_part_3, 4])
t = np.reshape(cube_params_3_value[2], [n_part_3, 3])
for j | |
# Repository: erich666/apitrace
#!/usr/bin/env python
##########################################################################
#
# Copyright 2011 <NAME>
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
import difflib
import itertools
import optparse
import os.path
import platform
import shutil
import subprocess
import sys
import tempfile
##########################################################################/
#
# Abstract interface
#
class Differ:
    """Abstract interface for a trace-diffing back-end.

    Concrete subclasses implement setRefTrace/setSrcTrace to register
    the two traces and diff() to render the comparison.
    """

    def __init__(self, apitrace):
        """Remember the apitrace executable path and whether stdout is a TTY."""
        self.apitrace = apitrace
        self.isatty = sys.stdout.isatty()

    def setRefTrace(self, ref_trace, ref_calls):
        """Register the reference trace; subclasses must implement."""
        raise NotImplementedError

    def setSrcTrace(self, src_trace, src_calls):
        """Register the source trace; subclasses must implement."""
        raise NotImplementedError

    def diff(self):
        """Render the diff; subclasses must implement."""
        raise NotImplementedError
##########################################################################/
#
# External diff tool
#
class AsciiDumper:
    """Dump a trace to a temporary file in plain ASCII, asynchronously."""

    def __init__(self, apitrace, trace, calls, callNos):
        # The dump goes into a named temporary file so external diff
        # tools can be pointed at it by path.
        self.output = tempfile.NamedTemporaryFile()
        command = [
            apitrace,
            'dump',
            '--color=never',
            '--call-nos=%s' % ('yes' if callNos else 'no'),
            '--arg-names=no',
            '--calls=' + calls,
            trace,
        ]
        # Run in the background; callers wait on self.dump before
        # reading self.output.
        self.dump = subprocess.Popen(
            args=command,
            stdout=self.output,
            universal_newlines=True,
        )
class ExternalDiffer(Differ):
    """Diff back-end that shells out to an external tool
    ('diff', 'sdiff', or 'wdiff')."""

    # ANSI escape sequences used to mark deleted/inserted text when
    # writing to a terminal.  Left empty on Windows, whose consoles
    # historically did not interpret ANSI codes.
    if platform.system() == 'Windows':
        start_delete = ''
        end_delete = ''
        start_insert = ''
        end_insert = ''
    else:
        start_delete = '\33[9m\33[31m'  # strike-through + red
        end_delete = '\33[0m'           # reset attributes
        start_insert = '\33[32m'        # green
        end_insert = '\33[0m'           # reset attributes

    def __init__(self, apitrace, tool, width=None, callNos = False):
        """Build the command line for *tool*; unknown tools are a bug
        (assert False)."""
        Differ.__init__(self, apitrace)
        self.diff_args = [tool]
        if tool == 'diff':
            self.diff_args += [
                '--speed-large-files',
            ]
            if self.isatty:
                # GNU diff line formats: color whole removed/added lines.
                self.diff_args += [
                    '--old-line-format=' + self.start_delete + '%l' + self.end_delete + '\n',
                    '--new-line-format=' + self.start_insert + '%l' + self.end_insert + '\n',
                ]
        elif tool == 'sdiff':
            if width is None:
                # Fall back to the terminal width reported by curses.
                import curses
                curses.setupterm()
                width = curses.tigetnum('cols')
            self.diff_args += [
                '--width=%u' % width,
                '--speed-large-files',
            ]
        elif tool == 'wdiff':
            self.diff_args += [
                #'--terminal',
                '--avoid-wraps',
            ]
            if self.isatty:
                # wdiff takes explicit delete/insert delimiters.
                self.diff_args += [
                    '--start-delete=' + self.start_delete,
                    '--end-delete=' + self.end_delete,
                    '--start-insert=' + self.start_insert,
                    '--end-insert=' + self.end_insert,
                ]
        else:
            assert False
        self.callNos = callNos

    def setRefTrace(self, ref_trace, ref_calls):
        # Start dumping the reference trace in the background.
        self.ref_dumper = AsciiDumper(self.apitrace, ref_trace, ref_calls, self.callNos)

    def setSrcTrace(self, src_trace, src_calls):
        # Start dumping the source trace in the background.
        self.src_dumper = AsciiDumper(self.apitrace, src_trace, src_calls, self.callNos)

    def diff(self):
        """Run the external tool on both dumps, paging through less on a TTY."""
        diff_args = self.diff_args + [
            self.ref_dumper.output.name,
            self.src_dumper.output.name,
        ]
        # Both dumps run asynchronously; wait for them to finish before
        # handing the files to the diff tool.
        self.ref_dumper.dump.wait()
        self.src_dumper.dump.wait()
        less = None
        if self.isatty:
            # less -F: quit if one screen, -R: pass ANSI colors through,
            # -X: no termcap init/deinit, -n: suppress line numbers.
            less = subprocess.Popen(
                args = ['less', '-FRXn'],
                stdin = subprocess.PIPE
            )
            diff_stdout = less.stdin
        else:
            diff_stdout = None
        diff = subprocess.Popen(
            args = diff_args,
            stdout = diff_stdout,
            universal_newlines = True,
        )
        diff.wait()
        if less is not None:
            less.stdin.close()
            less.wait()
##########################################################################/
#
# Python diff
#
from unpickle import Unpickler, Dumper, Rebuilder
from highlight import PlainHighlighter, LessHighlighter
# Calls whose results legitimately differ between machines or runs
# (version/extension strings, pointer lookups) and must therefore be
# excluded from the comparison.
ignoredFunctionNames = {
    'glGetString',
    'glXGetClientString',
    'glXGetCurrentDisplay',
    'glXGetCurrentContext',
    'glXGetProcAddress',
    'glXGetProcAddressARB',
    'wglGetProcAddress',
}
class Blob:
    '''Data-less proxy for bytearrays, to save memory.

    Only the byte length and a content hash are kept, so two blobs are
    considered equal when both match.
    '''

    def __init__(self, size, hash):
        # Length in bytes of the original array and a hash of its contents.
        self.size = size
        self.hash = hash

    def __repr__(self):
        return 'blob(%u)' % self.size

    def __eq__(self, other):
        return isinstance(other, Blob) and self.size == other.size and self.hash == other.hash

    def __ne__(self, other):
        # Bug fix: Python 2 does not derive != from ==, so without this
        # two equal blobs would still compare unequal under the !=
        # operator (identity comparison).
        return not self.__eq__(other)

    def __hash__(self):
        # Must stay consistent with __eq__ for use in difflib matching.
        return self.hash
class BlobReplacer(Rebuilder):
    '''Replace blobs with proxys.'''

    def visitByteArray(self, obj):
        # Collapse the raw bytes into a light-weight Blob proxy.
        return Blob(len(obj), hash(str(obj)))

    def visitCall(self, call):
        # Bug fix: map() returns a one-shot iterator on Python 3, which
        # would leave call.args exhausted after a single traversal; a
        # list comprehension is identical on Python 2 and safe on both.
        call.args = [self.visit(arg) for arg in call.args]
        call.ret = self.visit(call.ret)
class Loader(Unpickler):
    """Unpickler that accumulates calls, rewriting blobs into proxies."""

    def __init__(self, stream):
        Unpickler.__init__(self, stream)
        self.calls = []
        self.rebuilder = BlobReplacer()

    def handleCall(self, call):
        # Environment-dependent query calls carry no diff information;
        # drop them entirely.
        if call.functionName in ignoredFunctionNames:
            return
        self.rebuilder.visitCall(call)
        self.calls.append(call)
class PythonDiffer(Differ):
    """Pure-Python diff back-end built on difflib.

    NOTE(review): this class relies on Python 2 only constructs
    (``xrange``, ``itertools.imap``, old-style ``raise ValueError, ...``)
    and will not run unmodified on Python 3.
    """

    def __init__(self, apitrace, callNos = False):
        """Set up highlighting (colored pager on a TTY) and diff state."""
        Differ.__init__(self, apitrace)
        # Call lists for the reference (a) and source (b) traces.
        self.a = None
        self.b = None
        if self.isatty:
            self.highlighter = LessHighlighter()
        else:
            self.highlighter = PlainHighlighter()
        self.delete_color = self.highlighter.red
        self.insert_color = self.highlighter.green
        self.callNos = callNos
        # Widths of the most recently printed call numbers, used to pad
        # the opposite column and keep output aligned.
        self.aSpace = 0
        self.bSpace = 0
        self.dumper = Dumper()

    def setRefTrace(self, ref_trace, ref_calls):
        self.a = self.readTrace(ref_trace, ref_calls)

    def setSrcTrace(self, src_trace, src_calls):
        self.b = self.readTrace(src_trace, src_calls)

    def readTrace(self, trace, calls):
        """Pickle-dump *trace* via apitrace and parse it into a call list."""
        p = subprocess.Popen(
            args = [
                self.apitrace,
                'pickle',
                '--symbolic',
                '--calls=' + calls,
                trace
            ],
            stdout = subprocess.PIPE,
        )
        parser = Loader(p.stdout)
        parser.parse()
        return parser.calls

    def diff(self):
        """Render the diff; IOError (e.g. the pager pipe closing) is benign."""
        try:
            self._diff()
        except IOError:
            pass

    def _diff(self):
        # Top-level pass: match whole call objects, treating successful
        # glGetError calls as junk.
        matcher = difflib.SequenceMatcher(self.isjunk, self.a, self.b)
        for tag, alo, ahi, blo, bhi in matcher.get_opcodes():
            if tag == 'replace':
                self.replace(alo, ahi, blo, bhi)
            elif tag == 'delete':
                self.delete(alo, ahi, blo, bhi)
            elif tag == 'insert':
                self.insert(alo, ahi, blo, bhi)
            elif tag == 'equal':
                self.equal(alo, ahi, blo, bhi)
            else:
                raise ValueError, 'unknown tag %s' % (tag,)

    def isjunk(self, call):
        # A glGetError that reported no error carries no information.
        return call.functionName == 'glGetError' and call.ret in ('GL_NO_ERROR', 0)

    def replace(self, alo, ahi, blo, bhi):
        """Refine a replaced range by re-matching on function names only."""
        assert alo < ahi and blo < bhi
        a_names = [call.functionName for call in self.a[alo:ahi]]
        b_names = [call.functionName for call in self.b[blo:bhi]]
        matcher = difflib.SequenceMatcher(None, a_names, b_names)
        for tag, _alo, _ahi, _blo, _bhi in matcher.get_opcodes():
            # Opcode indices refer to the sliced name lists; rebase them
            # onto the full call lists.
            _alo += alo
            _ahi += alo
            _blo += blo
            _bhi += blo
            if tag == 'replace':
                self.replace_dissimilar(_alo, _ahi, _blo, _bhi)
            elif tag == 'delete':
                self.delete(_alo, _ahi, _blo, _bhi)
            elif tag == 'insert':
                self.insert(_alo, _ahi, _blo, _bhi)
            elif tag == 'equal':
                # Same function name on both sides: compare argument by
                # argument.
                self.replace_similar(_alo, _ahi, _blo, _bhi)
            else:
                raise ValueError, 'unknown tag %s' % (tag,)

    def replace_similar(self, alo, ahi, blo, bhi):
        """Print pairs of same-named calls, highlighting differing
        arguments and return values."""
        assert alo < ahi and blo < bhi
        assert ahi - alo == bhi - blo
        for i in xrange(0, bhi - blo):
            self.highlighter.write('| ')
            a_call = self.a[alo + i]
            b_call = self.b[blo + i]
            assert a_call.functionName == b_call.functionName
            self.dumpCallNos(a_call.no, b_call.no)
            self.highlighter.bold(True)
            self.highlighter.write(b_call.functionName)
            self.highlighter.bold(False)
            self.highlighter.write('(')
            sep = ''
            numArgs = max(len(a_call.args), len(b_call.args))
            for j in xrange(numArgs):
                self.highlighter.write(sep)
                # NOTE(review): when one call has fewer arguments,
                # a_arg/b_arg keep their value from the previous
                # iteration (and are unbound on the very first one) --
                # presumably same-named calls rarely differ in arity;
                # verify before relying on this path.
                try:
                    a_arg = a_call.args[j]
                except IndexError:
                    pass
                try:
                    b_arg = b_call.args[j]
                except IndexError:
                    pass
                self.replace_value(a_arg, b_arg)
                sep = ', '
            self.highlighter.write(')')
            if a_call.ret is not None or b_call.ret is not None:
                self.highlighter.write(' = ')
                self.replace_value(a_call.ret, b_call.ret)
            self.highlighter.write('\n')

    def replace_dissimilar(self, alo, ahi, blo, bhi):
        """Render a replacement of differently-named calls as an
        insert/delete pair, shorter side first."""
        assert alo < ahi and blo < bhi
        if bhi - blo < ahi - alo:
            self.insert(alo, alo, blo, bhi)
            self.delete(alo, ahi, bhi, bhi)
        else:
            self.delete(alo, ahi, blo, blo)
            self.insert(ahi, ahi, blo, bhi)

    def replace_value(self, a, b):
        """Write value *b*; when it differs from *a*, show *a* struck out
        in the delete color followed by *b* in the insert color."""
        if b == a:
            self.highlighter.write(self.dumper.visit(b))
        else:
            self.highlighter.strike()
            self.highlighter.color(self.delete_color)
            self.highlighter.write(self.dumper.visit(a))
            self.highlighter.normal()
            self.highlighter.write(" ")
            self.highlighter.color(self.insert_color)
            self.highlighter.write(self.dumper.visit(b))
            self.highlighter.normal()

    # NOTE(review): appears unused within this class; possibly a leftover.
    escape = "\33["

    def delete(self, alo, ahi, blo, bhi):
        """Print reference-only calls, struck out in the delete color."""
        assert alo < ahi
        assert blo == bhi
        for i in xrange(alo, ahi):
            call = self.a[i]
            self.highlighter.write('- ')
            self.dumpCallNos(call.no, None)
            self.highlighter.strike()
            self.highlighter.color(self.delete_color)
            self.dumpCall(call)

    def insert(self, alo, ahi, blo, bhi):
        """Print source-only calls in the insert color."""
        assert alo == ahi
        assert blo < bhi
        for i in xrange(blo, bhi):
            call = self.b[i]
            self.highlighter.write('+ ')
            self.dumpCallNos(None, call.no)
            self.highlighter.color(self.insert_color)
            self.dumpCall(call)

    def equal(self, alo, ahi, blo, bhi):
        """Print calls that match on both sides, unhighlighted."""
        assert alo < ahi and blo < bhi
        assert ahi - alo == bhi - blo
        for i in xrange(0, bhi - blo):
            self.highlighter.write('  ')
            a_call = self.a[alo + i]
            b_call = self.b[blo + i]
            assert a_call.functionName == b_call.functionName
            assert len(a_call.args) == len(b_call.args)
            self.dumpCallNos(a_call.no, b_call.no)
            self.dumpCall(b_call)

    def dumpCallNos(self, aNo, bNo):
        """Write the aligned pair of call numbers, when enabled.

        A missing number is padded with the width of the last one
        printed on that side, keeping the columns aligned."""
        if not self.callNos:
            return
        if aNo is None:
            self.highlighter.write(' '*self.aSpace)
        else:
            aNoStr = str(aNo)
            self.highlighter.strike()
            self.highlighter.color(self.delete_color)
            self.highlighter.write(aNoStr)
            self.highlighter.normal()
            self.aSpace = len(aNoStr)
        self.highlighter.write(' ')
        if bNo is None:
            self.highlighter.write(' '*self.bSpace)
        else:
            bNoStr = str(bNo)
            self.highlighter.color(self.insert_color)
            self.highlighter.write(bNoStr)
            self.highlighter.normal()
            self.bSpace = len(bNoStr)
        self.highlighter.write(' ')

    def dumpCall(self, call):
        """Write a full call: bold name, arguments, return value, newline."""
        self.highlighter.bold(True)
        self.highlighter.write(call.functionName)
        self.highlighter.bold(False)
        self.highlighter.write('(' + ', '.join(itertools.imap(self.dumper.visit, call.args)) + ')')
        if call.ret is not None:
            self.highlighter.write(' = ' + self.dumper.visit(call.ret))
        self.highlighter.normal()
        self.highlighter.write('\n')
##########################################################################/
#
# Main program
#
def which(executable):
    '''Return True if *executable* can be found on the PATH.'''
    # On Windows executables carry an .exe suffix; elsewhere the bare
    # name is tried as-is.
    if platform.system() == 'Windows':
        extensions = ['.exe']
    else:
        extensions = ['']
    for directory in os.environ['PATH'].split(os.path.pathsep):
        candidate = os.path.join(directory, executable)
        if any(os.path.exists(candidate + extension)
               for extension in extensions):
            return True
    return False
def main():
'''Main program.
'''
# Parse command line options
optparser = optparse.OptionParser(
usage='\n\t%prog [options] TRACE TRACE',
version='%%prog')
optparser.add_option(
'-a', '--apitrace', metavar='PROGRAM',
type='string', dest='apitrace', default='apitrace',
help='apitrace command [default: %default]')
optparser.add_option(
'-t', '--tool', metavar='TOOL',
type="choice", choices=('diff', 'sdiff', 'wdiff', 'python'),
dest="tool", default=None,
help="diff tool: diff, sdiff, wdiff, or python [default: auto]")
optparser.add_option(
'-c', '--calls', metavar='CALLSET',
type="string", dest="calls", default='0-10000',
help="calls to compare [default: %default]")
optparser.add_option(
'--ref-calls', metavar='CALLSET',
type="string", dest="ref_calls", default=None,
help="calls to compare from reference trace")
optparser.add_option(
'--src-calls', metavar='CALLSET',
type="string", dest="src_calls", default=None,
| |
# Repository: WertiaCoffee/GeminiMotorDrive
# Copyright 2014-2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is the GeminiMotorDrive package, a Python package for controlling a
Parker Hannifin Gemini GV-6 and GT-6 servo and stepper motor drives.
Version 0.2
"""
__version__ = "0.2"
import math
import copy
import re
from . import drivers, utilities
class GeminiError(IOError):
    """Base exception class for this module."""
class CommandError(GeminiError):
    """Exception raised when executing a drive command fails."""
def get_driver(driver='ASCII_RS232', *args, **keywords):
    """ Gets a driver for a Parker Motion Gemini drive.

    Gets and connects a particular driver in ``drivers`` to a Parker
    Motion Gemini GV-6 or GT-6 servo/stepper motor drive. The only
    driver currently supported is ``'ASCII_RS232'``, which corresponds
    to ``drivers.ASCII_RS232``.

    Parameters
    ----------
    driver : str, optional
        The driver to communicate to the drive with, covering the
        hardware connection and possibly the communications protocol.
        Matched case-insensitively.
    *args : additional positional arguments
        Passed on to the driver's constructor.
    **keywords : additional keyword arguments
        Passed on to the driver's constructor.

    Returns
    -------
    drivers : drivers
        The connected drivers class that is connected to the drive.

    Raises
    ------
    NotImplementedError
        If the `driver` is not supported.

    See Also
    --------
    drivers
    drivers.ASCII_RS232
    """
    # Guard clause: reject anything other than the one supported driver.
    if driver.upper() != 'ASCII_RS232':
        raise NotImplementedError('Driver not supported: '
                                  + str(driver))
    return drivers.ASCII_RS232(*args, **keywords)
class GeminiG6(object):
""" Controller for a Parker Motion Gemini GV-6 or GT-6.
An object to connect to and control a Parker Motion Gemini GV-6 or
GT-6 servo/stepper motor drive already connected to with a
particular `driver`.
Parameters
----------
driver : driver
Connected instance of a class in ``drivers``. Use ``get_driver``
to load one. Is stored in the attribute ``driver``.
Raises
------
GeminiError
If the attached device is not a Gemini GV-6 or GT-6.
Attributes
----------
driver : driver
Driver for communicating to the drive.
energized : bool
denergize_on_kill : bool
encoder_resolution : int
electrical_pitch : float
max_velocity : float
motion_commanded : bool
See Also
--------
get_driver
"""
def __init__(self, driver):
#: Driver for communicating to the drive.
#:
#: driver
#:
#: A class from ``GeminiMotorDriver.drivers``. Can be loaded
#: using ``get_driver``.
#:
#: See Also
#: --------
#: get_driver
self.driver = driver
# Make sure that it is indeed a GV/T6, and throw an exception
# otherwise. It should respond to the 'TREV' command with 'TREV'
# echoed and '*TREV-GV6-L3E_D1.50_F1.00' where everything after
# the 'GV6' (possibly replaced with a 'GT6') part is model
# dependent.
response = self.driver.send_command('TREV', timeout=1.0,
immediate=True)
if re.search('^!TREV\r\\*TREV-G[VT]{1}6', response[1]) is None:
raise GeminiError('Not a valid Gemini GV-6 or GT-6 device.')
def _get_parameter(self, name, tp, timeout=1.0, max_retries=2):
""" Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned to the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter.
"""
# Raise a TypeError if tp isn't one of the valid types.
if tp not in (bool, int, float):
raise TypeError('Only supports bool, int, and float; not '
+ str(tp))
# Sending a command of name queries the state for that
# parameter. The response will have name preceeded by an '*' and
# then followed by a number which will have to be converted.
response = self.driver.send_command(name, timeout=timeout,
immediate=True,
max_retries=max_retries)
# If the response has an error, there are no response lines, or
# the first response line isn't '*'+name; then there was an
# error and an exception needs to be thrown.
if self.driver.command_error(response) \
or len(response[4]) == 0 \
or not response[4][0].startswith('*' + name):
raise CommandError('Couldn''t retrieve parameter '
+ name)
# Extract the string representation of the value, which is after
# the '*'+name.
value_str = response[4][0][(len(name)+1):]
# Convert the value string to the appropriate type and return
# it. Throw an error if it is not supported.
if tp == bool:
return (value_str == '1')
elif tp == int:
return int(value_str)
elif tp == float:
return float(value_str)
def _set_parameter(self, name, value, tp, timeout=1.0,
max_retries=2):
""" Sets the specified drive parameter.
Sets a parameter on the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to set. It is always the command to
set it when followed by the value.
value : bool, int, or float
Value to set the parameter to.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
success : bool
Whether the last attempt to set the parameter was successful
(``True``) or not (``False`` meaning it had an error).
See Also
--------
_get_parameter : Get a parameter.
"""
# Return False if tp isn't one of the valid types.
if tp not in (bool, int, float):
return False
# Convert value to the string that the drive will expect. value
# must first be converted to the proper type before getting
# converted to str in the usual fasion. As bools need to be a
# '1' or a '0', it must be converted to int before going through
# str.
if tp == bool:
value_str = str(int(bool(value)))
elif tp == int:
value_str = str(int(value))
elif tp == float:
value_str = str(float(value))
# Immediately set the named parameter of the drive. The command
# is just the parameter name followed by the value string.
response = self.driver.send_command(name+value_str, \
timeout=timeout, immediate=True, max_retries=max_retries)
# Return whether the setting was successful or not.
return not self.driver.command_error(response)
    def pause(self, max_retries=0):
        """ Pauses the drive (execution of commands).

        Causes the drive to pause execution of commands till it is
        unpaused. Commands will be queued until it is unpaused. Motion
        is not stopped.

        Parameters
        ----------
        max_retries : int, optional
            Maximum number of retries to do to pause the drive in the
            case of errors.

        Returns
        -------
        success : bool
            Whether the last pause command (last try or retry) was
            successful (``True``) or not (``False`` meaning it had an
            error).

        Notes
        -----
        The command sent to the drive is '!PS' ('PS' sent as an
        immediate command).

        See Also
        --------
        unpause : Unpause the drive.
        """
        return (not self.driver.command_error(
            self.driver.send_command('PS', timeout=1.0,
                                     immediate=True, max_retries=max_retries)))
def unpause(self, max_retries=0):
""" Unpauses the drive.
Unpauses the drive. Commands queued while it is paused will then
be executed.
Parameters
----------
max_retries : int, optional
Maximum number of retries to do to unpause the drive in the
case of errors.
Returns
-------
success : bool
Whether the last | |
# tests/test_steady_db.py
"""Test the SteadyDB module.
Note:
We do not test any real DB-API 2 module, but we just
mock the basic DB-API 2 connection functionality.
Copyright and credit info:
* This test was contributed by <NAME>
"""
import unittest
from . import mock_db as dbapi
from dbutils.steady_db import (
connect as SteadyDBconnect, SteadyDBConnection, SteadyDBCursor)
class TestSteadyDB(unittest.TestCase):
def test_version(self):
from dbutils import __version__, steady_db
self.assertEqual(steady_db.__version__, __version__)
self.assertEqual(steady_db.SteadyDBConnection.version, __version__)
    def test_mocked_connection(self):
        """Sanity-check the raw mock DB-API 2 connection used by all other tests."""
        db = dbapi.connect(
            'SteadyDBTestDB', user='SteadyDBTestUser')
        db.__class__.has_ping = False
        db.__class__.num_pings = 0
        # The mock exposes the bookkeeping attributes the tests below rely on.
        self.assertTrue(hasattr(db, 'database'))
        self.assertEqual(db.database, 'SteadyDBTestDB')
        self.assertTrue(hasattr(db, 'user'))
        self.assertEqual(db.user, 'SteadyDBTestUser')
        self.assertTrue(hasattr(db, 'cursor'))
        self.assertTrue(hasattr(db, 'close'))
        self.assertTrue(hasattr(db, 'open_cursors'))
        self.assertTrue(hasattr(db, 'num_uses'))
        self.assertTrue(hasattr(db, 'num_queries'))
        self.assertTrue(hasattr(db, 'session'))
        self.assertTrue(hasattr(db, 'valid'))
        self.assertTrue(db.valid)
        self.assertEqual(db.open_cursors, 0)
        # Opening and closing cursors is tracked via open_cursors.
        for i in range(3):
            cursor = db.cursor()
            self.assertEqual(db.open_cursors, 1)
            cursor.close()
            self.assertEqual(db.open_cursors, 0)
        cursor = []
        for i in range(3):
            cursor.append(db.cursor())
            self.assertEqual(db.open_cursors, i + 1)
        # Garbage-collecting cursors closes them, too.
        del cursor
        self.assertEqual(db.open_cursors, 0)
        cursor = db.cursor()
        self.assertTrue(hasattr(cursor, 'execute'))
        self.assertTrue(hasattr(cursor, 'fetchone'))
        self.assertTrue(hasattr(cursor, 'callproc'))
        self.assertTrue(hasattr(cursor, 'close'))
        self.assertTrue(hasattr(cursor, 'valid'))
        self.assertTrue(cursor.valid)
        self.assertEqual(db.open_cursors, 1)
        # Queries bump both num_uses and num_queries ...
        for i in range(3):
            self.assertEqual(db.num_uses, i)
            self.assertEqual(db.num_queries, i)
            cursor.execute(f'select test{i}')
            self.assertEqual(cursor.fetchone(), f'test{i}')
            self.assertTrue(cursor.valid)
            self.assertEqual(db.open_cursors, 1)
        # ... while procedure calls bump only num_uses.
        for i in range(4):
            cursor.callproc('test')
        cursor.close()
        self.assertFalse(cursor.valid)
        self.assertEqual(db.open_cursors, 0)
        self.assertEqual(db.num_uses, 7)
        self.assertEqual(db.num_queries, 3)
        # The raw mock (unlike a steady cursor) errors on use after close.
        self.assertRaises(dbapi.InternalError, cursor.close)
        self.assertRaises(dbapi.InternalError, cursor.execute, 'select test')
        self.assertTrue(db.valid)
        self.assertFalse(db.__class__.has_ping)
        self.assertEqual(db.__class__.num_pings, 0)
        # Without has_ping, ping() raises but the attempt is still counted.
        self.assertRaises(AttributeError, db.ping)
        self.assertEqual(db.__class__.num_pings, 1)
        db.__class__.has_ping = True
        self.assertIsNone(db.ping())
        self.assertEqual(db.__class__.num_pings, 2)
        db.close()
        self.assertFalse(db.valid)
        self.assertEqual(db.num_uses, 0)
        self.assertEqual(db.num_queries, 0)
        self.assertRaises(dbapi.InternalError, db.close)
        self.assertRaises(dbapi.InternalError, db.cursor)
        # Pinging a closed connection raises OperationalError (still counted).
        self.assertRaises(dbapi.OperationalError, db.ping)
        self.assertEqual(db.__class__.num_pings, 3)
        # Reset the class-level flags for the other tests.
        db.__class__.has_ping = False
        db.__class__.num_pings = 0
def test_broken_connection(self):
self.assertRaises(TypeError, SteadyDBConnection, None)
self.assertRaises(TypeError, SteadyDBCursor, None)
db = SteadyDBconnect(dbapi, database='ok')
for i in range(3):
db.close()
del db
self.assertRaises(
dbapi.OperationalError, SteadyDBconnect, dbapi, database='error')
db = SteadyDBconnect(dbapi, database='ok')
cursor = db.cursor()
for i in range(3):
cursor.close()
cursor = db.cursor('ok')
for i in range(3):
cursor.close()
self.assertRaises(dbapi.OperationalError, db.cursor, 'error')
def test_close(self):
for closeable in (False, True):
db = SteadyDBconnect(dbapi, closeable=closeable)
self.assertTrue(db._con.valid)
db.close()
self.assertTrue(closeable ^ db._con.valid)
db.close()
self.assertTrue(closeable ^ db._con.valid)
db._close()
self.assertFalse(db._con.valid)
db._close()
self.assertFalse(db._con.valid)
    def test_connection(self):
        """Full lifecycle of a SteadyDBConnection: usage counting, transparent
        reopening after close, and session bookkeeping."""
        db = SteadyDBconnect(
            dbapi, 0, None, None, None, True,
            'SteadyDBTestDB', user='SteadyDBTestUser')
        self.assertTrue(isinstance(db, SteadyDBConnection))
        self.assertTrue(hasattr(db, '_con'))
        self.assertTrue(hasattr(db, '_usage'))
        self.assertEqual(db._usage, 0)
        # The wrapper exposes the raw mock connection as db._con.
        self.assertTrue(hasattr(db._con, 'valid'))
        self.assertTrue(db._con.valid)
        self.assertTrue(hasattr(db._con, 'cursor'))
        self.assertTrue(hasattr(db._con, 'close'))
        self.assertTrue(hasattr(db._con, 'open_cursors'))
        self.assertTrue(hasattr(db._con, 'num_uses'))
        self.assertTrue(hasattr(db._con, 'num_queries'))
        self.assertTrue(hasattr(db._con, 'session'))
        self.assertTrue(hasattr(db._con, 'database'))
        self.assertEqual(db._con.database, 'SteadyDBTestDB')
        self.assertTrue(hasattr(db._con, 'user'))
        self.assertEqual(db._con.user, 'SteadyDBTestUser')
        self.assertTrue(hasattr(db, 'cursor'))
        self.assertTrue(hasattr(db, 'close'))
        self.assertEqual(db._con.open_cursors, 0)
        # Cursor management is delegated to the underlying connection.
        for i in range(3):
            cursor = db.cursor()
            self.assertEqual(db._con.open_cursors, 1)
            cursor.close()
            self.assertEqual(db._con.open_cursors, 0)
        cursor = []
        for i in range(3):
            cursor.append(db.cursor())
            self.assertEqual(db._con.open_cursors, i + 1)
        del cursor
        self.assertEqual(db._con.open_cursors, 0)
        cursor = db.cursor()
        self.assertTrue(hasattr(cursor, 'execute'))
        self.assertTrue(hasattr(cursor, 'fetchone'))
        self.assertTrue(hasattr(cursor, 'callproc'))
        self.assertTrue(hasattr(cursor, 'close'))
        self.assertTrue(hasattr(cursor, 'valid'))
        self.assertTrue(cursor.valid)
        self.assertEqual(db._con.open_cursors, 1)
        # Queries count as uses and queries; callproc counts as a use only.
        for i in range(3):
            self.assertEqual(db._usage, i)
            self.assertEqual(db._con.num_uses, i)
            self.assertEqual(db._con.num_queries, i)
            cursor.execute(f'select test{i}')
            self.assertEqual(cursor.fetchone(), f'test{i}')
            self.assertTrue(cursor.valid)
            self.assertEqual(db._con.open_cursors, 1)
        for i in range(4):
            cursor.callproc('test')
        cursor.close()
        self.assertFalse(cursor.valid)
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 7)
        self.assertEqual(db._con.num_uses, 7)
        self.assertEqual(db._con.num_queries, 3)
        # Unlike the raw mock, a steady cursor tolerates a second close and
        # transparently reopens on the next execute.
        cursor.close()
        cursor.execute('select test8')
        self.assertTrue(cursor.valid)
        self.assertEqual(db._con.open_cursors, 1)
        self.assertEqual(cursor.fetchone(), 'test8')
        self.assertEqual(db._usage, 8)
        self.assertEqual(db._con.num_uses, 8)
        self.assertEqual(db._con.num_queries, 4)
        self.assertTrue(db._con.valid)
        db.close()
        self.assertFalse(db._con.valid)
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 8)
        self.assertEqual(db._con.num_uses, 0)
        self.assertEqual(db._con.num_queries, 0)
        # The raw connection errors on double close; the wrapper tolerates it.
        self.assertRaises(dbapi.InternalError, db._con.close)
        db.close()
        self.assertRaises(dbapi.InternalError, db._con.cursor)
        # Requesting a cursor reopens the underlying connection.
        cursor = db.cursor()
        self.assertTrue(db._con.valid)
        cursor.execute('select test11')
        self.assertEqual(cursor.fetchone(), 'test11')
        cursor.execute('select test12')
        self.assertEqual(cursor.fetchone(), 'test12')
        cursor.callproc('test')
        self.assertEqual(db._usage, 3)
        self.assertEqual(db._con.num_uses, 3)
        self.assertEqual(db._con.num_queries, 2)
        cursor2 = db.cursor()
        self.assertEqual(db._con.open_cursors, 2)
        cursor2.execute('select test13')
        self.assertEqual(cursor2.fetchone(), 'test13')
        self.assertEqual(db._con.num_queries, 3)
        db.close()
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._con.num_queries, 0)
        cursor = db.cursor()
        self.assertTrue(cursor.valid)
        cursor.callproc('test')
        # Simulate a broken underlying cursor: the steady cursor recovers.
        cursor._cursor.valid = False
        self.assertFalse(cursor.valid)
        self.assertRaises(dbapi.InternalError, cursor._cursor.callproc, 'test')
        cursor.callproc('test')
        self.assertTrue(cursor.valid)
        cursor._cursor.callproc('test')
        self.assertEqual(db._usage, 2)
        self.assertEqual(db._con.num_uses, 3)
        # Simulate a broken connection: usage counters restart after reconnect.
        db._con.valid = cursor._cursor.valid = False
        cursor.callproc('test')
        self.assertTrue(cursor.valid)
        self.assertEqual(db._usage, 1)
        self.assertEqual(db._con.num_uses, 1)
        # 'set' statements and transaction commands are recorded in session.
        cursor.execute('set this')
        db.commit()
        cursor.execute('set that')
        db.rollback()
        self.assertEqual(
            db._con.session, ['this', 'commit', 'that', 'rollback'])
def test_connection_context_handler(self):
db = SteadyDBconnect(
dbapi, 0, None, None, None, True,
'SteadyDBTestDB', user='SteadyDBTestUser')
self.assertEqual(db._con.session, [])
with db as con:
con.cursor().execute('select test')
self.assertEqual(db._con.session, ['commit'])
try:
with db as con:
con.cursor().execute('error')
except dbapi.ProgrammingError:
error = True
else:
error = False
self.assertTrue(error)
self.assertEqual(db._con.session, ['commit', 'rollback'])
def test_cursor_context_handler(self):
db = SteadyDBconnect(
dbapi, 0, None, None, None, True,
'SteadyDBTestDB', user='SteadyDBTestUser')
self.assertEqual(db._con.open_cursors, 0)
with db.cursor() as cursor:
self.assertEqual(db._con.open_cursors, 1)
cursor.execute('select test')
self.assertEqual(cursor.fetchone(), 'test')
self.assertEqual(db._con.open_cursors, 0)
    def test_cursor_as_iterator_provided(self):
        """Iteration over a steady cursor when the raw cursor carries an
        instance-level __iter__."""
        db = SteadyDBconnect(
            dbapi, 0, None, None, None, True,
            'SteadyDBTestDB', user='SteadyDBTestUser')
        self.assertEqual(db._con.open_cursors, 0)
        cursor = db.cursor()
        self.assertEqual(db._con.open_cursors, 1)
        cursor.execute('select test')
        _cursor = cursor._cursor
        try:
            # Patch an __iter__ onto the raw cursor *instance*; iterating the
            # steady cursor still yields the fetched row ('test'), not
            # 'test-iter'.  NOTE(review): presumably because special-method
            # lookup happens on the type, not the instance — confirm intent.
            assert not hasattr(_cursor, 'iter')
            _cursor.__iter__ = lambda: ['test-iter']
            assert list(iter(cursor)) == ['test']
        finally:
            # Always remove the patch so other tests see a clean mock.
            del _cursor.__iter__
        cursor.close()
        self.assertEqual(db._con.open_cursors, 0)
def test_cursor_as_iterator_created(self):
db = SteadyDBconnect(
dbapi, 0, None, None, None, True,
'SteadyDBTestDB', user='SteadyDBTestUser')
self.assertEqual(db._con.open_cursors, 0)
cursor = db.cursor()
self.assertEqual(db._con.open_cursors, 1)
cursor.execute('select test')
assert list(iter(cursor)) == ['test']
cursor.close()
self.assertEqual(db._con.open_cursors, 0)
def test_connection_creator_function(self):
db1 = SteadyDBconnect(
dbapi, 0, None, None, None, True,
'SteadyDBTestDB', user='SteadyDBTestUser')
db2 = SteadyDBconnect(
dbapi.connect, 0, None, None, None, True,
'SteadyDBTestDB', user='SteadyDBTestUser')
self.assertEqual(db1.dbapi(), db2.dbapi())
self.assertEqual(db1.threadsafety(), db2.threadsafety())
self.assertEqual(db1._creator, db2._creator)
self.assertEqual(db1._args, db2._args)
self.assertEqual(db1._kwargs, db2._kwargs)
db2.close()
db1.close()
    def test_connection_maxusage(self):
        """A maxusage of 10 renews the underlying connection every 10 uses."""
        db = SteadyDBconnect(dbapi, 10)
        cursor = db.cursor()
        for i in range(100):
            cursor.execute(f'select test{i}')
            r = cursor.fetchone()
            self.assertEqual(r, f'test{i}')
            self.assertTrue(db._con.valid)
            # Counters wrap around each time the connection is renewed.
            j = i % 10 + 1
            self.assertEqual(db._usage, j)
            self.assertEqual(db._con.num_uses, j)
            self.assertEqual(db._con.num_queries, j)
        self.assertEqual(db._con.open_cursors, 1)
        # Inside an explicit transaction the connection must not be renewed.
        db.begin()
        for i in range(100):
            cursor.callproc('test')
            self.assertTrue(db._con.valid)
            if i == 49:
                db.commit()
            # Renewal is deferred until the transaction ends at i == 49.
            j = i % 10 + 1 if i > 49 else i + 11
            self.assertEqual(db._usage, j)
            self.assertEqual(db._con.num_uses, j)
            j = 0 if i > 49 else 10
            self.assertEqual(db._con.num_queries, j)
        # An invalidated connection also restarts the usage counters.
        for i in range(10):
            if i == 7:
                db._con.valid = cursor._cursor.valid = False
            cursor.execute(f'select test{i}')
            r = cursor.fetchone()
            self.assertEqual(r, f'test{i}')
            j = i % 7 + 1
            self.assertEqual(db._usage, j)
            self.assertEqual(db._con.num_uses, j)
            self.assertEqual(db._con.num_queries, j)
        for i in range(10):
            if i == 5:
                db._con.valid = cursor._cursor.valid = False
            cursor.callproc('test')
            # Offset accounts for the 3 queries carried over before the break.
            j = (i + (3 if i < 5 else -5)) % 10 + 1
            self.assertEqual(db._usage, j)
            self.assertEqual(db._con.num_uses, j)
            j = 3 if i < 5 else 0
            self.assertEqual(db._con.num_queries, j)
        db.close()
        # After close, the next use runs on a fresh connection.
        cursor.execute('select test1')
        self.assertEqual(cursor.fetchone(), 'test1')
        self.assertEqual(db._usage, 1)
        self.assertEqual(db._con.num_uses, 1)
        self.assertEqual(db._con.num_queries, 1)
    def test_connection_setsession(self):
        """setsession commands run on every (re)connect; maxusage 3 forces renewal."""
        db = SteadyDBconnect(dbapi, 3, ('set time zone', 'set datestyle'))
        self.assertTrue(hasattr(db, '_usage'))
        self.assertEqual(db._usage, 0)
        self.assertTrue(hasattr(db._con, 'open_cursors'))
        self.assertEqual(db._con.open_cursors, 0)
        self.assertTrue(hasattr(db._con, 'num_uses'))
        # The two setsession commands already count as uses on the raw mock.
        self.assertEqual(db._con.num_uses, 2)
        self.assertTrue(hasattr(db._con, 'num_queries'))
        self.assertEqual(db._con.num_queries, 0)
        self.assertTrue(hasattr(db._con, 'session'))
        self.assertEqual(tuple(db._con.session), ('time zone', 'datestyle'))
        # 11 queries with maxusage 3: the wrapper renews the connection
        # repeatedly, replaying the session commands each time.
        for i in range(11):
            db.cursor().execute('select test')
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 2)
        self.assertEqual(db._con.num_uses, 4)
        self.assertEqual(db._con.num_queries, 2)
        self.assertEqual(db._con.session, ['time zone', 'datestyle'])
        db.cursor().execute('set test')
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 3)
        self.assertEqual(db._con.num_uses, 5)
        self.assertEqual(db._con.num_queries, 2)
        self.assertEqual(db._con.session, ['time zone', 'datestyle', 'test'])
        # maxusage was reached, so the next use runs on a fresh connection.
        db.cursor().execute('select test')
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 1)
        self.assertEqual(db._con.num_uses, 3)
        self.assertEqual(db._con.num_queries, 1)
        self.assertEqual(db._con.session, ['time zone', 'datestyle'])
        db.cursor().execute('set test')
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 2)
        self.assertEqual(db._con.num_uses, 4)
        self.assertEqual(db._con.num_queries, 1)
        self.assertEqual(db._con.session, ['time zone', 'datestyle', 'test'])
        db.cursor().execute('select test')
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 3)
        self.assertEqual(db._con.num_uses, 5)
        self.assertEqual(db._con.num_queries, 2)
        self.assertEqual(db._con.session, ['time zone', 'datestyle', 'test'])
        # After an explicit close, the session commands are replayed as well.
        db.close()
        db.cursor().execute('set test')
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 1)
        self.assertEqual(db._con.num_uses, 3)
        self.assertEqual(db._con.num_queries, 0)
        self.assertEqual(db._con.session, ['time zone', 'datestyle', 'test'])
        db.close()
        db.cursor().execute('select test')
        self.assertEqual(db._con.open_cursors, 0)
        self.assertEqual(db._usage, 1)
        self.assertEqual(db._con.num_uses, 3)
        self.assertEqual(db._con.num_queries, 1)
        self.assertEqual(db._con.session, ['time zone', 'datestyle'])
def test_connection_failures(self):
db = SteadyDBconnect(dbapi)
db.close()
db.cursor()
db = SteadyDBconnect(dbapi, failures=dbapi.InternalError)
db.close()
db.cursor()
db = SteadyDBconnect(dbapi, failures=dbapi.OperationalError)
db.close()
self.assertRaises(dbapi.InternalError, db.cursor)
db = SteadyDBconnect(dbapi, failures=(
dbapi.OperationalError, dbapi.InterfaceError))
db.close()
self.assertRaises(dbapi.InternalError, db.cursor)
db = SteadyDBconnect(dbapi, failures=(
dbapi.OperationalError, dbapi.InterfaceError, dbapi.InternalError))
db.close()
db.cursor()
def test_connection_failure_error(self):
db = SteadyDBconnect(dbapi)
cursor = db.cursor()
db.close()
cursor.execute('select test')
cursor = db.cursor()
db.close()
self.assertRaises(dbapi.ProgrammingError, cursor.execute, 'error')
def test_connection_set_sizes(self):
db = SteadyDBconnect(dbapi)
cursor = db.cursor()
cursor.execute('get sizes')
result = cursor.fetchone()
self.assertEqual(result, ([], {}))
cursor.setinputsizes([7, 42, 6])
cursor.setoutputsize(9)
cursor.setoutputsize(15, 3)
cursor.setoutputsize(42, 7)
cursor.execute('get sizes')
result = cursor.fetchone()
self.assertEqual(result, ([7, 42, 6], {None: 9, 3: 15, 7: 42}))
cursor.execute('get sizes')
result = cursor.fetchone()
self.assertEqual(result, ([], {}))
cursor.setinputsizes([6, 42, 7])
cursor.setoutputsize(7)
cursor.setoutputsize(15, 3)
cursor.setoutputsize(42, 9)
db.close()
cursor.execute('get sizes')
result = cursor.fetchone()
self.assertEqual(result, ([6, 42, 7], {None: 7, 3: 15, 9: 42}))
    def test_connection_ping_check(self):
        """The ping setting controls when the underlying connection is pinged.

        NOTE(review): the exact meaning of each ping value is inferred from
        the expected counters below (it appears to be a bit mask: ping on
        request / cursor creation / query execution) — confirm against the
        SteadyDB documentation.
        """
        Connection = dbapi.Connection
        Connection.has_ping = False
        Connection.num_pings = 0
        # Without automatic pinging, nothing is pinged during normal use.
        db = SteadyDBconnect(dbapi)
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 0)
        db.close()
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 0)
        # An explicit _ping_check still tries (and counts) a ping.
        self.assertIsNone(db._ping_check())
        self.assertEqual(Connection.num_pings, 1)
        db = SteadyDBconnect(dbapi, ping=7)
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 2)
        db.close()
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 2)
        self.assertIsNone(db._ping_check())
        self.assertEqual(Connection.num_pings, 2)
        # Once the mock supports ping, _ping_check succeeds and counts.
        Connection.has_ping = True
        db = SteadyDBconnect(dbapi)
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 2)
        db.close()
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 2)
        self.assertTrue(db._ping_check())
        self.assertEqual(Connection.num_pings, 3)
        db = SteadyDBconnect(dbapi, ping=1)
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 3)
        db.close()
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 3)
        self.assertTrue(db._ping_check())
        self.assertEqual(Connection.num_pings, 4)
        db.close()
        self.assertTrue(db._ping_check())
        self.assertEqual(Connection.num_pings, 5)
        # ping=7 pings on both cursor creation and execution.
        db = SteadyDBconnect(dbapi, ping=7)
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 7)
        db.close()
        db.cursor().execute('select test')
        self.assertEqual(Connection.num_pings, 9)
        # ping=3 pings when a cursor is created, not when it executes.
        db = SteadyDBconnect(dbapi, ping=3)
        self.assertEqual(Connection.num_pings, 9)
        db.cursor()
        self.assertEqual(Connection.num_pings, 10)
        db.close()
        cursor = db.cursor()
        self.assertEqual(Connection.num_pings, 11)
        cursor.execute('select test')
        self.assertEqual(Connection.num_pings, 11)
        # ping=5 pings when a query is executed, not when a cursor is created.
        db = SteadyDBconnect(dbapi, ping=5)
        self.assertEqual(Connection.num_pings, 11)
        db.cursor()
        self.assertEqual(Connection.num_pings, 11)
        db.close()
        cursor = db.cursor()
        self.assertEqual(Connection.num_pings, 11)
        cursor.execute('select test')
        self.assertEqual(Connection.num_pings, 12)
        db.close()
        cursor = db.cursor()
        self.assertEqual(Connection.num_pings, 12)
        cursor.execute('select test')
        self.assertEqual(Connection.num_pings, 13)
        db = SteadyDBconnect(dbapi, ping=7)
        self.assertEqual(Connection.num_pings, 13)
        db.cursor()
        self.assertEqual(Connection.num_pings, 14)
        db.close()
        cursor = db.cursor()
        self.assertEqual(Connection.num_pings, 15)
        cursor.execute('select test')
| |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
# Public output types exported by this generated module (one per response shape).
__all__ = [
    'GoogleDevtoolsRemotebuildexecutionAdminV1alphaAcceleratorConfigResponse',
    'GoogleDevtoolsRemotebuildexecutionAdminV1alphaAutoscaleResponse',
    'GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse',
    'GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyResponse',
    'GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerConfigResponse',
]
@pulumi.output_type
class GoogleDevtoolsRemotebuildexecutionAdminV1alphaAcceleratorConfigResponse(dict):
    """
    AcceleratorConfig defines the accelerator cards to attach to the VM.
    """
    # Maps a camelCase wire key to its snake_case property name so direct
    # dict-style access can warn and point callers at the supported getter.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "acceleratorCount":
            suggest = "accelerator_count"
        elif key == "acceleratorType":
            suggest = "accelerator_type"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in GoogleDevtoolsRemotebuildexecutionAdminV1alphaAcceleratorConfigResponse. Access the value via the '{suggest}' property getter instead.")

    # dict access is routed through the warning shim before delegating to dict.
    def __getitem__(self, key: str) -> Any:
        GoogleDevtoolsRemotebuildexecutionAdminV1alphaAcceleratorConfigResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        GoogleDevtoolsRemotebuildexecutionAdminV1alphaAcceleratorConfigResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 accelerator_count: str,
                 accelerator_type: str):
        """
        AcceleratorConfig defines the accelerator cards to attach to the VM.
        :param str accelerator_count: The number of guest accelerator cards exposed to each VM.
        :param str accelerator_type: The type of accelerator to attach to each VM, e.g. "nvidia-tesla-k80" for nVidia Tesla K80.
        """
        # Values are stored via pulumi.set (backed by the dict base class).
        pulumi.set(__self__, "accelerator_count", accelerator_count)
        pulumi.set(__self__, "accelerator_type", accelerator_type)

    @property
    @pulumi.getter(name="acceleratorCount")
    def accelerator_count(self) -> str:
        """
        The number of guest accelerator cards exposed to each VM.
        """
        return pulumi.get(self, "accelerator_count")

    @property
    @pulumi.getter(name="acceleratorType")
    def accelerator_type(self) -> str:
        """
        The type of accelerator to attach to each VM, e.g. "nvidia-tesla-k80" for nVidia Tesla K80.
        """
        return pulumi.get(self, "accelerator_type")
@pulumi.output_type
class GoogleDevtoolsRemotebuildexecutionAdminV1alphaAutoscaleResponse(dict):
    """
    Autoscale defines the autoscaling policy of a worker pool.
    """
    # Maps a camelCase wire key to its snake_case property name so direct
    # dict-style access can warn and point callers at the supported getter.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "maxSize":
            suggest = "max_size"
        elif key == "minSize":
            suggest = "min_size"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in GoogleDevtoolsRemotebuildexecutionAdminV1alphaAutoscaleResponse. Access the value via the '{suggest}' property getter instead.")

    # dict access is routed through the warning shim before delegating to dict.
    def __getitem__(self, key: str) -> Any:
        GoogleDevtoolsRemotebuildexecutionAdminV1alphaAutoscaleResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        GoogleDevtoolsRemotebuildexecutionAdminV1alphaAutoscaleResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 max_size: str,
                 min_size: str):
        """
        Autoscale defines the autoscaling policy of a worker pool.
        :param str max_size: The maximal number of workers. Must be equal to or greater than min_size.
        :param str min_size: The minimal number of workers. Must be greater than 0.
        """
        pulumi.set(__self__, "max_size", max_size)
        pulumi.set(__self__, "min_size", min_size)

    @property
    @pulumi.getter(name="maxSize")
    def max_size(self) -> str:
        """
        The maximal number of workers. Must be equal to or greater than min_size.
        """
        return pulumi.get(self, "max_size")

    @property
    @pulumi.getter(name="minSize")
    def min_size(self) -> str:
        """
        The minimal number of workers. Must be greater than 0.
        """
        return pulumi.get(self, "min_size")
@pulumi.output_type
class GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse(dict):
    """
    Defines whether a feature can be used or what values are accepted.
    """
    # Maps a camelCase wire key to its snake_case property name so direct
    # dict-style access can warn and point callers at the supported getter.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "allowedValues":
            suggest = "allowed_values"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse. Access the value via the '{suggest}' property getter instead.")

    # dict access is routed through the warning shim before delegating to dict.
    def __getitem__(self, key: str) -> Any:
        GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 allowed_values: Sequence[str],
                 policy: str):
        """
        Defines whether a feature can be used or what values are accepted.
        :param Sequence[str] allowed_values: A list of acceptable values. Only effective when the policy is `RESTRICTED`.
        :param str policy: The policy of the feature.
        """
        pulumi.set(__self__, "allowed_values", allowed_values)
        pulumi.set(__self__, "policy", policy)

    @property
    @pulumi.getter(name="allowedValues")
    def allowed_values(self) -> Sequence[str]:
        """
        A list of acceptable values. Only effective when the policy is `RESTRICTED`.
        """
        return pulumi.get(self, "allowed_values")

    @property
    @pulumi.getter
    def policy(self) -> str:
        """
        The policy of the feature.
        """
        return pulumi.get(self, "policy")
@pulumi.output_type
class GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyResponse(dict):
    """
    FeaturePolicy defines features allowed to be used on RBE instances, as well as instance-wide behavior changes that take effect without opt-in or opt-out at usage time.
    """
    # Maps a camelCase wire key to its snake_case property name so direct
    # dict-style access can warn and point callers at the supported getter.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "containerImageSources":
            suggest = "container_image_sources"
        elif key == "dockerAddCapabilities":
            suggest = "docker_add_capabilities"
        elif key == "dockerChrootPath":
            suggest = "docker_chroot_path"
        elif key == "dockerNetwork":
            suggest = "docker_network"
        elif key == "dockerPrivileged":
            suggest = "docker_privileged"
        elif key == "dockerRunAsRoot":
            suggest = "docker_run_as_root"
        elif key == "dockerRuntime":
            suggest = "docker_runtime"
        elif key == "dockerSiblingContainers":
            suggest = "docker_sibling_containers"
        elif key == "linuxIsolation":
            suggest = "linux_isolation"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyResponse. Access the value via the '{suggest}' property getter instead.")

    # dict access is routed through the warning shim before delegating to dict.
    def __getitem__(self, key: str) -> Any:
        GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 container_image_sources: 'outputs.GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse',
                 docker_add_capabilities: 'outputs.GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse',
                 docker_chroot_path: 'outputs.GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse',
                 docker_network: 'outputs.GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse',
                 docker_privileged: 'outputs.GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse',
                 docker_run_as_root: 'outputs.GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse',
                 docker_runtime: 'outputs.GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse',
                 docker_sibling_containers: 'outputs.GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse',
                 linux_isolation: str):
        """
        FeaturePolicy defines features allowed to be used on RBE instances, as well as instance-wide behavior changes that take effect without opt-in or opt-out at usage time.
        :param 'GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse' container_image_sources: Which container image sources are allowed. Currently only RBE-supported registry (gcr.io) is allowed. One can allow all repositories under a project or one specific repository only. E.g. container_image_sources { policy: RESTRICTED allowed_values: [ "gcr.io/project-foo", "gcr.io/project-bar/repo-baz", ] } will allow any repositories under "gcr.io/project-foo" plus the repository "gcr.io/project-bar/repo-baz". Default (UNSPECIFIED) is equivalent to any source is allowed.
        :param 'GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse' docker_add_capabilities: Whether dockerAddCapabilities can be used or what capabilities are allowed.
        :param 'GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse' docker_chroot_path: Whether dockerChrootPath can be used.
        :param 'GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse' docker_network: Whether dockerNetwork can be used or what network modes are allowed. E.g. one may allow `off` value only via `allowed_values`.
        :param 'GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse' docker_privileged: Whether dockerPrivileged can be used.
        :param 'GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse' docker_run_as_root: Whether dockerRunAsRoot can be used.
        :param 'GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse' docker_runtime: Whether dockerRuntime is allowed to be set or what runtimes are allowed. Note linux_isolation takes precedence, and if set, docker_runtime values may be rejected if they are incompatible with the selected isolation.
        :param 'GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse' docker_sibling_containers: Whether dockerSiblingContainers can be used.
        :param str linux_isolation: linux_isolation allows overriding the docker runtime used for containers started on Linux.
        """
        pulumi.set(__self__, "container_image_sources", container_image_sources)
        pulumi.set(__self__, "docker_add_capabilities", docker_add_capabilities)
        pulumi.set(__self__, "docker_chroot_path", docker_chroot_path)
        pulumi.set(__self__, "docker_network", docker_network)
        pulumi.set(__self__, "docker_privileged", docker_privileged)
        pulumi.set(__self__, "docker_run_as_root", docker_run_as_root)
        pulumi.set(__self__, "docker_runtime", docker_runtime)
        pulumi.set(__self__, "docker_sibling_containers", docker_sibling_containers)
        pulumi.set(__self__, "linux_isolation", linux_isolation)

    @property
    @pulumi.getter(name="containerImageSources")
    def container_image_sources(self) -> 'outputs.GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse':
        """
        Which container image sources are allowed. Currently only RBE-supported registry (gcr.io) is allowed. One can allow all repositories under a project or one specific repository only. E.g. container_image_sources { policy: RESTRICTED allowed_values: [ "gcr.io/project-foo", "gcr.io/project-bar/repo-baz", ] } will allow any repositories under "gcr.io/project-foo" plus the repository "gcr.io/project-bar/repo-baz". Default (UNSPECIFIED) is equivalent to any source is allowed.
        """
        return pulumi.get(self, "container_image_sources")

    @property
    @pulumi.getter(name="dockerAddCapabilities")
    def docker_add_capabilities(self) -> 'outputs.GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse':
        """
        Whether dockerAddCapabilities can be used or what capabilities are allowed.
        """
        return pulumi.get(self, "docker_add_capabilities")

    @property
    @pulumi.getter(name="dockerChrootPath")
    def docker_chroot_path(self) -> 'outputs.GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse':
        """
        Whether dockerChrootPath can be used.
        """
        return pulumi.get(self, "docker_chroot_path")

    @property
    @pulumi.getter(name="dockerNetwork")
    def docker_network(self) -> 'outputs.GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse':
        """
        Whether dockerNetwork can be used or what network modes are allowed. E.g. one may allow `off` value only via `allowed_values`.
        """
        return pulumi.get(self, "docker_network")

    @property
    @pulumi.getter(name="dockerPrivileged")
    def docker_privileged(self) -> 'outputs.GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse':
        """
        Whether dockerPrivileged can be used.
        """
        return pulumi.get(self, "docker_privileged")

    @property
    @pulumi.getter(name="dockerRunAsRoot")
    def docker_run_as_root(self) -> 'outputs.GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse':
        """
        Whether dockerRunAsRoot can be used.
        """
        return pulumi.get(self, "docker_run_as_root")

    @property
    @pulumi.getter(name="dockerRuntime")
    def docker_runtime(self) -> 'outputs.GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse':
        """
        Whether dockerRuntime is allowed to be set or what runtimes are allowed. Note linux_isolation takes precedence, and if set, docker_runtime values may be rejected if they are incompatible with the selected isolation.
        """
        return pulumi.get(self, "docker_runtime")

    @property
    @pulumi.getter(name="dockerSiblingContainers")
    def docker_sibling_containers(self) -> 'outputs.GoogleDevtoolsRemotebuildexecutionAdminV1alphaFeaturePolicyFeatureResponse':
        """
        Whether dockerSiblingContainers can be used.
        """
        return pulumi.get(self, "docker_sibling_containers")

    @property
    @pulumi.getter(name="linuxIsolation")
    def linux_isolation(self) -> str:
        """
        linux_isolation allows overriding the docker runtime used for containers started on Linux.
        """
        return pulumi.get(self, "linux_isolation")
@pulumi.output_type
class GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerConfigResponse(dict):
"""
Defines the configuration to be used for creating workers in the worker pool.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "diskSizeGb":
suggest = "disk_size_gb"
elif key == "diskType":
suggest = "disk_type"
elif key == "machineType":
suggest = "machine_type"
elif key == "maxConcurrentActions":
suggest = "max_concurrent_actions"
elif key == "minCpuPlatform":
suggest = "min_cpu_platform"
elif key == "networkAccess":
suggest = "network_access"
elif key == "soleTenantNodeType":
suggest = "sole_tenant_node_type"
elif key == "vmImage":
suggest = "vm_image"
if | |
<reponame>gabrielgomesml/AlgorithmAndDataStructureActivities<gh_stars>0
class GrafoLista:
    """Adjacency-list graph (grafo por lista de adjacência).

    ``listaDeAdj`` maps each vertex to its neighbour list: plain vertices for
    unweighted graphs, ``(destino, peso)`` tuples for weighted ones.  The
    original Portuguese API is preserved; invalid inputs keep returning error
    strings (rather than raising), matching the existing callers.
    """

    def __init__(self, iteravel, ponderado=False, direcionado=False):
        """Build the adjacency lists from an iterable of edge tuples."""
        self.iteravel = iteravel
        self.ponderado = ponderado
        self.direcionado = direcionado
        self.listaDeAdj = {}
        self.criarListas(iteravel, ponderado, direcionado)

    def __str__(self):
        # BUG FIX: the original printed each line and returned None, so
        # str(g) / print(g) raised TypeError.  Build and return the text.
        linhas = [str(x) + ': ' + str(self.listaDeAdj[x]) for x in self.listaDeAdj]
        return '\n'.join(linhas)

    def __repr__(self):
        return 'GrafoLista(' + str(self.iteravel) + ')'

    def __getitem__(self, index):
        # BUG FIX: the original defined __getItem__ (wrong capitalisation),
        # so indexing g[v] never reached this code.
        if not index in self.listaDeAdj:
            return '[' + str(index) + '] não encontrado.'
        return self.listaDeAdj[index]

    # Backward-compatible alias for callers that used the old misspelled name.
    __getItem__ = __getitem__

    def ligados(self, tupla):
        """Return True when the two vertices in ``tupla`` share an edge."""
        if type(tupla) is int:
            return '[' + str(tupla) + '] não encontrado. Formato inválido.'
        if tupla[0] not in self.listaDeAdj or tupla[1] not in self.listaDeAdj:
            return '[' + str(tupla) + '] não encontrado. Par não existente nessa tupla.'
        if not self.ponderado:
            if tupla[1] in self.listaDeAdj[tupla[0]] or tupla[0] in self.listaDeAdj[tupla[1]]:
                return True
        else:
            # BUG FIX: compare with == instead of "is"; identity comparison
            # only happened to work for small interned ints.
            for x in self.listaDeAdj[tupla[0]]:
                if tupla[1] == x[0]:
                    return True
            for x in self.listaDeAdj[tupla[1]]:
                if tupla[0] == x[0]:
                    return True
        return False

    def grauDeSaida(self, vertice):
        """Out-degree: number of edges leaving ``vertice``."""
        if not vertice in self.listaDeAdj:
            return '[' + str(vertice) + '] não encontrado.'
        return len(self.listaDeAdj[vertice])

    def grauDeEntrada(self, vertice):
        """In-degree: number of edges arriving at ``vertice``."""
        if not vertice in self.listaDeAdj:
            return '[' + str(vertice) + '] não encontrado.'
        aux = 0
        if not self.ponderado:
            for x in self.listaDeAdj:
                if vertice in self.listaDeAdj[x]:
                    aux += 1
        else:
            for x in self.listaDeAdj:
                for y in self.listaDeAdj[x]:
                    # BUG FIX: == instead of "is" (see ligados).
                    if vertice == y[0]:
                        aux += 1
        return aux

    def adjacente(self, vertice):
        """Return the neighbour list of ``vertice``."""
        if not vertice in self.listaDeAdj:
            return '[' + str(vertice) + '] não encontrado.'
        return self.listaDeAdj[vertice]

    def maiorAresta(self):
        """Describe the heaviest edge(s) of a weighted graph."""
        if not self.ponderado:
            return 'Grafo não-ponderado.'
        maiores = []
        aux = 0
        # First pass: find the maximum weight; second pass: collect matches.
        for x in self.listaDeAdj:
            for y in self.listaDeAdj[x]:
                if y[1] > aux:
                    aux = y[1]
        for x in self.listaDeAdj:
            for y in self.listaDeAdj[x]:
                if y[1] == aux:
                    maiores.append((x, y[0]))
        return 'Arestas de peso: ' + str(aux) + ' // ' + 'Vértices ligados a ela: ' + str(maiores)

    def menorAresta(self):
        """Describe the lightest edge(s) of a weighted graph."""
        if not self.ponderado:
            return 'Grafo não-ponderado.'
        menores = []
        # FIX: replaces the arbitrary 100000000000 sentinel (the author's
        # "#RevisarIssoAqui" note) with a proper infinity.
        aux = float('inf')
        for x in self.listaDeAdj:
            for y in self.listaDeAdj[x]:
                if y[1] < aux:
                    aux = y[1]
        for x in self.listaDeAdj:
            for y in self.listaDeAdj[x]:
                if y[1] == aux:
                    menores.append((x, y[0]))
        return 'Arestas de peso: ' + str(aux) + ' // ' + 'Vértices ligados a ela: ' + str(menores)

    def adicionaVertice(self, vertice):
        """Add an isolated integer vertex if it does not exist yet."""
        if type(vertice) != int:
            return '[' + str(vertice) + '] não adicionado. Formato inválido.'
        if vertice in self.listaDeAdj:
            return '[' + str(vertice) + '] já existente.'
        else:
            self.listaDeAdj[vertice] = []

    def adicionaAresta(self, tupla):
        """Add an edge; both endpoints must already exist."""
        if type(tupla) is int:
            return '[' + str(tupla) + '] não adicionado. Formato inválido.'
        if self.ponderado and len(tupla) != 3:
            return '[' + str(tupla) + '] não adicionado. Formato inválido para tuplas ponderadas.'
        elif not self.ponderado and len(tupla) != 2:
            return '[' + str(tupla) + '] não adicionado. Formato inválido para tuplas não-ponderadas.'
        if tupla[0] not in self.listaDeAdj or tupla[1] not in self.listaDeAdj:
            return '[' + str(tupla) + '] não adicionado. Par não existente nessa tupla.'
        self.criarListas([tupla], self.ponderado, self.direcionado)
        # Mirror the new edge into the original iterable, keeping its element
        # type.  BUG FIX: guard against an empty iterable (IndexError before).
        if self.iteravel and type(self.iteravel[0]) is tuple:
            self.iteravel += tuple(tupla),
        elif self.iteravel and type(self.iteravel[0]) is list:
            self.iteravel.append(list(tupla))
        print('[' + str(tupla) + '] adicionada.')

    def removeAresta(self, tupla):
        """Remove the edge between ``tupla[0]`` and ``tupla[1]``."""
        if type(tupla) is int:
            return '[' + str(tupla) + '] não encontrado. Formato inválido.'
        elif len(tupla) != 2:
            return '[' + str(tupla) + '] não encontrado. Formato inválido.'
        if tupla[0] not in self.listaDeAdj or tupla[1] not in self.listaDeAdj:
            return '[' + str(tupla) + '] não removido. Par não existente nessa tupla.'
        if self.ponderado:
            # BUG FIX: weighted adjacency lists hold (destino, peso) tuples,
            # so list.remove(vertex) raised ValueError.  Filter matches out.
            self.listaDeAdj[tupla[0]] = [
                x for x in self.listaDeAdj[tupla[0]] if x[0] != tupla[1]
            ]
            if not self.direcionado:
                self.listaDeAdj[tupla[1]] = [
                    x for x in self.listaDeAdj[tupla[1]] if x[0] != tupla[0]
                ]
        else:
            self.listaDeAdj[tupla[0]].remove(tupla[1])
            if not self.direcionado:
                self.listaDeAdj[tupla[1]].remove(tupla[0])
        print('[' + str(tupla) + '] removida.')

    def criarListas(self, iteravel, ponderado, direcionado):
        """Insert every edge tuple of ``iteravel`` into the adjacency lists."""
        if ponderado:
            for tupla in iteravel:
                origem, destino, peso = tupla
                if not origem in self.listaDeAdj:
                    self.listaDeAdj[origem] = []
                self.listaDeAdj[origem].append((destino, peso))
                if not direcionado:
                    # Undirected: store the reverse edge as well.
                    if not destino in self.listaDeAdj:
                        self.listaDeAdj[destino] = []
                    self.listaDeAdj[destino].append((origem, peso))
        else:
            for tupla in iteravel:
                origem, destino = tupla
                if not origem in self.listaDeAdj:
                    self.listaDeAdj[origem] = []
                self.listaDeAdj[origem].append(destino)
                if not direcionado:
                    if not destino in self.listaDeAdj:
                        self.listaDeAdj[destino] = []
                    self.listaDeAdj[destino].append(origem)

    def dfs(self, v, antecessores, marcados):
        """Recursive depth-first visit recording each vertex's predecessor."""
        marcados[v] = True
        if self.ponderado:
            for x in self.adjacente(v):
                if not marcados[x[0]]:
                    antecessores[x[0]] = v
                    self.dfs(x[0], antecessores, marcados)
        else:
            for x in self.adjacente(v):
                if not marcados[x]:
                    antecessores[x] = v
                    self.dfs(x, antecessores, marcados)

    def buscaEmProfundidade(self):
        """Depth-first search over all components; roots get predecessor -1."""
        marcados = {}
        antecessores = {}
        for x in self.listaDeAdj:
            marcados[x] = False
            antecessores[x] = -1
        for v in marcados:
            if not marcados[v]:
                self.dfs(v, antecessores, marcados)
        return antecessores

    def buscaEmLargura(self):
        """Breadth-first search over all components; roots get predecessor -1."""
        marcados = {}
        antecessores = {}
        vertices = []
        for x in self.listaDeAdj:
            marcados[x] = False
            antecessores[x] = -1
        for y in marcados:
            if not marcados[y]:
                vertices.append(y)
                marcados[y] = True
                while len(vertices) > 0:
                    v = vertices.pop(0)
                    if self.ponderado:
                        for z in self.adjacente(v):
                            if not marcados[z[0]]:
                                marcados[z[0]] = True
                                antecessores[z[0]] = v
                                vertices.append(z[0])
                    else:
                        for z in self.adjacente(v):
                            if not marcados[z]:
                                marcados[z] = True
                                antecessores[z] = v
                                vertices.append(z)
        return antecessores

    def converterListaMatriz(self):
        """Build the equivalent adjacency-matrix representation."""
        return GrafoMatrizes(self.iteravel, self.ponderado, self.direcionado)
import numpy as np
class GrafoMatrizes:
def __init__(self, iteravel, ponderado=False, direcionado=False):
self.iteravel = iteravel
self.ponderado = ponderado
self.direcionado = direcionado
self.v = []
for duplas in self.iteravel:
if self.ponderado:
for vertice in duplas[:len(duplas)-1]:
if vertice not in self.v:
self.v.append(vertice)
else:
for vertice in duplas:
if vertice not in self.v:
self.v.append(vertice)
self.matrizesDeAdj = self.criarMatrizes(iteravel, ponderado, direcionado)
def index(self, vertice):
aux = 0
for n in range(len(self.v)):
if self.v[n] != vertice:
aux += 1
else:
return aux
def criarMatrizes(self, iteravel, ponderado, direcionado):
tam = len(self.v)
corpo = np.zeros((tam,tam), int)
if not self.ponderado:
if not self.direcionado:
for x in iteravel:
corpo[self.index(x[0])][self.index(x[1])] = 1
corpo[self.index(x[1])][self.index(x[0])] = 1
else:
for x in iteravel:
corpo[self.index(x[0])][self.index(x[1])] = 1
else:
if not self.direcionado:
for x in iteravel:
corpo[self.index(x[0])][self.index(x[1])] = x[2]
corpo[self.index(x[1])][self.index(x[0])] = x[2]
else:
for x in iteravel:
corpo[self.index(x[0])][self.index(x[1])] = x[2]
return corpo
def __str__(self):
tam = max(self.v)
colunas = str([x for x in self.v])
colunas = ' ' + colunas[1:len(colunas)-1].replace(',','')
print(colunas)
for x in range(len(self.matrizesDeAdj)):
print(str(self.v[x]) + ' ' + str(self.matrizesDeAdj[x]))
def __repr__(self):
return 'GrafoMatrizes(' + str(self.iteravel) + ')'
def __getItem__(self, index):
arestas = {}
if not self.ponderado:
for x in range(len(self.matrizesDeAdj)):
if index not in arestas and self.matrizesDeAdj[index][x] != 0:
arestas[index] = [(index, x)]
elif index in arestas and self.matrizesDeAdj[index][x] != 0:
arestas[index].append((index, x))
if self.direcionado:
if index not in arestas and self.matrizesDeAdj[x][index] != 0:
arestas[index] = [(x, index)]
elif index in arestas and self.matrizesDeAdj[x][index] != 0:
arestas[index].append((x, index))
else:
for x in range(len(self.matrizesDeAdj)):
if index not in arestas and self.matrizesDeAdj[index][x] != 0:
arestas[index] = [(index, x, self.matrizesDeAdj[index][x])]
elif index in arestas and self.matrizesDeAdj[index][x] != 0:
arestas[index].append((index, x, self.matrizesDeAdj[index][x]))
if self.direcionado:
if index not in arestas and self.matrizesDeAdj[x][index] != 0:
arestas[index] = [(x, index, self.matrizesDeAdj[index][x])]
elif index in arestas and self.matrizesDeAdj[x][index] != 0:
arestas[index].append((x, index, self.matrizesDeAdj[index][x]))
return arestas[index]
def grauDeSaida(self, vertice):
if vertice not in self.v:
return '[' + str(vertice) + '] não encontrado.'
aux = 0
for n in range(len(self.v)):
if self.matrizesDeAdj[self.index(vertice)][n] != 0:
aux += 1
return aux
def grauDeEntrada(self, vertice):
if not self.direcionado:
return self.grauDeSaida(vertice)
else:
aux = 0
for n in range(len(self.v)):
if self.matrizesDeAdj[n][self.index(vertice)] != 0:
aux += 1
return aux
def ligados(self, tupla):
if type(tupla) is int:
return '[' + str(tupla) + '] não encontrado. Formato inválido.'
elif len(tupla) != 2:
return '[' + str(tupla) + '] não encontrado. Formato inválido.'
elif tupla[0] not in self.v or tupla[1] not in self.v:
return '[' + str(tupla) + '] não encontrado. Par não existente nessa tupla.'
if self.matrizesDeAdj[self.index(tupla[0])][self.index(tupla[1])] or self.matrizesDeAdj[self.index(tupla[1])][self.index(tupla[0])] != 0:
return True
return False
def adjacente(self, vertice):
if vertice not in self.v:
return '[' + str(vertice) + '] não encontrado.'
else:
lista = []
for x in self.v:
if not self.direcionado:
if self.matrizesDeAdj[self.index(vertice)][self.index(x)] != 0:
lista.append(x)
else:
if self.matrizesDeAdj[self.index(vertice)][self.index(x)] != 0:
lista.append(x)
if self.matrizesDeAdj[self.index(x)][self.index(vertice)] != 0 | |
<gh_stars>1-10
#!/usr/bin/env python3 # NOQA
# -*- coding: utf-8 -*-
"""ruamel_config, yaml based config object using the ruamel python module"""
# pylint: disable=line-too-long
# pylint: disable=W1202
# TODO: 2/24/2018 - Try to get the most simplified ruamel config working
#################################
# NOTE: basic yaml terminology converted to python primitaves, based on yaml 1.2 docs
# Nodes = A YAML node represents a single native data structure. Such nodes have content of one of three kinds: scalar, sequence, or mapping. In addition, each node has a tag which serves to restrict the set of possible values the content can have.
# sequence = array ... Extended: The content of a sequence node is an ordered series of zero or more nodes. In particular, a sequence may contain the same node more than once. It could even contain itself(directly or indirectly).
# mappings = dict ... Extended: The content of a mapping node is an unordered set of key: value node pairs, with the restriction that each of the keys is unique. YAML places no further restrictions on the nodes. In particular, keys may be arbitrary nodes, the same node may be used as the value of several key: value pairs, and a mapping could even contain itself as a key or a value (directly or indirectly).
# scalar = str or another yaml node obj ... Extended: The content of a scalar node is an opaque datum that can be presented as a series of zero or more Unicode characters.
#################################
from __future__ import absolute_import, unicode_literals
import codecs
import collections
import fnmatch
import logging
import os
import shutil
import sys
import re
import tempfile
import shutil
from collections import OrderedDict
from gettext import gettext as _
from time import time
from typing import Any, Dict, Optional
import uuid
# ruamel.yaml supports round-trip preserving dict ordering,
# comments, etc., which is why we use it instead of the usual yaml
# module. Remember the project file is intended to go into source
# control.
import ruamel.yaml # pragma: no cover
from ruamel.yaml import YAML # pragma: no cover
from ruamel.yaml.error import YAMLError # pragma: no cover
from ruamel.yaml.comments import CommentedMap # pragma: no cover
from ruamel.yaml.comments import CommentedSeq # pragma: no cover
from ruamel.yaml.compat import StringIO # pragma: no cover
import voluptuous as vol
from layeredconfig import Defaults, DictSource, LayeredConfig
from voluptuous.humanize import humanize_error
# from scarlett_os import SCARLETT_ROOT_DIR
import scarlett_os.helpers.config_validation as cv
from scarlett_os.compat import basestring, bytes, integer_types, string_types, text_type
from scarlett_os.const import (
CONF_CUSTOMIZE,
CONF_CUSTOMIZE_DOMAIN,
CONF_CUSTOMIZE_GLOB,
CONF_ELEVATION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
CONF_OWNERS_NAME,
CONF_PACKAGES,
CONF_TEMPERATURE_UNIT,
CONF_TIME_ZONE,
CONF_UNIT_SYSTEM,
CONF_UNIT_SYSTEM_IMPERIAL,
CONF_UNIT_SYSTEM_METRIC,
TEMP_CELSIUS,
__version__,
)
from scarlett_os.internal import path as path_internal
from scarlett_os.internal.path import mkdir_if_does_not_exist, ensure_dir_exists
from scarlett_os.internal.rename import rename_over_existing
from scarlett_os.utility import dt as date_util
from scarlett_os.utility import location as loc_util
from scarlett_os.utility import environment as env_util
import xdg
import warnings
# SOURCE: Ruamel docs, "Output of dump() as a string"
class MyYAML(YAML):  # pragma: no cover
    """YAML subclass whose ``dump`` returns a string when no stream is given.

    SOURCE: Ruamel docs, "Output of dump() as a string".
    """

    def dump(self, data, stream=None, **kw):
        """Serialize *data*; when *stream* is None, buffer in memory and return text."""
        if stream is not None:
            YAML.dump(self, data, stream, **kw)
            return None
        buffer = StringIO()
        YAML.dump(self, data, buffer, **kw)
        return buffer.getvalue()
# Module-level logger named after this module, per logging convention.
logger = logging.getLogger(__name__)
# NOTE: (bytes, int, str, str)
SCALARS = (bytes,) + integer_types + string_types + (text_type,)
# yaml = YAML()
# TODO: Do we want to use this or not? See ruamel section "Output of dump() as a string"
# TODO: #
# Shared round-trip YAML instance; MyYAML.dump() can return a string directly.
yaml = MyYAML()  # or typ='safe'/'unsafe' etc
yaml.explicit_start = True
yaml.indent = 4
yaml.block_seq_indent = 2
yaml.version = (1, 2)  # set yaml version to 1.2
# yaml.allow_unicode = False
# Matches ANSI colour escape sequences so they can be stripped from text.
RE_ASCII = re.compile(r"\033\[[^m]*m")  # source: home-assistant
# NOTE: We are using https://github.com/srstevenson/xdg
# NOTE: This enforces the [XDG Base Directory Specification]
# https://specifications.freedesktop.org/basedir-spec/basedir-spec-0.6.html
# NOTE: Shamelessly borrowed from udiskie
# source: https://github.com/coldfix/udiskie/blob/master/udiskie/config.py
# File/directory names used when locating the on-disk configuration.
YAML_CONFIG_FILE = "config.yaml"
CONFIG_DIR_NAME = "scarlett"
VERSION_FILE = ".SCARLETT_VERSION"
# Lightweight latitude/longitude pair used for location defaults.
CoordinatesTuple = collections.namedtuple("Coordinates", ["latitude", "longitude"])
DEFAULT_CORE_CONFIG = (
# Tuples (attribute, default, auto detect property, description)
(
CONF_NAME,
"Scarlett Home",
None,
"Name of the location where Scarlett System is " "running",
),
(
CONF_LATITUDE,
0,
"latitude",
"Location required to calculate the time" " the sun rises and sets",
),
(CONF_LONGITUDE, 0, "longitude", None),
(
CONF_ELEVATION,
0,
None,
"Impacts weather/sunrise data" " (altitude above sea level in meters)",
),
(
CONF_UNIT_SYSTEM,
CONF_UNIT_SYSTEM_METRIC,
None,
"{} for Metric, {} for Imperial".format(
CONF_UNIT_SYSTEM_METRIC, CONF_UNIT_SYSTEM_IMPERIAL
),
),
(
CONF_TIME_ZONE,
"UTC",
"time_zone",
"Pick yours from here: http://en.wiki"
"pedia.org/wiki/List_of_tz_database_time_zones",
),
) # type: Tuple[Tuple[str, Any, Any, str], ...]
# NOTE: This is how you get a functions name automatically
# source: https://stackoverflow.com/questions/251464/how-to-get-a-function-name-as-a-string-in-python
# sys._getframe().f_code.co_name
# FIXME: Old defaults 2/25/2018
# DEFAULT_CONFIG = """
# # Omitted values in this section will be auto detected using freegeoip.io
# # Name for Scarlett to call user
# owners_name: '<NAME>'
# pocketsphinx:
# hmm: /home/pi/.virtualenvs/scarlett_os/share/pocketsphinx/model/en-us/en-us
# lm: /home/pi/dev/bossjones-github/scarlett_os/static/speech/lm/1473.lm
# dict: /home/pi/dev/bossjones-github/scarlett_os/static/speech/dict/1473.dic
# silprob: 0.1
# wip: 1e-4
# bestpath: 0
# keywords_list:
# - 'scarlett'
# - 'SCARLETT'
# features:
# - time
# - help
# - party
# """
# def dump_yaml(layered_config):
# """[summary]
# Arguments:
# layered_config {[type]} -- [description]
# Returns:
# [type] -- [description]
# """
# # source:
# # https://github.com/vmfarms/farmer/blob/e3f8b863b51b21dfa2d11d2453eac86ed0ab9bc9/farmer/commands/config.py
# return ruamel.yaml.round_trip_dump(layered_config.dump(layered_config),
# default_flow_style=False)
DEFAULT_CONFIG = """
# Omitted values in this section will be auto detected using freegeoip.io
# Location required to calculate the time the sun rises and sets.
# Coordinates are also used for location for weather related automations.
# Google Maps can be used to determine more precise GPS coordinates.
latitude: 40.7056308
longitude: -73.9780034
pocketsphinx:
hmm: /home/pi/.virtualenvs/scarlett_os/share/pocketsphinx/model/en-us/en-us
lm: /home/pi/dev/bossjones-github/scarlett_os/static/speech/lm/1473.lm
dict: /home/pi/dev/bossjones-github/scarlett_os/static/speech/dict/1473.dic
# Silence word transition probability
silprob: 0.1
# ********************************************************
# FIXME: ????? THIS IS THE ORIG VALUE, do we need too set it back? 8/5/2018 # wip: 1e-4
# Enable Graph Search | Boolean. Default: true
# ********************************************************
# Word insertion penalty
wip: 0.0001
device: plughw:CARD=Device,DEV=0
# ********************************************************
# FIXME: ????? THIS IS THE ORIG VALUE, do we need too set it back? 8/5/2018 # bestpath: 0
# Enable Graph Search | Boolean. Default: true
# ********************************************************
bestpath: True
# Enable Flat Lexicon Search | Default: true
fwdflat: True
# Evaluate acoustic model every N frames | Integer. Range: 1 - 10 Default: 1
dsratio: 1
# Maximum number of HMMs searched per frame | Integer. Range: 1 - 100000 Default: 30000
maxhmmpf: 3000
# Impacts weather/sunrise data
elevation: 665
# 'metric' for Metric System, 'imperial' for imperial system
unit_system: metric
# Pick yours from here:
# http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
time_zone: America/New_York
# Name of the location where ScarlettOS Assistant is running
name: ScarlettOS
owner: "<NAME>"
keywords_list:
- 'scarlett'
- 'SCARLETT'
features:
- time
graphviz_debug_dir: /home/pi/dev/bossjones-github/scarlett_os/_debug
"""
RUSSIAN_CONFIG = """
# Name of the location where ScarlettOS Assistant is running
name: Бага
owner: "<NAME>"
"""
def lower(a_string):
    """Lowercase *a_string*; objects without a ``lower`` method pass through unchanged."""
    try:
        result = a_string.lower()
    except AttributeError:
        # Non-string input (int, None, ...): return it untouched.
        result = a_string
    return result
def flatten(d, parent_key="", sep="/"):
    """Flatten a nested mapping into a single-level dict.

    Nested keys are joined with *sep* (keys are assumed to be strings —
    TODO confirm against callers). Example: ``{"a": {"b": 1}}`` becomes
    ``{"a/b": 1}``.

    source: http://stackoverflow.com/a/6027615
    source:
    https://github.com/russellballestrini/yaml_consulate/blob/76d74ec7ffe5fd56ee057a619f12dcc8a862b046/yaml_consulate/yaml_consulate.py
    """
    # BUG FIX: collections.MutableMapping was deprecated since 3.3 and removed
    # in Python 3.10; the abc submodule is the supported location.
    from collections.abc import MutableMapping

    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            # Recurse into nested mappings, carrying the joined prefix.
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
# FIXME: add valid docblock
def mapping_string_access(self, s, delimiter=None, key_delim=None):  # pragma: no cover
    """Dotted-path access into a mapping, e.g. ``m.string_access("a.b.c")``.

    *delimiter* (default ``.``) separates path segments; *key_delim*
    (default ``,``) inside one segment builds a tuple key.  Numeric
    segments are coerced to ``int``.

    source:
    https://stackoverflow.com/questions/39463936/python-accessing-yaml-values-using-dot-notation
    """
    delimiter = "." if delimiter is None else delimiter
    key_delim = "," if key_delim is None else key_delim

    def coerce(token):  # pragma: no cover
        """Convert *token* to int when possible, else return it unchanged."""
        try:
            return int(token)
        except Exception:
            return token

    # Split off the first path segment; the remainder (if any) is resolved
    # recursively on the looked-up value.
    if delimiter in s:
        head, rest = s.split(delimiter, 1)
    else:
        head, rest = s, None
    if key_delim in head:
        key = tuple(coerce(part) for part in head.split(key_delim))
    else:
        key = coerce(head)
    if rest is None:
        return self[key]
    return self[key].string_access(rest, delimiter, key_delim)
# monkeypatch CommentedMap.string_access function
# NOTE: this mutates the ruamel class globally at import time, so every
# CommentedMap in the process gains dotted-path string_access().
ruamel.yaml.comments.CommentedMap.string_access = mapping_string_access
# FIXME: add valid docblock
# FIXME: Try to borrow test from ruamel and add it to our test suite
def sequence_string_access(self, s, delimiter=None, key_delim=None): # pragma: no cover
"""[summary]
Arguments:
s {[type]} -- [description]
Keyword Arguments:
delimiter {[type]} -- [description] (default: {None})
key_delim {[type]} -- [description] (default: {None})
Returns:
[type] -- [description]
"""
# source:
# https://stackoverflow.com/questions/39463936/python-accessing-yaml-values-using-dot-notation
if delimiter is None:
delimiter = "."
try:
key, rest = s.split(delimiter, 1)
except ValueError:
key, rest = s, None
key = int(key)
if | |
8))
(Re,) = unpack("=1d", stateFile.read(1 * 8))
(tilt_angle,) = unpack("=1d", stateFile.read(1 * 8))
(dt,) = unpack("=1d", stateFile.read(1 * 8))
(itime,) = unpack("=1i", stateFile.read(1 * 4))
(time,) = unpack("=1d", stateFile.read(1 * 8))
ny_half = ny // 2
nxp, nzp = nx // 2 - 1, nz // 2 - 1
header = (forcing, nx, ny, nz, Lx, Lz, Re, tilt_angle, dt, itime, time)
# Read into y, z, x
state = np.zeros((2 * ny_half, nz, nx, 3), dtype=np.float64)
# read u and v in full
for n in range(0, 2):
nCells = (nx - 1) * (2 * ny_half) * (nz - 1)
toReshape = np.asarray(
unpack("={}d".format(nCells), stateFile.read(nCells * 8))
)
# These were written in Fortran order, so read back in that order
buff = np.reshape(toReshape, (2 * ny_half, nz - 1, nx - 1), order="F")
state[:, : nzp + 1, : nxp + 1, n] = buff[:, : nzp + 1, : nxp + 1]
state[:, : nzp + 1, nxp + 2 :, n] = buff[:, : nzp + 1, nxp + 1 :]
state[:, nzp + 2 :, : nxp + 1 :, n] = buff[:, nzp + 1 :, : nxp + 1]
state[:, nzp + 2 :, nxp + 2 :, n] = buff[:, nzp + 1 :, nxp + 1 :]
# read only the zero mode for w
nCells = (nx - 1) * (2 * ny_half)
toReshape = np.asarray(unpack("={}d".format(nCells), stateFile.read(nCells * 8)))
buff = np.reshape(toReshape, (2 * ny_half, 1, nx - 1), order="F")
state[:, 0, : nxp + 1, 2] = buff[:, 0, : nxp + 1]
state[:, 0, nxp + 2 :, 2] = buff[:, 0, nxp + 1 :]
stateFile.close()
# compute the rest from
# kx u + ky v + kz w = 0
kx, ky, kz = wavenumbers(Lx, Lz, nx, ny_half, nz)
for j in range(ny_half):
for k in range(1, nz):
for i in range(nx):
state[2 * j, k, i, 2] = (
-(kx[i] * state[2 * j, k, i, 0] + ky[j] * state[2 * j, k, i, 1])
/ kz[k]
)
state[2 * j + 1, k, i, 2] = (
-(
kx[i] * state[2 * j + 1, k, i, 0]
+ ky[j] * state[2 * j + 1, k, i, 1]
)
/ kz[k]
)
# Pack into a complex array
state_ = np.zeros((ny_half, nz, nx, 3), dtype=np.complex128)
for j in range(ny_half):
state_[j, :, :, :] = state[2 * j, :, :, :] + 1j * state[2 * j + 1, :, :, :]
# Rotate to x, y, z
state__ = np.moveaxis(state_, [0, 1, 2, 3], [1, 2, 0, 3])
return state__, header
def readState_nocompact(stateFilePath):
    """Read a dnsbox state file that stores all three velocity components in full.

    Returns ``(state, header)`` where ``state`` is a complex array of shape
    ``(nx, ny_half, nz, 3)`` and ``header`` is the tuple
    ``(forcing, nx, ny, nz, Lx, Lz, Re, tilt_angle, dt, itime, time)``.
    """
    stateFilePath = Path(stateFilePath)
    # FIX: use a context manager so the handle is closed even if a
    # read/unpack fails partway through (the original leaked it on error).
    with open(stateFilePath, "rb") as stateFile:
        (forcing,) = unpack("=1i", stateFile.read(1 * 4))
        nx, ny, nz = unpack("=3i", stateFile.read(3 * 4))
        Lx, Lz = unpack("=2d", stateFile.read(2 * 8))
        (Re,) = unpack("=1d", stateFile.read(1 * 8))
        (tilt_angle,) = unpack("=1d", stateFile.read(1 * 8))
        (dt,) = unpack("=1d", stateFile.read(1 * 8))
        (itime,) = unpack("=1i", stateFile.read(1 * 4))
        (time,) = unpack("=1d", stateFile.read(1 * 8))
        ny_half = ny // 2
        nxp, nzp = nx // 2 - 1, nz // 2 - 1
        header = (forcing, nx, ny, nz, Lx, Lz, Re, tilt_angle, dt, itime, time)
        # Real-valued buffer laid out (y, z, x, component); even/odd y rows
        # hold the real/imaginary parts of each mode.
        state = np.zeros((2 * ny_half, nz, nx, 3), dtype=np.float64)
        for n in range(3):
            nCells = (nx - 1) * (2 * ny_half) * (nz - 1)
            toReshape = np.asarray(
                unpack("={}d".format(nCells), stateFile.read(nCells * 8))
            )
            # These were written in Fortran (column-major) order, so read
            # back in that order.
            buff = np.reshape(toReshape, (2 * ny_half, nz - 1, nx - 1), order="F")
            # Scatter the compact buffer around the skipped middle x/z
            # indices (presumably the unstored highest-wavenumber modes —
            # confirm against the Fortran writer).
            state[:, : nzp + 1, : nxp + 1, n] = buff[:, : nzp + 1, : nxp + 1]
            state[:, : nzp + 1, nxp + 2 :, n] = buff[:, : nzp + 1, nxp + 1 :]
            state[:, nzp + 2 :, : nxp + 1 :, n] = buff[:, nzp + 1 :, : nxp + 1]
            state[:, nzp + 2 :, nxp + 2 :, n] = buff[:, nzp + 1 :, nxp + 1 :]
    # Pack interleaved real/imaginary rows into a complex array.
    state_ = np.zeros((ny_half, nz, nx, 3), dtype=np.complex128)
    for j in range(ny_half):
        state_[j, :, :, :] = state[2 * j, :, :, :] + 1j * state[2 * j + 1, :, :, :]
    # Rotate axes from (y, z, x, comp) to (x, y, z, comp).
    state__ = np.moveaxis(state_, [0, 1, 2, 3], [1, 2, 0, 3])
    return state__, header
def readState(stateFilePath):
    """Read a dnsbox state file, dispatching on its compact-storage direction.

    Only the header's grid dimensions are inspected here; the dimension with
    the largest spectral extent selects which specialised reader to use.
    """
    stateFilePath = Path(stateFilePath)
    # FIX: context manager guarantees the probe handle is closed on error.
    with open(stateFilePath, "rb") as stateFile:
        (forcing,) = unpack("=1i", stateFile.read(1 * 4))
        nx, ny, nz = unpack("=3i", stateFile.read(3 * 4))
    ny_half = ny // 2
    if nx - 1 >= ny_half and nx - 1 >= nz - 1:
        return readState_xcompact(stateFilePath)
    elif ny_half >= nx - 1 and ny_half >= nz - 1:
        return readState_ycompact(stateFilePath)
    else:
        return readState_zcompact(stateFilePath)
def writeState_xcompact(
    state,
    forcing=1,
    Lx=4,
    Lz=2,
    Re=628.3185307179584,
    tilt_angle=0.0,
    dt=0.03183098861837907,
    itime=0,
    time=0.0,
    outFile="state.000000",
):
    """Write a spectral state array to a dnsbox state file (x-compact layout).

    ``state`` is complex with shape ``(nx, ny_half, nz, 3)``.  Only the zero
    x-mode of the first component is stored; the other two components are
    stored in full, mirroring what the readState_* functions consume.
    """
    outFile = Path(outFile)
    nx, ny_half, nz, _ = state.shape
    ny = ny_half * 2
    nxp, nzp = nx // 2 - 1, nz // 2 - 1
    # FIX: use a context manager so the file is closed (and flushed) even if
    # packing fails midway (the original leaked the handle on error).
    with open(outFile, "wb") as stateFile:
        stateFile.write(pack("=1i", forcing))
        stateFile.write(pack("=3i", nx, ny, nz))
        stateFile.write(pack("=2d", Lx, Lz))
        stateFile.write(pack("=1d", Re))
        stateFile.write(pack("=1d", tilt_angle))
        stateFile.write(pack("=1d", dt))
        stateFile.write(pack("=1i", itime))
        stateFile.write(pack("=1d", time))
        # convert from (x, y, z, comp) to (y, z, x, comp)
        state_ = np.moveaxis(state, [0, 1, 2], [2, 0, 1])
        # Split complex modes into interleaved real/imaginary y rows.
        stateOut = np.zeros((2 * ny_half, nz, nx, 3), dtype=np.float64, order="F")
        for j in range(ny_half):
            stateOut[2 * j, :, :, :] = state_[j, :, :, :].real
            stateOut[2 * j + 1, :, :, :] = state_[j, :, :, :].imag
        # write only the zero x-mode for component 0
        nCells = 2 * ny_half * (nz - 1)
        buff = np.zeros((2 * ny_half, nz - 1, 1), dtype=np.float64)
        buff[:, : nzp + 1, 0] = stateOut[:, : nzp + 1, 0, 0]
        buff[:, nzp + 1 :, 0] = stateOut[:, nzp + 2 :, 0, 0]
        dataPack = pack("={}d".format(nCells), *buff.flatten(order="F"))
        stateFile.write(dataPack)
        # write everything for components 1 and 2, skipping the middle
        # (unstored) index in each of x and z.
        for n in range(1, 3):
            nCells = (nx - 1) * 2 * ny_half * (nz - 1)
            buff = np.zeros((2 * ny_half, nz - 1, nx - 1), dtype=np.float64)
            buff[:, : nzp + 1, : nxp + 1] = stateOut[:, : nzp + 1, : nxp + 1, n]
            buff[:, : nzp + 1, nxp + 1 :] = stateOut[:, : nzp + 1, nxp + 2 :, n]
            buff[:, nzp + 1 :, : nxp + 1] = stateOut[:, nzp + 2 :, : nxp + 1, n]
            buff[:, nzp + 1 :, nxp + 1 :] = stateOut[:, nzp + 2 :, nxp + 2 :, n]
            dataPack = pack("={}d".format(nCells), *buff.flatten(order="F"))
            stateFile.write(dataPack)
def writeState_ycompact(
state,
forcing=1,
Lx=4,
Lz=2,
Re=628.3185307179584,
tilt_angle=0.0,
dt=0.03183098861837907,
itime=0,
time=0.0,
outFile="state.000000",
):
# Write array to a dnsbox state file.
outFile = Path(outFile)
stateFile = open(outFile, "wb")
nx, ny_half, nz, _ = state.shape
ny = ny_half * 2
nxp, nzp = nx // 2 - 1, nz // 2 - 1
stateFile.write(pack("=1i", forcing))
stateFile.write(pack("=3i", nx, ny, nz))
stateFile.write(pack("=2d", Lx, Lz))
stateFile.write(pack("=1d", Re))
stateFile.write(pack("=1d", tilt_angle))
stateFile.write(pack("=1d", dt))
stateFile.write(pack("=1i", itime))
stateFile.write(pack("=1d", time))
# convert to y, z, x
state_ = np.moveaxis(state, [0, 1, 2], [2, 0, 1])
# Convert to a real valued array
stateOut = np.zeros((2 * ny_half, nz, nx, 3), dtype=np.float64, order="F")
for j in range(ny_half):
stateOut[2 * j, :, :, :] = state_[j, :, :, :].real
stateOut[2 * j + 1, :, :, :] = state_[j, :, :, :].imag
# write everything for u
nCells = (nx - 1) * 2 * ny_half * (nz - 1)
buff = np.zeros((2 * ny_half, nz - 1, nx - 1), dtype=np.float64)
buff[:, : nzp + 1, : nxp + 1] = | |
"param3": "https://foo.com",
},
},
{
"component_ref": {"hub": "echo"},
"name": "C",
"params": {
"param1": "{{ ops.A.outputs.x }}",
"param2": "{{ ops.B.outputs.x }}",
},
},
{
"component_ref": {"hub": "echo"},
"name": "D",
"dependencies": ["B", "C"],
},
],
}
config = DagConfig.from_dict(config_dict)
config_to_light = config.to_light_dict()
assert config_to_light == config_dict
assert config.dag == {}
# Process the dag
config.process_dag()
# Todo must resolve the components from the hub
config.process_components()
config.validate_dag()
dag = config.dag
assert len(dag) == 4
assert config.get_independent_ops(dag=dag) == {"A"}
assert dags.get_independent_ops(dag=dag) == {"A"}
assert config.get_orphan_ops(dag=dag) == set([])
assert dags.get_orphan_ops(dag=dag) == set([])
assert config.sort_topologically(dag=dag) == [["A"], ["B"], ["C"], ["D"]]
assert dags.sort_topologically(dag=dag) == [["A"], ["B"], ["C"], ["D"]]
assert config.dag["A"].op.name == "A"
assert config.dag["A"].op.component_ref.to_dict() == {"hub": "echo"}
assert config.dag["A"].upstream == set()
assert config.dag["A"].downstream == {"B", "C"}
assert config.dag["B"].op.name == "B"
assert config.dag["B"].op.component_ref.to_dict() == {"hub": "echo"}
assert config.dag["B"].upstream == {"A"}
assert config.dag["B"].downstream == {"C", "D"}
assert config.dag["C"].op.name == "C"
assert config.dag["C"].op.component_ref.to_dict() == {"hub": "echo"}
assert config.dag["C"].upstream == {"A", "B"}
assert config.dag["C"].downstream == {"D"}
assert config.dag["D"].op.name == "D"
assert config.dag["D"].op.component_ref.to_dict() == {"hub": "echo"}
assert config.dag["D"].upstream == {"B", "C"}
assert config.dag["D"].downstream == set()
def test_dag_dependency_and_params(self):
    """Mixed wiring: ops declare explicit dependencies AND reference upstream
    outputs via "{{ ops.X.outputs... }}" params; both must shape the dag."""
    config_dict = {
        "kind": "dag",
        "ops": [
            {
                "component_ref": {"hub": "echo"},
                "name": "A",
                "params": {
                    "param1": "text",
                    "param2": 12,
                    "param3": "https://foo.com",
                },
            },
            {
                "component_ref": {"hub": "echo"},
                "name": "B",
                "params": {
                    "param1": "{{ ops.A.outputs.x }}",
                    "param2": 12,
                    "param3": "https://foo.com",
                },
                "dependencies": ["A"],
            },
            {
                "component_ref": {"hub": "echo"},
                "name": "C",
                "params": {"param2": "{{ ops.B.outputs.x }}"},
                "dependencies": ["A"],
            },
            {
                "component_ref": {"hub": "echo"},
                "name": "D",
                "dependencies": ["B", "C"],
            },
        ],
    }
    config = DagConfig.from_dict(config_dict)
    config_to_light = config.to_light_dict()
    # Round trip: serializing back must reproduce the input dict exactly.
    assert config_to_light == config_dict
    # The dag is built lazily; it stays empty until process_dag() runs.
    assert config.dag == {}
    # Process the dag
    config.process_dag()
    # TODO resolve components from hub
    config.process_components()
    config.validate_dag()
    dag = config.dag
    assert len(dag) == 4
    # The config methods and the standalone dags helpers must agree.
    assert config.get_independent_ops(dag=dag) == {"A"}
    assert dags.get_independent_ops(dag=dag) == {"A"}
    assert config.get_orphan_ops(dag=dag) == set([])
    assert dags.get_orphan_ops(dag=dag) == set([])
    assert config.sort_topologically(dag=dag) == [["A"], ["B"], ["C"], ["D"]]
    assert dags.sort_topologically(dag=dag) == [["A"], ["B"], ["C"], ["D"]]
    # Upstream/downstream sets reflect explicit dependencies plus the
    # implicit edges introduced by param references (e.g. C reads B's output).
    assert config.dag["A"].op.name == "A"
    assert config.dag["A"].op.component_ref.to_dict() == {"hub": "echo"}
    assert config.dag["A"].upstream == set()
    assert config.dag["A"].downstream == {"B", "C"}
    assert config.dag["B"].op.name == "B"
    assert config.dag["B"].op.component_ref.to_dict() == {"hub": "echo"}
    assert config.dag["B"].upstream == {"A"}
    assert config.dag["B"].downstream == {"C", "D"}
    assert config.dag["C"].op.name == "C"
    assert config.dag["C"].op.component_ref.to_dict() == {"hub": "echo"}
    assert config.dag["C"].upstream == {"A", "B"}
    assert config.dag["C"].downstream == {"D"}
    assert config.dag["D"].op.name == "D"
    assert config.dag["D"].op.component_ref.to_dict() == {"hub": "echo"}
    assert config.dag["D"].upstream == {"B", "C"}
    assert config.dag["D"].downstream == set()
def test_dag_orphan_ops(self):
config_dict = {
"kind": "dag",
"ops": [
{"component_ref": {"hub": "action1"}, "name": "A"},
{
"component_ref": {"hub": "event1"},
"name": "B",
"dependencies": ["A"],
},
{
"component_ref": {"name": "foo"},
"name": "C",
"dependencies": ["A", "E"],
},
],
}
config = DagConfig.from_dict(config_dict)
config_to_light = config.to_light_dict()
assert config_to_light == config_dict
assert config.dag == {}
# Process the dag
config.process_dag()
with self.assertRaises(PolyaxonSchemaError):
config.process_components()
with self.assertRaises(PolyaxonSchemaError):
config.validate_dag()
dag = config.dag
assert len(dag) == 4
assert config.get_independent_ops(dag=dag) == {"A", "E"}
assert dags.get_independent_ops(dag=dag) == {"A", "E"}
assert config.get_orphan_ops(dag=dag) == {"E"}
assert dags.get_orphan_ops(dag=dag) == {"E"}
sorted_dag = config.sort_topologically(dag=dag)
assert config.sort_topologically(dag=dag) == sorted_dag
assert sorted_dag[0] in [["A", "E"], ["E", "A"]]
assert sorted_dag[1] in [["B", "C"], ["C", "B"]]
assert config.dag["A"].op.name == "A"
assert config.dag["A"].op.component_ref.to_dict() == {"hub": "action1"}
assert config.dag["A"].upstream == set()
assert config.dag["A"].downstream == {"B", "C"}
assert config.dag["B"].op.name == "B"
assert config.dag["B"].op.component_ref.to_dict() == {"hub": "event1"}
assert config.dag["B"].upstream == {"A"}
assert config.dag["B"].downstream == set()
assert config.dag["C"].op.name == "C"
assert config.dag["C"].op.component_ref.to_dict() == {"name": "foo"}
assert config.dag["C"].upstream == {"A", "E"}
assert config.dag["C"].downstream == set()
    def test_dag_with_duplicate_job_names(self):
        """Two ops both named "A" must make process_components() fail."""
        config_dict = {
            "kind": "dag",
            "ops": [
                {"component_ref": {"name": "build-template1"}, "name": "A"},
                {"component_ref": {"name": "build-template1"}, "name": "A"},
            ],
            "components": [
                {
                    "name": "build-template",
                    "description": "description build",
                    "tags": ["tag11", "tag12"],
                    "container": {"image": "test"},
                },
                {
                    "name": "job-template",
                    "description": "description build",
                    "container": {"image": "test"},
                },
            ],
        }
        config = DagConfig.from_dict(config_dict)
        config_to_light = config.to_light_dict()
        assert config_to_light == config_dict
        with self.assertRaises(PolyaxonSchemaError):
            config.process_components()
    def test_dag_with_op_requesting_undefined_template(self):
        """Ops referencing a component name that is not declared in
        "components" must make process_components() fail."""
        config_dict = {
            "kind": "dag",
            "ops": [
                {"component_ref": {"name": "build-template1"}, "name": "A"},
                {"component_ref": {"name": "build-template1"}, "name": "B"},
            ],
            "components": [
                {
                    "kind": "component",
                    "name": "build-template2",
                    "description": "description build",
                    "tags": ["kaniko"],
                    "environment": {"registry": "A"},
                    "mounts": {"artifacts": [{"name": "data2"}]},
                    "container": {"image": "test"},
                }
            ],
        }
        config = DagConfig.from_dict(config_dict)
        config_to_light = config.to_light_dict()
        assert config_to_light == config_dict
        with self.assertRaises(PolyaxonSchemaError):
            config.process_components()
    def test_dag_with_template_not_defining_inputs(self):
        """set_op_component() is only valid after both process_components()
        and process_dag(); components are then attached one op at a time."""
        config_dict = {
            "kind": "dag",
            "ops": [
                {"component_ref": {"name": "build-template"}, "name": "A"},
                {
                    "component_ref": {"name": "job-template"},
                    "name": "B",
                    "dependencies": ["A"],
                },
            ],
            "components": [
                {"name": "job-template", "container": {"image": "test"}},
                {
                    "name": "build-template",
                    "tags": ["kaniko"],
                    "init": {"repos": [{"name": "foo", "branch": "dev"}]},
                    "container": {"image": "test"},
                },
            ],
        }
        config = DagConfig.from_dict(config_dict)
        config_to_light = config.to_light_dict()
        assert config_to_light == config_dict
        # Trying to set op template before processing components
        with self.assertRaises(PolyaxonSchemaError):
            config.set_op_component("A")
        config.process_components()
        # Trying to set op template before processing dag
        with self.assertRaises(PolyaxonSchemaError):
            config.set_op_component("A")
        config.process_dag()
        assert config.dag["A"].op.component is None
        assert config.dag["B"].op.component is None
        # Attaching A's component must leave B untouched.
        config.set_op_component("A")
        assert config.dag["B"].op.component is None
        assert config.dag["A"].op.component is not None
        assert (
            config.dag["A"].op.component
            == config._components_by_names["build-template"]
        )
        config.set_op_component("B")
        assert config.dag["B"].op.component is not None
        assert (
            config.dag["B"].op.component == config._components_by_names["job-template"]
        )
    def test_dag_with_template_not_defining_inputs_and_ops_with_params(self):
        """Inline components declaring no inputs must reject op params."""
        config_dict = {
            "kind": "dag",
            "ops": [
                {
                    "name": "A",
                    "component": {
                        "kind": "component",
                        "name": "build-template",
                        "tags": ["kaniko"],
                        "container": {"image": "test"},
                    },
                },
                {
                    "name": "B",
                    "dependencies": ["A"],
                    "params": {
                        "param1": "{{ ops.A.outputs.x }}",
                        "param2": 12,
                        "param3": "https://foo.com",
                    },
                    "component": {
                        "kind": "component",
                        "name": "job-template",
                        "container": {"image": "test"},
                    },
                },
            ],
        }
        config = DagConfig.from_dict(config_dict)
        assert config.to_light_dict() == config_dict
        with self.assertRaises(ValidationError):
            config.process_components()
    def test_pipelines_with_template_not_defining_inputs_and_ops_with_params_template(
        self
    ):
        """Same input-less rejection, with components referenced by name."""
        config_dict = {
            "kind": "dag",
            "ops": [
                {"component_ref": {"name": "build-template"}, "name": "A"},
                {
                    "component_ref": {"name": "job-template"},
                    "name": "B",
                    "dependencies": ["A"],
                    "params": {
                        "param1": "{{ ops.A.outputs.x }}",
                        "param2": 12,
                        "param3": "https://foo.com",
                    },
                },
            ],
            "components": [
                {"name": "job-template", "container": {"image": "test"}},
                {
                    "name": "build-template",
                    "tags": ["kaniko"],
                    "container": {"image": "test"},
                },
            ],
        }
        config = DagConfig.from_dict(config_dict)
        assert config.to_light_dict() == config_dict
        with self.assertRaises(ValidationError):
            config.process_components()
    def test_dag_with_ops_template_required_inputs(self):
        """A required input with no param and no default fails validation."""
        config_dict = {
            "kind": "dag",
            "ops": [
                {
                    "name": "A",
                    "component": {
                        "kind": "component",
                        "name": "job-template",
                        "inputs": [
                            {
                                "name": "input1",
                                "description": "some text",
                                "type": IOTypes.FLOAT,
                            }
                        ],
                        "container": {"image": "test"},
                    },
                }
            ],
        }
        config = DagConfig.from_dict(config_dict)
        config_to_light = config.to_light_dict()
        assert config_to_light == config_dict
        with self.assertRaises(ValidationError):
            config.process_components()
    def test_pipelines_with_ops_template_required_inputs_template(self):
        """Same required-input failure with a named component reference."""
        config_dict = {
            "kind": "dag",
            "ops": [{"component_ref": {"name": "job-template"}, "name": "A"}],
            "components": [
                {
                    "name": "job-template",
                    "inputs": [
                        {
                            "name": "input1",
                            "description": "some text",
                            "type": IOTypes.FLOAT,
                        }
                    ],
                    "container": {"image": "test"},
                }
            ],
        }
        config = DagConfig.from_dict(config_dict)
        config_to_light = config.to_light_dict()
        assert config_to_light == config_dict
        with self.assertRaises(ValidationError):
            config.process_components()
    def test_dag_with_ops_template_optional_inputs(self):
        """An optional input with a default value validates without params."""
        config_dict = {
            "kind": "dag",
            "ops": [
                {
                    "component_ref": {"name": "job-template"},
                    "name": "A",
                    "component": {
                        "name": "job-template",
                        "inputs": [
                            {
                                "name": "input1",
                                "description": "some text",
                                "type": IOTypes.FLOAT,
                                "is_optional": True,
                                "value": 12.2,
                            }
                        ],
                        "container": {"image": "test"},
                    },
                }
            ],
        }
        config = DagConfig.from_dict(config_dict)
        config_to_light = config.to_light_dict()
        assert config_to_light == config_dict
        config.process_components()
    def test_pipelines_with_ops_template_optional_inputs_template(self):
        """Same optional-input success with a named component reference."""
        config_dict = {
            "kind": "dag",
            "ops": [{"component_ref": {"name": "job-template"}, "name": "A"}],
            "components": [
                {
                    "name": "job-template",
                    "inputs": [
                        {
                            "name": "input1",
                            "description": "some text",
                            "type": IOTypes.FLOAT,
                            "is_optional": True,
                            "value": 12.2,
                        }
                    ],
                    "container": {"image": "test"},
                }
            ],
        }
        config = DagConfig.from_dict(config_dict)
        config_to_light = config.to_light_dict()
        assert config_to_light == config_dict
        config.process_components()
    def test_dag_with_ops_template_optional_inputs_and_wrong_param(self):
        """A param whose value does not match the input type (str for a
        FLOAT input) must fail validation, even for optional inputs."""
        config_dict = {
            "kind": "dag",
            "ops": [
                {
                    "component_ref": {"name": "job-template"},
                    "name": "A",
                    "params": {"input1": "foo"},
                    "component": {
                        "name": "job-template",
                        "inputs": [
                            {
                                "name": "input1",
                                "description": "some text",
                                "type": IOTypes.FLOAT,
                                "is_optional": True,
                                "value": 12.2,
                            }
                        ],
                        "container": {"image": "test"},
                    },
                }
            ],
        }
        config = DagConfig.from_dict(config_dict)
        config_to_light = config.to_light_dict()
        assert config_to_light == config_dict
        with self.assertRaises(ValidationError):
            config.process_components()
    def test_pipelines_with_ops_template_optional_inputs_and_wrong_param_components(
        self
    ):
        """Same type-mismatch failure with a named component reference."""
        config_dict = {
            "kind": "dag",
            "ops": [
                {
                    "component_ref": {"name": "job-template"},
                    "name": "A",
                    "params": {"input1": "foo"},
                }
            ],
            "components": [
                {
                    "name": "job-template",
                    "inputs": [
                        {
                            "name": "input1",
                            "description": "some text",
                            "type": IOTypes.FLOAT,
                            "is_optional": True,
                            "value": 12.2,
                        }
                    ],
                    "container": {"image": "test"},
                }
            ],
        }
        config = DagConfig.from_dict(config_dict)
        config_to_light = config.to_light_dict()
        assert config_to_light == config_dict
        with self.assertRaises(ValidationError):
            config.process_components()
def test_dag_with_ops_template_validation(self):
config_dict = {
"kind": "dag",
"ops": [
{
"component_ref": {"name": "job-template"},
"name": "A",
"params": {"input1": "sdf", "input2": 12.0, "input3": False},
},
{
"component_ref": {"name": "job-template"},
"name": "B",
"dependencies": ["A"],
"params": {"input1": "ooo", "input2": 12.123},
},
],
"components": [
{
"kind": "component",
"name": "job-template",
"inputs": [
{
"name": "input1",
"description": | |
# differint/differint.py
import numpy as np
import math
def isInteger(n):
    """Assert that *n* is a non-negative built-in int.

    The type is checked before the sign so that non-numeric arguments
    (e.g. strings, for which ``n >= 0`` raises TypeError) fail with the
    intended AssertionError instead.
    """
    assert (type(n) is int and n >= 0), "n must be a positive integer or zero: %r" % n
def checkValues(alpha, domain_start, domain_end, num_points):
    """ Validate the shared arguments of the differintegral routines.

    Raises AssertionError when num_points is not an int, either domain
    bound is neither int nor float, or alpha is complex.
    """
    assert type(num_points) is int, "num_points is not an integer: %r" % num_points
    assert type(domain_start) in (float, int), "domain_start must be integer or float: %r" % domain_start
    assert type(domain_end) in (float, int), "domain_end must be integer or float: %r" % domain_end
    # Currently there is no support for complex orders (17 Jan 2017).
    assert type(alpha) is not complex, "alpha must be real: %r" % alpha
    return
def functionCheck(f_name, domain_start, domain_end, num_points):
    """ Return (function values, grid step size) for *f_name*.

    *f_name* is either a callable, sampled on a uniform num_points grid
    over [domain_start, domain_end], or an existing sequence of function
    values, in which case the grid is inferred from its length.
    """
    if hasattr(f_name, '__call__'):
        # Sample the callable on a uniform grid.
        grid = np.linspace(domain_start, domain_end, num_points)
        f_values = [f_name(t) for t in grid]
        step_size = grid[1] - grid[0]
    else:
        # Pre-computed values: the array length defines the grid.
        num_points = np.size(f_name)
        f_values = f_name
        step_size = (domain_end - domain_start)/(num_points-1)
    return f_values, step_size
def poch(a, n):
    """ Returns the Pochhammer symbol (a)_n = a (a+1) ... (a+n-1). """
    # First, check if 'a' is a real number (this is currently only working for reals).
    assert type(a) is not complex, "a must be real: %r" % a
    isInteger(n)
    # Empty product: (a)_0 == 1.
    if n == 0:
        return 1.0
    # Multiply the factors left to right, exactly as an explicit loop would.
    return math.prod(a + j for j in range(n))
def GLcoeffs(alpha, n):
    """ Computes the GL coefficient array of size n + 1.

    These generalized binomial coefficients serve both the GL and the
    improved GL algorithm.
    """
    # Validate input.
    isInteger(n)
    # Successive ratio recurrence: w_0 = 1, w_{k+1} = w_k * (k - alpha) / (k + 1).
    weights = np.zeros(n + 1,)
    weights[0] = 1
    for k in range(n):
        weights[k + 1] = weights[k] * (k - alpha) / (k + 1)
    return weights
def GLpoint(alpha, f_name, domain_start = 0., domain_end = 1., num_points = 100):
    """ Computes the GL fractional derivative of a function at a point.

    Parameters
    ==========
     alpha : float
        The order of the differintegral to be computed.
     f_name : function handle, lambda function, list, or 1d-array of
              function values
        This is the function that is to be differintegrated.
     domain_start : float
        The left-endpoint of the function domain. Default value is 0.
     domain_end : float
        The right-endpoint of the function domain; the point at which the
        differintegral is being evaluated. Default value is 1.
     num_points : integer
        The number of points in the domain. Default value is 100.

    Examples:
    >>> DF_poly = GLpoint(-0.5, lambda x: 3*x**2 - 9*x + 2)
    >>> DF_sqrt = GLpoint(0.5, lambda x: np.sqrt(x), 0., 1., 100)
    """
    # Flip the domain limits if they are in the wrong order.
    if domain_start > domain_end:
        domain_start, domain_end = domain_end, domain_start
    # Check inputs.
    checkValues(alpha, domain_start, domain_end, num_points)
    f_values, step_size = functionCheck(f_name, domain_start, domain_end, num_points)
    # Calculate the GL differintegral, avoiding the explicit calculation of
    # the gamma function.
    # Horner-style recurrence: the ratio of consecutive GL binomial weights
    # is folded into the running sum, so only O(1) extra storage is used.
    GL_previous = f_values[1]
    for index in range(2,num_points):
        GL_current = GL_previous*(num_points-alpha-index-1)/(num_points-index) + f_values[index]
        GL_previous = GL_current
    # NOTE(review): the scale factor uses num_points rather than the
    # (num_points - 1) grid steps spanning the domain; the difference is
    # O(1/num_points) -- confirm against the GL definition before changing.
    return GL_current*(num_points/(domain_end - domain_start))**alpha
def GL(alpha, f_name, domain_start = 0.0, domain_end = 1.0, num_points = 100):
    """ Computes the GL fractional derivative of a function for an entire array
    of function values, via FFT convolution with the GL binomial weights.

    Parameters
    ==========
     alpha : float
        The order of the differintegral to be computed.
     f_name : function handle, lambda function, list, or 1d-array of
              function values
        This is the function that is to be differintegrated.
     domain_start : float
        The left-endpoint of the function domain. Default value is 0.
     domain_end : float
        The right-endpoint of the function domain; the point at which the
        differintegral is being evaluated. Default value is 1.
     num_points : integer
        The number of points in the domain. Default value is 100.

    Examples:
    >>> DF_poly = GL(-0.5, lambda x: x**2 - 1)
    >>> DF_sqrt = GL(0.5, lambda x: np.sqrt(x), 0., 1., 100)
    """
    # Flip the domain limits if they are in the wrong order.
    if domain_start > domain_end:
        domain_start, domain_end = domain_end, domain_start
    # Check inputs.
    checkValues(alpha, domain_start, domain_end, num_points)
    f_values, step_size = functionCheck(f_name, domain_start, domain_end, num_points)
    # Get the convolution filter.
    b_coeffs = GLcoeffs(alpha, num_points-1)
    # Real Fourier transforms for convolution filter and array of function values.
    B = np.fft.rfft(b_coeffs)
    F = np.fft.rfft(f_values)
    # Pass n explicitly: without it, irfft assumes an even-length original
    # signal and returns only num_points - 1 samples when num_points is odd.
    result = np.fft.irfft(F*B, n=num_points)*step_size**-alpha
    return result
def GLI(alpha, f_name, domain_start = 0.0, domain_end = 1.0, num_points = 100):
    """ Computes the 'improved' GL fractional derivative of a function for an
    entire array of function values. The 'improved' definition uses the
    3-point Lagrange interpolation found in:

        <NAME>. & <NAME>. (1974). The Fractional Calculus: Theory
        and Applications of Differentiation and Integration to Arbitrary
        Order. Academic Press, Inc.

    Parameters
    ==========
     alpha : float
        The order of the differintegral to be computed.
     f_name : function handle, lambda function, list, or 1d-array of
              function values
        This is the function that is to be differintegrated.
     domain_start : float
        The left-endpoint of the function domain. Default value is 0.
     domain_end : float
        The right-endpoint of the function domain; the point at which the
        differintegral is being evaluated. Default value is 1.
     num_points : integer
        The number of points in the domain. Default value is 100.

    Examples:
    >>> GLI_poly = GLI(-0.5, lambda x: x**2 - 1)
    >>> GLI_sqrt = GLI(0.5, lambda x: np.sqrt(x), 0., 1., 100)
    """
    # Flip the domain limits if they are in the wrong order.
    if domain_start > domain_end:
        domain_start, domain_end = domain_end, domain_start
    # Check inputs.
    checkValues(alpha, domain_start, domain_end, num_points)
    f_values, step_size = functionCheck(f_name, domain_start, domain_end, num_points)
    # Get interpolating values for order alpha (previously hard-coded to 0.5,
    # which contradicted the step_size**-alpha scaling applied below).
    IN = GLIinterpolat(alpha)
    I = [IN.prv,IN.crr,IN.nxt]
    # Get array of generalized binomial coefficients of order alpha
    # (also previously hard-coded to 0.5).
    b_coeffs = GLcoeffs(alpha,num_points)
    # Calculate the improved GL differintegral using convolution.
    # (local renamed from GLI, which shadowed this function.)
    result = np.zeros(num_points)
    for i in range(3,num_points):
        F = f_values[:i]
        L = len(F)
        B = b_coeffs[:(L-2)]
        G = np.convolve(F,B,'valid')
        result[i] = sum(G*I)
    return result*step_size**-alpha
def RLcoeffs(index_k, index_j, alpha):
    """Calculates coefficients for the RL differintegral operator.

    see <NAME>., <NAME>., <NAME>., and <NAME>. (2012). Fractional
    Calculus: Models and Numerical Methods. World Scientific.
    """
    k, j = index_k, index_j
    if j == 0:
        # Left-boundary weight of the trapezoid rule.
        return ((k - 1) ** (1 - alpha) - (k + alpha - 1) * k ** -alpha)
    if j == k:
        # Right-boundary weight.
        return 1
    # Interior weight: central second difference of x**(1 - alpha).
    return ((k - j + 1) ** (1 - alpha) + (k - j - 1) ** (1 - alpha) - 2 * (k - j) ** (1 - alpha))
def RLpoint(alpha, f_name, domain_start = 0.0, domain_end = 1.0, num_points = 100):
    """Calculate the RL differintegral at a point with the trapezoid rule.

    Parameters
    ==========
     alpha : float
        The order of the differintegral to be computed.
     f_name : function handle, lambda function, list, or 1d-array of
              function values
        This is the function that is to be differintegrated.
     domain_start : float
        The left-endpoint of the function domain. Default value is 0.
     domain_end : float
        The right-endpoint of the function domain; the point at which the
        differintegral is being evaluated. Default value is 1.
     num_points : integer
        The number of points in the domain. Default value is 100.

    Examples:
    >>> RL_sqrt = RLpoint(0.5, lambda x: np.sqrt(x))
    >>> RL_poly = RLpoint(0.5, lambda x: x**2 - 4*x - 1, 0., 1., 100)
    """
    # Flip the domain limits if they are in the wrong order.
    if domain_start > domain_end:
        domain_start, domain_end = domain_end, domain_start
    # Check inputs.
    checkValues(alpha, domain_start, domain_end, num_points)
    f_values, step_size = functionCheck(f_name, domain_start, domain_end, num_points)
    C = 1/math.gamma(2-alpha)
    # Weighted trapezoid sum over all grid nodes.
    total = sum(RLcoeffs(num_points - 1, j, alpha) * f_values[j]
                for j in range(num_points))
    return total * (C*step_size**-alpha)
def RLmatrix(alpha, N):
    """ Define the coefficient matrix for the RL algorithm.

    The strictly lower-triangular part holds the trapezoid weights and the
    main diagonal is 1; the whole matrix is scaled by 1/gamma(2 - alpha).
    """
    weights = np.zeros((N, N))
    for row in range(N):
        for col in range(row):
            weights[row, col] = RLcoeffs(row, col, alpha)
    # Place 1 on the main diagonal.
    np.fill_diagonal(weights, 1)
    return weights / math.gamma(2 - alpha)
def RL(alpha, f_name, domain_start = 0.0, domain_end = 1.0, num_points = 100):
""" Calculate the RL | |
# gh_stars: 0
# -*- coding: utf-8 -*-
"""parking_model_based.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/eleurent/highway-env/blob/master/scripts/parking_model_based.ipynb
# Model-Based Reinforcement Learning
## Principle
We consider the optimal control problem of an MDP with a **known** reward function $R$ and subject to **unknown deterministic** dynamics $s_{t+1} = f(s_t, a_t)$:
$$\max_{(a_0,a_1,\dotsc)} \sum_{t=0}^\infty \gamma^t R(s_t,a_t)$$
In **model-based reinforcement learning**, this problem is solved in **two steps**:
1. **Model learning**:
We learn a model of the dynamics $f_\theta \simeq f$ through regression on interaction data.
2. **Planning**:
We leverage the dynamics model $f_\theta$ to compute the optimal trajectory $$\max_{(a_0,a_1,\dotsc)} \sum_{t=0}^\infty \gamma^t R(\hat{s}_t,a_t)$$ following the learnt dynamics $\hat{s}_{t+1} = f_\theta(\hat{s}_t, a_t)$.
(We can easily extend to unknown rewards and stochastic dynamics, but we consider the simpler case in this notebook for ease of presentation)
## Motivation
### Sparse rewards
* In model-free reinforcement learning, we only obtain a reinforcement signal when encountering rewards. In environment with **sparse rewards**, the chance of obtaining a reward randomly is **negligible**, which prevents any learning.
* However, even in the **absence of rewards** we still receive a **stream of state transition data**. We can exploit this data to learn about the task at hand.
### Complexity of the policy/value vs dynamics:
Is it easier to decide which action is best, or to predict what is going to happen?
* Some problems can have **complex dynamics** but a **simple optimal policy or value function**. For instance, consider the problem of learning to swim. Predicting the movement requires understanding fluid dynamics and vortices while the optimal policy simply consists in moving the limbs in sync.
* Conversely, other problems can have **simple dynamics** but **complex policies/value functions**. Think of the game of Go, its rules are simplistic (placing a stone merely changes the board state at this location) but the corresponding optimal policy is very complicated.
Intuitively, model-free RL should be applied to the first category of problems and model-based RL to the second category.
### Inductive bias
Oftentimes, real-world problems exhibit a particular **structure**: for instance, any problem involving motion of physical objects will be **continuous**. It can also be **smooth**, **invariant** to translations, etc. This knowledge can then be incorporated in machine learning models to foster efficient learning. In contrast, there can often be **discontinuities** in the policy decisions or value function: e.g. think of a collision vs near-collision state.
### Sample efficiency
Overall, it is generally recognized that model-based approaches tend to **learn faster** than model-free techniques (see e.g. [[Sutton, 1990]](http://papersdb.cs.ualberta.ca/~papersdb/uploaded_files/paper_p160-sutton.pdf.stjohn)).
### Interpretability
In real-world applications, we may want to know **how a policy will behave before actually executing it**, for instance for **safety-check** purposes. However, model-free reinforcement learning only recommends which action to take at the current time without being able to predict its consequences. In order to obtain the trajectory, we have no choice but to execute the policy. In stark contrast, model-based methods are more interpretable in the sense that we can probe the policy for its intended (and predicted) trajectory.
## Our challenge: Automated Parking System
We consider the **parking-v0** task of the [highway-env](https://github.com/eleurent/highway-env) environment. It is a **goal-conditioned continuous control** task where an agent **drives a car** by controlling the gas pedal and steering angle and must **park in a given location** with the appropriate heading.
This MDP has several properties which justify using model-based methods:
* The policy/value is highly dependent on the goal which adds a significant level of complexity to a model-free learning process, whereas the dynamics are completely independent of the goal and hence can be simpler to learn.
* In the context of an industrial application, we can reasonably expect for safety concerns that the planned trajectory is required to be known in advance, before execution.
### Warming up
We start with a few useful installs and imports:
"""
# Commented out IPython magic to ensure Python compatibility.
# Install environment and visualization dependencies
# !pip install highway-env
# !pip install gym pyvirtualdisplay
# !apt-get update
# !apt-get install -y xvfb python-opengl ffmpeg -y
# Environment
import gym
import highway_env
# Models and computation
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from collections import namedtuple
# torch.set_default_tensor_type("torch.cuda.FloatTensor")
# Visualization
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
from tqdm.notebook import trange
from IPython import display as ipythondisplay
from pyvirtualdisplay import Display
from gym.wrappers import Monitor
import base64
# IO
from pathlib import Path
"""We also define a simple helper function for visualization of episodes:"""
# display = Display(visible=0, size=(1400, 900))
# display.start()
def show_video(path):
    """Embed every recorded .mp4 under *path* as an autoplaying, looping
    HTML video in the notebook output (videos are base64-inlined)."""
    html = []
    for mp4 in Path(path).glob("*.mp4"):
        video_b64 = base64.b64encode(mp4.read_bytes())
        html.append('''<video alt="{}" autoplay
                    loop controls style="height: 400px;">
                    <source src="data:video/mp4;base64,{}" type="video/mp4" />
                 </video>'''.format(mp4, video_b64.decode('ascii')))
    ipythondisplay.display(ipythondisplay.HTML(data="<br>".join(html)))
"""### Let's try it!
Make the environment, and run an episode with random actions:
"""
env = gym.make("parking-v0")
# Record every episode under ./video (force=True clears previous recordings).
env = Monitor(env, './video', force=True, video_callable=lambda episode: True)
env.reset()
done = False
# Roll out a single episode with uniformly random actions.
while not done:
    action = env.action_space.sample()
    obs, reward, done, info = env.step(action)
    env.render()
env.close()
# show_video('./video')
"""The environment is a `GoalEnv`, which means the agent receives a dictionary containing both the current `observation` and the `desired_goal` that conditions its policy."""
print("Observation format:", obs)
"""There is also an `achieved_goal` that won't be useful here (it only serves when the state and goal spaces are different, as a projection from the observation to the goal space).
Alright! We are now ready to apply the model-based reinforcement learning paradigm.
## Experience collection
First, we randomly interact with the environment to produce a batch of experiences
$$D = \{s_t, a_t, s_{t+1}\}_{t\in[1,N]}$$
"""
# One interaction sample: (s_t, a_t, s_{t+1}); tensors are filled in below.
Transition = namedtuple('Transition', ['state', 'action', 'next_state'])
def collect_interaction_data(env, size=1000, action_repeat=2):
    """Collect random-policy transitions from *env*.

    Samples `size` random actions, each held for `action_repeat` steps, so
    the returned list holds size * action_repeat Transition tuples. `done`
    starts True so the first iteration resets the environment before `obs`
    is ever read.
    """
    data, done = [], True
    for _ in trange(size, desc="Collecting interaction data"):
    # for _ in range(size):
        action = env.action_space.sample()
        for _ in range(action_repeat):
            # Reset on episode end; otherwise continue from the last observation.
            previous_obs = env.reset() if done else obs
            obs, reward, done, info = env.step(action)
            # Only the "observation" entry is kept; the goal fields are ignored.
            data.append(Transition(torch.Tensor(previous_obs["observation"]),
                                   torch.Tensor(action),
                                   torch.Tensor(obs["observation"])))
    return data
# Gather the experience batch D = {(s_t, a_t, s_{t+1})} used for regression.
data = collect_interaction_data(env)
print("Sample transition:", data[0])
"""## Build a dynamics model
We now design a model to represent the system dynamics. We choose a **structured model** inspired from *Linear Time-Invariant (LTI) systems*
$$\dot{x} = f_\theta(x, u) = A_\theta(x, u)x + B_\theta(x, u)u$$
where the $(x, u)$ notation comes from the Control Theory community and stands for the state and action $(s,a)$. Intuitively, we learn at each point $(x_t, u_t)$ the **linearization** of the true dynamics $f$ with respect to $(x, u)$.
We parametrize $A_\theta$ and $B_\theta$ as two fully-connected networks with one hidden layer.
"""
class DynamicsModel(nn.Module):
    """Structured dynamics model x_{t+1} = x_t + (A(x,u) x + B(x,u) u) * dt.

    A and B are state/action-dependent matrices produced by two small
    two-layer MLPs, mimicking a locally linear (LTI-style) system
    integrated with one explicit Euler step of size dt.
    """
    # Indices of the planar position inside the state vector.
    STATE_X = 0
    STATE_Y = 1
    def __init__(self, state_size, action_size, hidden_size, dt):
        # dt: integration step used by the Euler update in forward().
        super().__init__()
        self.state_size, self.action_size, self.dt = state_size, action_size, dt
        A_size, B_size = state_size * state_size, state_size * action_size
        # MLP heads emitting the flattened A (state x state) and B (state x action) matrices.
        self.A1 = nn.Linear(state_size + action_size, hidden_size)
        self.A2 = nn.Linear(hidden_size, A_size)
        self.B1 = nn.Linear(state_size + action_size, hidden_size)
        self.B2 = nn.Linear(hidden_size, B_size)
    def forward(self, x, u):
        """
        Predict x_{t+1} = f(x_t, u_t)

        :param x: a batch of states, shape (batch, state_size)
        :param u: a batch of actions, shape (batch, action_size)
        """
        xu = torch.cat((x, u), -1)
        # Zero the (x, y) entries so the learned dynamics are translation-invariant.
        xu[:, self.STATE_X:self.STATE_Y+1] = 0 # Remove dependency in (x,y)
        A = self.A2(F.relu(self.A1(xu)))
        A = torch.reshape(A, (x.shape[0], self.state_size, self.state_size))
        B = self.B2(F.relu(self.B1(xu)))
        B = torch.reshape(B, (x.shape[0], self.state_size, self.action_size))
        # One explicit Euler step of the locally linear ODE.
        dx = A @ x.unsqueeze(-1) + B @ u.unsqueeze(-1)
        return x + dx.squeeze()*self.dt
# Instantiate the dynamics model from the environment's spaces; dt matches
# the simulator's policy frequency so one forward() equals one env step.
dynamics = DynamicsModel(state_size=env.observation_space.spaces["observation"].shape[0],
                         action_size=env.action_space.shape[0],
                         hidden_size=64,
                         dt=1/env.unwrapped.config["policy_frequency"])
print("Forward initial model on a sample transition:", dynamics(data[0].state.unsqueeze(0),
                                                                data[0].action.unsqueeze(0)).detach())
"""## Fit the model on data
We can now train our model $f_\theta$ in a supervised fashion to minimize an MSE loss $L^2(f_\theta; D)$ over our experience batch $D$ by stochastic gradient descent:
$$L^2(f_\theta; D) = \frac{1}{|D|}\sum_{s_t,a_t,s_{t+1}\in D}||s_{t+1}- f_\theta(s_t, a_t)||^2$$
"""
# Module-level optimizer; train() below reads it as a global.
optimizer = torch.optim.Adam(dynamics.parameters(), lr=0.01)
# Split dataset into training and validation
# NOTE(review): keep training restricted to train_data so the validation
# split stays held out.
train_ratio = 0.7
train_data, validation_data = data[:int(train_ratio * len(data))], \
                              data[int(train_ratio * len(data)):]
def compute_loss(model, data_t, loss_func = torch.nn.MSELoss()):
    """Return loss_func between the model's one-step predictions and the
    true next states of a transposed transition batch.

    Note: the default loss_func is built once at definition time and shared
    across calls (harmless, since MSELoss holds no state).
    """
    states, actions, next_states = data_t
    return loss_func(model(states, actions), next_states)
def transpose_batch(batch):
    """Turn a list of Transition rows into one Transition of stacked tensors."""
    columns = zip(*batch)
    return Transition(*(torch.stack(column) for column in columns))
def train(model, train_data, validation_data, epochs=1500):
    """Fit the dynamics model by full-batch gradient descent.

    Steps the module-level `optimizer` (not a parameter), records the
    validation loss computed before each update, and plots both loss
    curves on a log scale when done.
    """
    train_data_t = transpose_batch(train_data)
    validation_data_t = transpose_batch(validation_data)
    # Column 0: training loss, column 1: validation loss, one row per epoch.
    losses = np.full((epochs, 2), np.nan)
    for epoch in trange(epochs, desc="Train dynamics"):
    # for epoch in range(epochs):
        # Compute loss gradient and step optimizer
        loss = compute_loss(model, train_data_t)
        validation_loss = compute_loss(model, validation_data_t)
        losses[epoch] = [loss.detach().numpy(), validation_loss.detach().numpy()]
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Plot losses
    plt.plot(losses)
    plt.yscale("log")
    plt.xlabel("epochs")
    plt.ylabel("loss")
    plt.legend(["training", "validation"])
    plt.show()
# Train on the training split only; previously the full dataset was passed,
# which leaked the validation samples into training and made the plotted
# validation curve meaningless (train_data was computed but never used).
train(dynamics, train_data, validation_data)
"""## Visualize trained dynamics
In order to qualitatively evaluate our model, we can choose some values of steering angle *(right, center, left)* and acceleration *(slow, fast)* in order to predict and visualize the corresponding trajectories from an initial state.
"""
def predict_trajectory(state, actions, model, action_repeat=1):
| |
# package/PartSeg/common_gui/algorithms_description.py (PartSeg, repo: monotropauniflora/PartSeg)
import collections
import typing
from abc import ABCMeta, abstractmethod
from copy import deepcopy
from enum import Enum
from qtpy.QtCore import Signal
from qtpy.QtGui import QHideEvent, QPainter, QPaintEvent
from qtpy.QtWidgets import (
QApplication,
QCheckBox,
QComboBox,
QFormLayout,
QLabel,
QLineEdit,
QMessageBox,
QScrollArea,
QStackedLayout,
QVBoxLayout,
QWidget,
)
from six import with_metaclass
from PartSeg.common_gui.error_report import ErrorDialog
from PartSegCore.algorithm_describe_base import AlgorithmDescribeBase, AlgorithmProperty, SegmentationProfile
from PartSegCore.channel_class import Channel
from PartSegCore.image_operations import RadiusType
from PartSegCore.segmentation.algorithm_base import (
SegmentationAlgorithm,
SegmentationLimitException,
SegmentationResult,
)
from PartSegImage import Image
from ..common_backend.base_settings import BaseSettings
from ..common_backend.segmentation_thread import SegmentationThread
from .dim_combobox import DimComboBox
from .universal_gui_part import ChannelComboBox, CustomDoubleSpinBox, CustomSpinBox, EnumComboBox
def update(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = update(d.get(k, {}), v)
else:
d[k] = v
return d
class QtAlgorithmProperty(AlgorithmProperty):
qt_class_dict = {
int: CustomSpinBox,
float: CustomDoubleSpinBox,
list: QComboBox,
bool: QCheckBox,
RadiusType: DimComboBox,
}
    def __init__(self, *args, **kwargs):
        """Build the Qt widget for this property and wire up value access.

        Caches the widget, its change signal, and its getter/setter
        callables, then seeds the widget with the property's default value.
        """
        super().__init__(*args, **kwargs)
        self._widget = self._get_field()
        # Qt signal emitted whenever the widget's value changes.
        self.change_fun = self.get_change_signal(self._widget)
        self._getter, self._setter = self.get_getter_and_setter_function(self._widget)
        self._setter(self._widget, self.default_value)
    def get_value(self):
        """Read the current value from the backing Qt widget."""
        return self._getter(self._widget)
def recursive_get_values(self):
if isinstance(self._widget, SubAlgorithmWidget):
return self._widget.recursive_get_values()
else:
return self.get_value()
    def set_value(self, val):
        """Write *val* into the backing Qt widget via the cached setter."""
        return self._setter(self._widget, val)
    def get_field(self) -> QWidget:
        """
        Get representing widget

        :return: the Qt widget backing this property
        :rtype: QWidget
        """
        return self._widget
@classmethod
def from_algorithm_property(cls, ob):
"""
Create class instance base on :py:class:`.AlgorithmProperty` instance
:type ob: AlgorithmProperty | str
:param ob: AlgorithmProperty object or label
:return: QtAlgorithmProperty | QLabel
"""
if isinstance(ob, AlgorithmProperty):
return cls(
name=ob.name,
user_name=ob.user_name,
default_value=ob.default_value,
options_range=ob.range,
single_steep=ob.single_step,
property_type=ob.value_type,
possible_values=ob.possible_values,
help_text=ob.help_text,
per_dimension=ob.per_dimension,
)
elif isinstance(ob, str):
return QLabel(ob)
raise ValueError(f"unknown parameter type {type(ob)} of {ob}")
def _get_field(self) -> QWidget:
"""
Get proper widget for given field type. Overwrite if would like to support new data types.
"""
if self.per_dimension:
self.per_dimension = False
prop = self.from_algorithm_property(self)
self.per_dimension = True
res = ListInput(prop, 3)
elif issubclass(self.value_type, Channel):
res = ChannelComboBox()
res.change_channels_num(10)
return res
elif issubclass(self.value_type, AlgorithmDescribeBase):
res = SubAlgorithmWidget(self)
elif issubclass(self.value_type, bool):
res = QCheckBox()
elif issubclass(self.value_type, int):
res = CustomSpinBox()
if not isinstance(self.default_value, int):
raise ValueError(
f"Incompatible types. default_value should be type of int. Is {type(self.default_value)}"
)
if self.range is not None:
res.setRange(*self.range)
elif issubclass(self.value_type, float):
res = CustomDoubleSpinBox()
if not isinstance(self.default_value, float):
raise ValueError(
f"Incompatible types. default_value should be type of float. Is {type(self.default_value)}"
)
if self.range is not None:
res.setRange(*self.range)
elif issubclass(self.value_type, str):
res = QLineEdit()
elif issubclass(self.value_type, Enum):
res = EnumComboBox(self.value_type)
# noinspection PyUnresolvedReferences
elif issubclass(self.value_type, list):
res = QComboBox()
res.addItems(list(map(str, self.possible_values)))
else:
raise ValueError(f"Unknown class: {self.value_type}")
tool_tip_text = ""
if self.help_text:
tool_tip_text = self.help_text
tool_tip_text += f" default value: {str(self.default_value)}"
res.setToolTip(tool_tip_text)
return res
@staticmethod
def get_change_signal(widget: QWidget):
if isinstance(widget, QComboBox):
return widget.currentIndexChanged
elif isinstance(widget, QCheckBox):
return widget.stateChanged
elif isinstance(widget, (CustomDoubleSpinBox, CustomSpinBox)):
return widget.valueChanged
elif isinstance(widget, QLineEdit):
return widget.textChanged
elif isinstance(widget, SubAlgorithmWidget):
return widget.values_changed
elif isinstance(widget, ListInput):
return widget.change_signal
raise ValueError(f"Unsupported type: {type(widget)}")
@staticmethod
def get_getter_and_setter_function(
widget: QWidget,
) -> typing.Tuple[
typing.Callable[[QWidget,], typing.Any], typing.Callable[[QWidget, typing.Any], None] # noqa E231
]:
"""
For each widget type return proper functions. This functions need instance as first argument
:return: (getter, setter)
"""
if isinstance(widget, ChannelComboBox):
return widget.__class__.get_value, widget.__class__.set_value
if isinstance(widget, EnumComboBox):
return widget.__class__.get_value, widget.__class__.set_value
if isinstance(widget, QComboBox):
return widget.__class__.currentText, widget.__class__.setCurrentText
elif isinstance(widget, QCheckBox):
return widget.__class__.isChecked, widget.__class__.setChecked
elif isinstance(widget, CustomSpinBox):
return widget.__class__.value, widget.__class__.setValue
elif isinstance(widget, CustomDoubleSpinBox):
return widget.__class__.value, widget.__class__.setValue
elif isinstance(widget, QLineEdit):
return widget.__class__.text, widget.__class__.setText
elif isinstance(widget, SubAlgorithmWidget):
return widget.__class__.get_values, widget.__class__.set_values
elif isinstance(widget, ListInput):
return widget.__class__.get_value, widget.__class__.set_value
raise ValueError(f"Unsupported type: {type(widget)}")
class ListInput(QWidget):
    """Widget presenting several copies of one algorithm property so a
    value can be entered per dimension."""
    # Fired whenever any of the wrapped inputs changes.
    change_signal = Signal()
    def __init__(self, property_el: QtAlgorithmProperty, length):
        super().__init__()
        # One independent property clone (and thus widget) per entry.
        self.input_list = [property_el.from_algorithm_property(property_el) for _ in range(length)]
        layout = QVBoxLayout()
        for el in self.input_list:
            el.change_fun.connect(self.change_signal.emit)
            layout.addWidget(el.get_field())
        self.setLayout(layout)
    def get_value(self):
        """Return the list of current values, one per sub-input."""
        return [x.get_value() for x in self.input_list]
    def set_value(self, value):
        """Set values; a scalar is broadcast to every sub-input."""
        if not isinstance(value, (list, tuple)):
            value = [value for _ in range(len(self.input_list))]
        for f, val in zip(self.input_list, value):
            f.set_value(val)
def any_arguments(fun):
    """Wrap ``fun`` so it can be connected to signals of any arity.

    The returned callable discards every positional argument it receives
    and invokes ``fun`` with no arguments.
    """
    def _any(*_ignored):
        fun()
    return _any
class FormWidget(QWidget):
    """Form laying out the widgets for a list of algorithm properties.

    Emits ``value_changed`` when any field changes.
    """
    value_changed = Signal()
    def __init__(self, fields: typing.List[AlgorithmProperty], start_values=None, dimension_num=1):
        # NOTE(review): `dimension_num` is unused in this constructor —
        # verify whether it is consumed elsewhere before removing.
        super().__init__()
        if start_values is None:
            start_values = {}
        # name -> property wrapper for every editable field
        self.widgets_dict: typing.Dict[str, QtAlgorithmProperty] = dict()
        # widgets that need to be told about channel-count changes
        self.channels_chose: typing.List[typing.Union[ChannelComboBox, SubAlgorithmWidget]] = []
        layout = QFormLayout()
        layout.setContentsMargins(10, 0, 10, 0)
        # layout.setVerticalSpacing(0)
        element_list = map(QtAlgorithmProperty.from_algorithm_property, fields)
        for el in element_list:
            if isinstance(el, QLabel):
                # Plain string fields become section labels.
                layout.addRow(el)
            elif isinstance(el.get_field(), SubAlgorithmWidget):
                # Sub-algorithm: the combo-box chooser goes on the label
                # row, the nested form below it.
                label = QLabel(el.user_name)
                if el.help_text:
                    label.setToolTip(el.help_text)
                layout.addRow(label, el.get_field().choose)
                layout.addRow(el.get_field())
                self.widgets_dict[el.name] = el
                if el.name in start_values:
                    el.get_field().set_starting(start_values[el.name])
                el.change_fun.connect(any_arguments(self.value_changed.emit))
            else:
                self.widgets_dict[el.name] = el
                label = QLabel(el.user_name)
                if el.help_text:
                    label.setToolTip(el.help_text)
                layout.addRow(label, el.get_field())
                # noinspection PyUnresolvedReferences
                if issubclass(el.value_type, Channel):
                    # noinspection PyTypeChecker
                    self.channels_chose.append(el.get_field())
                if el.name in start_values:
                    # Saved value may be stale/incompatible; ignore failures.
                    try:
                        el.set_value(start_values[el.name])
                    except (KeyError, ValueError, TypeError):
                        pass
                el.change_fun.connect(any_arguments(self.value_changed.emit))
        self.setLayout(layout)
        self.value_changed.connect(self.update_size)
    def has_elements(self):
        """Return True if the form holds at least one editable field."""
        return len(self.widgets_dict) > 0
    def update_size(self):
        # Keep the widget tall enough for the (possibly grown) layout.
        self.setMinimumHeight(self.layout().minimumSize().height())
    def get_values(self):
        """Return {field name: current value} for every field."""
        return {name: el.get_value() for name, el in self.widgets_dict.items()}
    def recursive_get_values(self):
        """Like :meth:`get_values`, but descends into sub-algorithm widgets."""
        return {name: el.recursive_get_values() for name, el in self.widgets_dict.items()}
    def set_values(self, values: dict):
        """Set field values from a dict; unknown names are ignored."""
        for name, value in values.items():
            if name in self.widgets_dict:
                self.widgets_dict[name].set_value(value)
    def image_changed(self, image: Image):
        """Propagate a new image's channel count to channel selectors."""
        if not image:
            return
        for channel_widget in self.channels_chose:
            if isinstance(channel_widget, ChannelComboBox):
                channel_widget.change_channels_num(image.channels)
            else:
                # SubAlgorithmWidget forwards the whole image itself.
                channel_widget.change_channels_num(image)
class SubAlgorithmWidget(QWidget):
    """Widget for a property whose value is itself an algorithm choice.

    Shows a combo box of algorithm names plus the form of the currently
    selected algorithm. Forms are created lazily on first selection.
    """
    values_changed = Signal()
    def __init__(self, algorithm_property: AlgorithmProperty):
        super().__init__()
        if not isinstance(algorithm_property.possible_values, dict):
            raise ValueError(
                "algorithm_property.possible_values should be dict." f"It is {type(algorithm_property.possible_values)}"
            )
        if not isinstance(algorithm_property.default_value, str):
            raise ValueError(
                "algorithm_property.default_value should be str." f"It is {type(algorithm_property.default_value)}"
            )
        # Per-algorithm saved values, applied when a form is first built.
        self.starting_values = {}
        self.property = algorithm_property
        # algorithm name -> lazily created FormWidget
        self.widgets_dict: typing.Dict[str, FormWidget] = {}
        # TODO protect for recursion
        widget = FormWidget(algorithm_property.possible_values[algorithm_property.default_value].get_fields())
        widget.layout().setContentsMargins(0, 0, 0, 0)
        widget.value_changed.connect(self.values_changed)
        self.widgets_dict[algorithm_property.default_value] = widget
        self.choose = QComboBox(self)
        self.choose.addItems(list(algorithm_property.possible_values.keys()))
        self.setContentsMargins(0, 0, 0, 0)
        self.choose.setCurrentText(algorithm_property.default_value)
        self.choose.currentTextChanged.connect(self.algorithm_choose)
        # self.setStyleSheet("border: 1px solid red")
        layout = QVBoxLayout()
        layout.setContentsMargins(4, 4, 4, 4)
        layout.addWidget(widget)
        if not widget.has_elements():
            # Nothing to edit: hide the form and the whole widget.
            widget.hide()
            self.hide()
        tmp_widget = QWidget(self)
        # tmp_widget.setMinimumHeight(5000)
        layout.addWidget(tmp_widget)
        self.tmp_widget = tmp_widget
        self.setLayout(layout)
    def set_starting(self, starting_values):
        """Store saved per-algorithm values for lazily created forms."""
        self.starting_values = starting_values
    def set_values(self, val: dict):
        """Restore state from {"name": ..., "values": ...}; non-dicts ignored."""
        if not isinstance(val, dict):
            return
        self.choose.setCurrentText(val["name"])
        if val["name"] not in self.widgets_dict:
            # Force creation of the form if the signal did not fire.
            self.algorithm_choose(val["name"])
        if val["name"] in self.widgets_dict:
            self.widgets_dict[val["name"]].set_values(val["values"])
    def recursive_get_values(self):
        """Return values of every form created so far, keyed by algorithm."""
        return {name: el.recursive_get_values() for name, el in self.widgets_dict.items()}
    def get_values(self):
        """Return {"name": selected algorithm, "values": its form values}."""
        name = self.choose.currentText()
        values = self.widgets_dict[name].get_values()
        return {"name": name, "values": values}
    def change_channels_num(self, image: Image):
        """Forward a new image to every nested FormWidget."""
        for i in range(self.layout().count()):
            el = self.layout().itemAt(i)
            if el.widget() and isinstance(el.widget(), FormWidget):
                el.widget().image_changed(image)
    def algorithm_choose(self, name):
        """Switch the visible form to algorithm ``name``, building it lazily."""
        if name not in self.widgets_dict:
            if name not in self.property.possible_values:
                return
            start_dict = {} if name not in self.starting_values else self.starting_values[name]
            try:
                self.widgets_dict[name] = FormWidget(
                    self.property.possible_values[name].get_fields(), start_values=start_dict
                )
            except KeyError as e:
                raise e
            self.widgets_dict[name].layout().setContentsMargins(0, 0, 0, 0)
            self.layout().addWidget(self.widgets_dict[name])
            self.widgets_dict[name].value_changed.connect(self.values_changed)
        widget = self.widgets_dict[name]
        # Hide everything, then show only the selected form (if non-empty).
        for i in range(self.layout().count()):
            lay_elem = self.layout().itemAt(i)
            if lay_elem.widget():
                lay_elem.widget().hide()
        if widget.has_elements():
            self.show()
            widget.show()
        else:
            self.hide()
        self.values_changed.emit()
    def showEvent(self, _event):
        # workaround for changing size
        self.tmp_widget.hide()
    def paintEvent(self, event: QPaintEvent):
        # Draw a frame around the widget only on full repaints.
        name = self.choose.currentText()
        if self.widgets_dict[name].has_elements() and event.rect().top() == 0 and event.rect().left() == 0:
            painter = QPainter(self)
            painter.drawRect(event.rect())
class AbstractAlgorithmSettingsWidget(with_metaclass(ABCMeta, object)):
    """Abstract interface for algorithm settings widgets.

    Uses six-style ``with_metaclass`` for Python 2/3 compatible ABCs.
    """
    def __init__(self):
        pass
    @abstractmethod
    def get_values(self):
        """
        :return: dict[str, object]
        """
        return dict()
class BaseAlgorithmSettingsWidget(QScrollArea):
    """Scroll area hosting the parameter form for one segmentation
    algorithm and the worker thread that runs it."""
    values_changed = Signal()
    algorithm_thread: SegmentationThread
    def __init__(self, settings: BaseSettings, name, algorithm: typing.Type[SegmentationAlgorithm]):
        """
        For algorithm which works on one channel
        """
        super().__init__()
        self.settings = settings
        self.widget_list = []
        self.name = name
        self.algorithm = algorithm
        main_layout = QVBoxLayout()
        # Label used for progress/info messages from the worker thread.
        self.info_label = QLabel()
        self.info_label.setHidden(True)
        main_layout.addWidget(self.info_label)
        # Restore previously saved widget state for this algorithm, if any.
        start_values = settings.get(f"algorithm_widget_state.{name}", dict())
        self.form_widget = FormWidget(algorithm.get_fields(), start_values=start_values)
        self.form_widget.value_changed.connect(self.values_changed.emit)
        # self.form_widget.setMinimumHeight(1500)
        self.setWidget(self.form_widget)
        value_dict = self.settings.get(f"algorithms.{self.name}", {})
        self.set_values(value_dict)
        # self.settings.image_changed[Image].connect(self.image_changed)
        # Worker thread wrapping a fresh algorithm instance.
        self.algorithm_thread = SegmentationThread(algorithm())
        self.algorithm_thread.info_signal.connect(self.show_info)
        self.algorithm_thread.exception_occurred.connect(self.exception_occurred)
    def exception_occurred(self, exc: Exception):
        """Show a friendly dialog for known segmentation failures;
        fall back to a full error dialog otherwise."""
        if isinstance(exc, SegmentationLimitException):
            mess = QMessageBox()
            mess.setIcon(QMessageBox.Critical)
            mess.setText("During segmentation process algorithm meet limitations:\n" + "\n".join(exc.args))
            mess.setWindowTitle("Segmentation limitations")
            mess.exec()
            return
        # Special-case a known SimpleITK failure mode.
        if isinstance(exc, RuntimeError) and exc.args[0].startswith(
            "Exception thrown in SimpleITK KittlerIllingworthThreshold"
        ):
            mess = QMessageBox()
            mess.setIcon(QMessageBox.Critical)
            mess.setText("Fail to apply Kittler Illingworth to current data\n" + exc.args[0].split("\n")[1])
            mess.setWindowTitle("Segmentation limitations")
            mess.exec()
            return
        dial = ErrorDialog(exc, "Error during segmentation", f"{QApplication.instance().applicationName()}")
        dial.exec()
    def show_info(self, text):
        """Display an info message from the worker thread."""
        self.info_label.setText(text)
        self.info_label.setVisible(True)
    def image_changed(self, image: Image):
        """Forward a new image to both the form and the algorithm."""
        self.form_widget.image_changed(image)
        self.algorithm_thread.algorithm.set_image(image)
    def set_mask(self, mask):
        """Pass a mask through to the underlying algorithm."""
        self.algorithm_thread.algorithm.set_mask(mask)
    def set_values(self, values_dict):
        self.form_widget.set_values(values_dict)
    def get_values(self):
        return self.form_widget.get_values()
    def channel_num(self):
        # NOTE(review): `self.channels_chose` is never assigned in this
        # class — presumably dead code or a stale rename; verify callers.
        return self.channels_chose.currentIndex()
    def execute(self, exclude_mask=None):
        """Persist current values and start the segmentation thread.

        `exclude_mask` is unused here; kept for subclass signature parity.
        """
        values = self.get_values()
        # deepcopy so later widget edits don't mutate the stored profile.
        self.settings.set(f"algorithms.{self.name}", deepcopy(values))
        self.algorithm_thread.set_parameters(**values)
        self.algorithm_thread.start()
    def hideEvent(self, a0: QHideEvent):
        # Release algorithm resources when the widget is hidden.
        self.algorithm_thread.clean()
    def recursive_get_values(self):
        return self.form_widget.recursive_get_values()
class AlgorithmSettingsWidget(BaseAlgorithmSettingsWidget):
    """Settings widget that feeds the current settings image into the
    algorithm just before each execution."""
    def execute(self, exclude_mask=None):
        self.algorithm_thread.algorithm.set_image(self.settings.image)
        super().execute(exclude_mask)
class InteractiveAlgorithmSettingsWidget(BaseAlgorithmSettingsWidget):
    """Settings widget that re-runs segmentation on value changes
    (interactive mode) and disables selector widgets while running."""
    algorithm_thread: SegmentationThread
    def __init__(self, settings, name, algorithm: typing.Type[SegmentationAlgorithm], selector: typing.List[QWidget]):
        super().__init__(settings, name, algorithm)
        # Widgets to lock while the segmentation thread is running.
        self.selector = selector
        self.algorithm_thread.finished.connect(self.enable_selector)
        self.algorithm_thread.started.connect(self.disable_selector)
        # noinspection PyUnresolvedReferences
        if hasattr(settings, "mask_changed"):
            settings.mask_changed.connect(self.change_mask)
    def value_updated(self):
        """Re-run segmentation, but only when interactive mode is on."""
        if not self.parent().interactive:
            return
        self.execute()
    def change_mask(self):
        """Push the settings mask to the algorithm when visible."""
        if not self.isVisible():
            return
        self.algorithm_thread.algorithm.set_mask(self.settings.mask)
    def disable_selector(self):
        for el in self.selector:
            el.setDisabled(True)
    def enable_selector(self):
        for el in self.selector:
            el.setEnabled(True)
    def get_segmentation_profile(self) -> SegmentationProfile:
        """Return a profile snapshot of the current algorithm and values."""
        return SegmentationProfile("", self.algorithm.get_name(), self.get_values())
class AlgorithmChoose(QWidget):
finished = Signal()
started = Signal()
result = Signal(SegmentationResult)
value_changed = Signal()
progress_signal = | |
# reponame: nazihkalo/Crypto-Social-Scraper-App
import json
import os
from pathlib import Path
import time
from typing import List
import pandas as pd
import requests
from dotenv import load_dotenv
from rich import print
from rich.progress import track
from tqdm import tqdm
# from top_github_scraper.utils import ScrapeGithubUrl, UserProfileGetter, isnotebook
import logging
from pathlib import Path
from dotenv import load_dotenv
from datetime import datetime
from github import Github
from ratelimit import limits
import requests
# get the standard UTC time
# g = Github(TOKEN)
# current_timezone = pytz.timezone('US/Pacific')
# reset_timestamp = g.get_rate_limit().core.reset.astimezone(current_timezone)
# sleep_time = (reset_timestamp - datetime.now(current_timezone)).seconds + 5
# CSV produced by the repo-merge step; one repository path per row.
repo_list_file = "data/merged_repos.csv"
# GitHub API rate-limit window, in seconds.
ONE_HOUR = 3600
from IPython import get_ipython
def isnotebook():
    """Return True when running inside a Jupyter notebook or qtconsole.

    Detection is based on the class name of the active IPython shell;
    a terminal IPython session or a plain interpreter reports False.
    """
    try:
        shell_name = get_ipython().__class__.__name__
    except NameError:
        # get_ipython is not defined: standard Python interpreter.
        return False
    return shell_name == 'ZMQInteractiveShell'
class RepoScraper:
    """Scrape information of repos and the
    contributors of those repositories."""
    # Fields copied verbatim from GitHub's "get repository" JSON response.
    _INFO_TO_SCRAPE = [
        "created_at",
        "updated_at",
        "pushed_at",
        "size",
        "stargazers_count",
        "watchers_count",
        "language",
        "has_issues",
        "has_projects",
        "has_downloads",
        "has_wiki",
        "has_pages",
        "forks_count",
    ]
    def __init__(
        self, repo_urls: list, max_n_top_contributors: int, USERNAME: str, TOKEN: str
    ):
        """
        Parameters
        ----------
        repo_urls:
            Repository paths of the form "/owner/name".
        max_n_top_contributors:
            Cap on how many top contributors to record per repo.
        USERNAME, TOKEN:
            GitHub credentials used for basic-auth REST calls.
        """
        self.repo_urls = repo_urls
        self.max_n_top_contributors = max_n_top_contributors
        self.USERNAME = USERNAME
        self.TOKEN = TOKEN
        self.github = Github(TOKEN)
    @limits(calls=5000, period=ONE_HOUR)
    def call_api(self, url, *args):
        """GET ``url`` with basic auth; raises via @limits when the
        local 5000-calls/hour budget is exceeded."""
        return requests.get(url, auth=(self.USERNAME, self.TOKEN))
    # This method is used to limit the rate of requests sent to GitHub
    def __choke(self):
        """Sleep until the GitHub rate limit resets when nearly exhausted."""
        if self.github.get_rate_limit().core.remaining < 3:
            naptime = (
                self.github.get_rate_limit().core.reset - datetime.now()
            ).seconds + 5
            print(f"About to exceed rate limit :/ sleeping for {naptime} seconds")
            time.sleep(naptime)
            print("Done sleeping - back to work!")
    def get_all_top_repo_information(self):
        """Scrape metadata for every repo in ``self.repo_urls``.

        Uses tqdm in notebooks and rich.track in terminals for progress.
        """
        top_repo_infos = []
        if isnotebook():
            iterator = tqdm(self.repo_urls, desc="Scraping top GitHub repositories...")
        else:
            iterator = track(self.repo_urls, description="Scraping top GitHub repositories...")
        for repo_url in iterator:
            top_repo_infos.append(self._get_repo_information(repo_url))
        print(f"Finished getting repo info for {len(self.repo_urls)} repos!")
        return top_repo_infos
    def _get_repo_information(self, repo_url: str):
        """Fetch one repo's metadata and top contributors.

        On any failure returns a row filled with "Invalid Repo" markers so
        downstream tables stay rectangular.
        """
        self.__choke()
        repo_info_url = f"https://api.github.com/repos{repo_url}"
        repo_important_info = {"repo": repo_url}
        # BUG FIX: bind these BEFORE the try block. The original defined
        # `info_to_scrape` and `repo_resp` inside the try, so a failure in
        # call_api() made the except branch itself raise NameError.
        repo_resp = None
        try:
            repo_resp = self.call_api(repo_info_url)
            repo_info = repo_resp.json()
            for info in self._INFO_TO_SCRAPE:
                repo_important_info[info] = repo_info.get(info, None)
            repo_important_info[
                "contributors"
            ] = self._get_contributor_repo_of_one_repo(repo_url)
            return repo_important_info
        except Exception as e:
            status = repo_resp.status_code if repo_resp is not None else "no response"
            print(f"Request for {repo_url} failed due to {e} - status code {status}")
            for info in self._INFO_TO_SCRAPE:
                repo_important_info[info] = "Invalid Repo"
            repo_important_info["contributors"] = "Invalid Repo"
            return repo_important_info
    def _get_contributor_repo_of_one_repo(self, repo_url: str):
        """Return login/url/contributions lists for the top contributors.

        Returns the (possibly partial) dict-of-lists on failure.
        """
        self.__choke()
        contributor_url = f"https://api.github.com/repos{repo_url}/contributors"
        contributor_page_resp = self.call_api(contributor_url)
        contributor_page = contributor_page_resp.json()
        contributors_info = {"login": [], "url": [], "contributions": []}
        try:
            max_n_top_contributors = self._find_max_n_top_contributors(
                num_contributors=len(contributor_page)
            )
            # GitHub returns contributors sorted by contribution count,
            # so the first N entries are the top N.
            for contributor in contributor_page[:max_n_top_contributors]:
                self._get_contributor_general_info(contributors_info, contributor)
            return contributors_info
        except Exception as e:
            # An error payload (a dict, not a list) lands here.
            print(contributor_page)
            print(
                f"Failed to retrieve top contributors for {repo_url} due to {e} - status code {contributor_page_resp.status_code}"
            )
            return contributors_info
    @staticmethod
    def _get_contributor_general_info(contributors_info: dict, contributor: dict):
        """Append one contributor's login/url/contributions to the lists."""
        contributors_info["login"].append(contributor["login"])
        contributors_info["url"].append(contributor["url"])
        contributors_info["contributions"].append(contributor["contributions"])
    def _find_max_n_top_contributors(self, num_contributors: int):
        """Clamp the requested top-N to the number of contributors found."""
        return min(num_contributors, self.max_n_top_contributors)
class RepoStatsScraper:
    """Scrape weekly star / commit / code-frequency statistics for GitHub
    repositories via PyGithub.

    TODO(review): ``_get_repo_weekly_stats`` and
    ``_get_repo_weekly_stats_from_date`` share most of their body
    (DataFrame building and merging) — candidate for extraction.
    """
    def __init__(self, USERNAME: str, TOKEN: str, since: datetime = None):
        self.USERNAME = USERNAME
        self.TOKEN = TOKEN
        self.github = Github(TOKEN)
        # Optional lower bound for incremental scraping.
        self.since = since
    # This method is used to limit the rate of requests sent to GitHub
    def __choke(self):
        """Sleep until the GitHub rate limit resets when nearly exhausted."""
        remaining = self.github.get_rate_limit().core.remaining
        print(f"There are {remaining} remaining requests before ratelimiting")
        if remaining < 3:
            naptime = (
                self.github.get_rate_limit().core.reset - datetime.now()
            ).seconds + 5
            print(f"About to exceed rate limit :/ sleeping for {naptime} seconds")
            time.sleep(naptime)
            print("Done sleeping - back to work!")
    def _get_repo_weekly_stats(self, repo_url: str):
        """Return a weekly-aggregated DataFrame of stars, commits and
        additions/deletions for ``repo_url``; an empty frame on failure."""
        self.__choke()
        try:
            if repo_url.startswith("/"):
                repo_url = repo_url[1:]
            repo = self.github.get_repo(repo_url, lazy=False)
            starPages = [
                (stargazer.user.login, stargazer.starred_at)
                for stargazer in repo.get_stargazers_with_dates()
            ]
            # statsContributorsPages = repo.get_stats_contributors()
            statsCommitActivityPages = [
                week.raw_data for week in repo.get_stats_commit_activity()
            ]
            statsCodeFrequencyPages = [
                week.raw_data for week in repo.get_stats_code_frequency()
            ]
            # Stars over time, bucketed by week.
            if len(starPages) > 0:
                stargazer_dates_df = pd.DataFrame(starPages).rename(
                    columns={0: "stargazer", 1: "starred_at"}
                )
                stars_by_day = stargazer_dates_df.groupby(
                    pd.Grouper(key="starred_at", freq="1W")
                ).agg(
                    {
                        "stargazer": [list, "size"],
                    }
                )  # stargazer_dates_df.groupby(stargazer_dates_df.starred_at.dt.date).agg({'stargazer':[list, 'size'], })
                # Flatten the (column, aggfunc) MultiIndex into plain names.
                stars_by_day.columns = [i + "_" + y for i, y in stars_by_day.columns]
                stars_by_day.reset_index(inplace=True)
                stars_by_day["starred_at"] = pd.to_datetime(stars_by_day.starred_at)
            else:
                stars_by_day = pd.DataFrame(
                    columns=[
                        "starred_at",
                        "stargazer_list",
                        "stargazer_size",
                    ]
                )
            ### Commit Frequency
            if len(statsCommitActivityPages) > 0:
                statsCommitActivity_df = pd.DataFrame(statsCommitActivityPages).rename(
                    columns={"total": "total_commits", "days": "commits_per_day"}
                )
                statsCommitActivity_df["week"] = pd.to_datetime(
                    statsCommitActivity_df.week, unit="s"
                )
            else:
                statsCommitActivity_df = pd.DataFrame(
                    columns=[
                        "week",
                        "total_commits",
                        "commits_per_day",
                    ]
                )
            ### Code frequency
            if len(statsCodeFrequencyPages) > 0:
                statsCodeFrequencyPages_df = pd.DataFrame(statsCodeFrequencyPages)
                statsCodeFrequencyPages_df.rename(
                    columns={0: "week", 1: "additions", 2: "deletions"}, inplace=True
                )
                statsCodeFrequencyPages_df["week"] = pd.to_datetime(
                    statsCodeFrequencyPages_df.week, unit="s"
                )
            else:
                statsCodeFrequencyPages_df = pd.DataFrame(
                    columns=["week", "additions", "deletions"]
                )
            # merge data: outer joins keep weeks present in only one table.
            commits_add_delete_df = pd.merge(
                statsCodeFrequencyPages_df,
                statsCommitActivity_df,
                left_on="week",
                right_on="week",
                how="outer",
            )
            commits_add_delete_and_stars_df = pd.merge(
                stars_by_day,
                commits_add_delete_df,
                left_on="starred_at",
                right_on="week",
                how="outer",
            )
            commits_add_delete_and_stars_df["repo_path"] = repo_url
            return commits_add_delete_and_stars_df
        except Exception as e:
            print(f"Request for {repo_url} failed due to {e}")
            # Empty frame with the full expected schema keeps concat safe.
            return pd.DataFrame(
                columns=[
                    "starred_at",
                    "stargazer_list",
                    "stargazer_size",
                    "week",
                    "additions",
                    "deletions",
                    "total_commits",
                    "commits_per_day",
                    "repo_path",
                ]
            )
    def _get_repo_weekly_stats_from_date(self, repo_url: str, from_datetime):
        """Incremental variant of :meth:`_get_repo_weekly_stats`: walks the
        paginated results newest-first and stops at ``from_datetime``."""
        self.__choke()
        # from_datetime = datetime.strptime('2022-01-30', '%Y-%m-%d')
        # from_datetime = datetime.strptime(since, '%Y-%m-%d')
        try:
            if repo_url.startswith("/"):
                repo_url = repo_url[1:]
            repo = self.github.get_repo(repo_url, lazy=False)
            # Get reversed order
            starPages = []
            statsCommitActivityPages = []
            statsCodeFrequencyPages = []
            stargazersPaginated = repo.get_stargazers_with_dates().reversed
            # commitActivityPaginated = reversed(repo.get_stats_commit_activity()[-1].weeks)
            # codeFrequencyPaginated = reversed(repo.get_stats_code_frequency()[-1].weeks)
            commitActivityPaginated = reversed(repo.get_stats_commit_activity())
            codeFrequencyPaginated = reversed(repo.get_stats_code_frequency())
            # Only get new info: iterate newest-first, break at the cutoff.
            for stargazer in stargazersPaginated:
                if stargazer.starred_at > from_datetime:
                    starPages.append((stargazer.user.login, stargazer.starred_at))
                else:
                    break
            for week in commitActivityPaginated:
                if week.week > from_datetime:
                    statsCommitActivityPages.append(week.raw_data)
                else:
                    break
            for week in codeFrequencyPaginated:
                if week.week > from_datetime:
                    statsCodeFrequencyPages.append(week.raw_data)
                else:
                    break
            # Stars over time, bucketed by week.
            if len(starPages) > 0:
                stargazer_dates_df = pd.DataFrame(starPages).rename(
                    columns={0: "stargazer", 1: "starred_at"}
                )
                stars_by_day = stargazer_dates_df.groupby(
                    pd.Grouper(key="starred_at", freq="1W")
                ).agg(
                    {
                        "stargazer": [list, "size"],
                    }
                )  # stargazer_dates_df.groupby(stargazer_dates_df.starred_at.dt.date).agg({'stargazer':[list, 'size'], })
                # Flatten the (column, aggfunc) MultiIndex into plain names.
                stars_by_day.columns = [i + "_" + y for i, y in stars_by_day.columns]
                stars_by_day.reset_index(inplace=True)
                stars_by_day["starred_at"] = pd.to_datetime(stars_by_day.starred_at)
            else:
                stars_by_day = pd.DataFrame(
                    columns=[
                        "starred_at",
                        "stargazer_list",
                        "stargazer_size",
                    ]
                )
            ### Commit Frequency
            if len(statsCommitActivityPages) > 0:
                statsCommitActivity_df = pd.DataFrame(statsCommitActivityPages).rename(
                    columns={"total": "total_commits", "days": "commits_per_day"}
                )
                statsCommitActivity_df["week"] = pd.to_datetime(
                    statsCommitActivity_df.week, unit="s"
                )
            else:
                statsCommitActivity_df = pd.DataFrame(
                    columns=[
                        "week",
                        "total_commits",
                        "commits_per_day",
                    ]
                )
            ### Code frequency
            if len(statsCodeFrequencyPages) > 0:
                statsCodeFrequencyPages_df = pd.DataFrame(statsCodeFrequencyPages)
                statsCodeFrequencyPages_df.rename(
                    columns={0: "week", 1: "additions", 2: "deletions"}, inplace=True
                )
                statsCodeFrequencyPages_df["week"] = pd.to_datetime(
                    statsCodeFrequencyPages_df.week, unit="s"
                )
            else:
                statsCodeFrequencyPages_df = pd.DataFrame(
                    columns=["week", "additions", "deletions"]
                )
            # merge data: outer joins keep weeks present in only one table.
            commits_add_delete_df = pd.merge(
                statsCodeFrequencyPages_df,
                statsCommitActivity_df,
                left_on="week",
                right_on="week",
                how="outer",
            )
            commits_add_delete_and_stars_df = pd.merge(
                stars_by_day,
                commits_add_delete_df,
                left_on="starred_at",
                right_on="week",
                how="outer",
            )
            commits_add_delete_and_stars_df["repo_path"] = repo_url
            return commits_add_delete_and_stars_df
        except Exception as e:
            print(f"Request for {repo_url} failed due to {e}")
            # Empty frame with the full expected schema keeps concat safe.
            return pd.DataFrame(
                columns=[
                    "starred_at",
                    "stargazer_list",
                    "stargazer_size",
                    "week",
                    "additions",
                    "deletions",
                    "total_commits",
                    "commits_per_day",
                    "repo_path",
                ]
            )
from dataclasses import dataclass
from bs4 import BeautifulSoup
import requests
from rich.progress import track
from rich import print
import pandas as pd
import os
import warnings
from dotenv import load_dotenv
from typing import List
from IPython import get_ipython
from tqdm import tqdm
import logging
# Load GitHub credentials from .env and silence noisy library warnings.
load_dotenv()
warnings.filterwarnings("ignore")
# GitHub search facets plus per-type sort keys and result CSS classes.
TYPES = ["Users", "Repositories", "Code", "Commits", "Issues", "Packages", "Topics"]
SORT_BY = {"Users": ["followers"], "Repositories": ["", "stars"]}
SCRAPE_CLASS = {"Users": "mr-1", "Repositories": "v-align-middle"}
USERNAME = os.getenv("GITHUB_USERNAME_LIVE")
TOKEN = os.getenv("GITHUB_TOKEN_LIVE")
class ScrapeGithubUrl:
"""Scrape top Github urls based on a certain keyword and type
Parameters
-------
keyword: str
keyword to search on Github
type: str
whether to search for User or Repositories
sort_by: str
sort by best match or most stars, by default 'best_match', which will sort by best match.
Use 'stars' to sort by most stars.
start_page_num: int
page number to start scraping. The default is 0
stop_page_num: int
page number to stop scraping
Returns
-------
List[str]
"""
def __init__(
self,
keyword: str,
type: str,
sort_by: str,
start_page_num: int,
stop_page_num: int,
):
self.keyword = keyword
self.type = type
self.start_page_num = start_page_num
self.stop_page_num = stop_page_num
if sort_by == "best_match":
self.sort_by = ""
else:
self.sort_by = sort_by
@staticmethod
def _keyword_to_url(page_num: int, keyword: str, type: str, sort_by: str):
"""Change keyword to a url"""
keyword_no_space = ("+").join(keyword.split(" "))
return f"https://github.com/search?o=desc&p={str(page_num)}&q={keyword_no_space}&s={sort_by}&type={type}"
def _scrape_top_repo_url_one_page(self, page_num: int):
"""Scrape urls of top Github repositories in 1 page"""
url = self._keyword_to_url(
page_num, self.keyword, type=self.type, sort_by=self.sort_by
)
page = requests.get(url)
soup = BeautifulSoup(page.text, "html.parser")
a_tags = soup.find_all("a", class_=SCRAPE_CLASS[self.type])
urls = [a_tag.get("href") for a_tag in a_tags]
return urls
def scrape_top_repo_url_multiple_pages(self):
"""Scrape urls of top Github repositories in multiple pages"""
urls = []
if isnotebook():
for page_num in tqdm(
range(self.start_page_num, self.stop_page_num),
| |
# filename: pygame_vkeyboard/vtextinput.py (gh_stars: 0)
#!/usr/bin/env python
# coding: utf8
"""
Text box to display the current text. The mouse events are
supported to move the cursor at the desired place.
"""
import pygame # pylint: disable=import-error
from .vrenderers import VKeyboardRenderer
class VBackground(pygame.sprite.DirtySprite):
    """Background of the text input box. It is used to create
    borders by making its size a little bit larger than the
    lines width and the sum of lines heights.
    """
    def __init__(self, size, renderer):
        """Default constructor.
        Parameters
        ----------
        size:
            Size tuple (width, height) of the background.
        renderer:
            Renderer used to draw the background.
        """
        super(VBackground, self).__init__()
        self.renderer = renderer
        # Per-pixel-alpha surface so the renderer can draw transparency.
        self.image = pygame.Surface(size, pygame.SRCALPHA, 32)
        self.rect = pygame.Rect((0, 0), size)
        self.renderer.draw_background(self.image)
    def set_rect(self, x, y, width, height):
        """Set the background absolute position and size.

        Marks the sprite dirty only when something actually changed;
        a size change recreates and redraws the surface.
        Parameters
        ----------
        x:
            Position x.
        y:
            Position y.
        width:
            Background width.
        height:
            Background height.
        """
        if self.rect.topleft != (x, y):
            self.rect.topleft = (x, y)
            self.dirty = 1
        if self.rect.size != (width, height):
            self.rect.size = (width, height)
            self.image = pygame.Surface((width, height), pygame.SRCALPHA, 32)
            self.renderer.draw_background(self.image)
            self.dirty = 1
class VCursor(pygame.sprite.DirtySprite):
    """Handles the cursor.
    The ``index`` represente the absolute position which is the number
    of characters before it, all lines included.
    """
    def __init__(self, size, renderer):
        """Default constructor.
        Parameters
        ----------
        size:
            Size tuple (width, height) of the cursor.
        renderer:
            Renderer used to draw the cursor.
        """
        super(VCursor, self).__init__()
        self.renderer = renderer
        self.image = pygame.Surface(size, pygame.SRCALPHA, 32)
        self.rect = self.image.get_rect()
        # Absolute character index of the cursor (all lines included).
        self.index = 0
        self.selected = 0
        # Blink management: toggle visibility every `switch_ms` of
        # accumulated clock time (see update()).
        self.clock = pygame.time.Clock()
        self.switch_ms = 400
        self.switch_counter = 0
        self.renderer.draw_cursor(self.image, self)
    def set_position(self, position):
        """Set the cursor absolute position.
        Parameters
        ----------
        position:
            Position tuple (x, y).
        """
        if self.rect.topleft != position:
            self.rect.topleft = position
            self.dirty = 1
    def set_index(self, index):
        """Move the cursor at the given index.
        Parameters
        ----------
        index:
            Absolute (sum all lines) cursor index.
        """
        if index != self.index:
            self.index = index
            self.dirty = 1
    def set_selected(self, state):
        """Set the key selection state (1 for selected else 0)
        and redraws it.
        Parameters
        ----------
        state:
            New key state.
        """
        if self.selected != int(state):
            self.selected = int(state)
            self.renderer.draw_cursor(self.image, self)
            self.dirty = 1
    def update(self, events):
        """Toggle visibility of the cursor (blink) based on elapsed time."""
        self.clock.tick()
        self.switch_counter += self.clock.get_time()
        if self.switch_counter >= self.switch_ms:
            self.switch_counter %= self.switch_ms
            self.visible = int(not self.visible)
            self.dirty = 1
class VLine(pygame.sprite.DirtySprite):
    """Handles a line of text. A line can be fed until the text
    width reaches the line width.
    By default, when the line is empty, its ``visible`` attribute
    is set to 0 to hide the line.
    """
    def __init__(self, size, renderer, always_visible=False):
        """Default constructor.
        Parameters
        ----------
        size:
            Size tuple (width, height) of the line.
        renderer:
            Renderer used to draw the line.
        always_visible:
            If True, the line will be never hidden even if it is empty.
        """
        super(VLine, self).__init__()
        self.renderer = renderer
        self.image = pygame.Surface(size, pygame.SRCALPHA, 32)
        self.rect = pygame.Rect((0, 0), size)
        self.text = ''
        # True once the rendered text fills the full line width.
        self.full = False
        self.always_visible = always_visible
        self.renderer.draw_text(self.image, '')
    def __len__(self):
        """Return the number of characters in the line."""
        return len(self.text)
    def set_position(self, position):
        """Set the line absolute position.
        Parameters
        ----------
        position:
            Position tuple (x, y).
        """
        if self.rect.topleft != position:
            self.rect.topleft = position
            self.dirty = 1
    def clear(self):
        """Clear the current text and hide the line unless always_visible."""
        if self.text:
            self.text = ''
            self.full = False
            self.renderer.draw_text(self.image, '')
            if not self.always_visible:
                self.visible = 0
            else:
                self.dirty = 1
        return self.text
    def feed(self, text):
        """Feed the line with the given text. The current text is
        cleared if an empty string is given.
        Parameters
        ----------
        text:
            Text to feed in.
        Returns
        -------
        remain:
            Return the remaining text if the line is full.
        """
        if not text:
            return self.clear()
        elif self.text:
            if text.startswith(self.text):
                # Unchanged prefix: a full line can hand off the overflow
                # without re-truncating or redrawing.
                if self.full:
                    return text[len(self.text):]
            else:
                # Prefix changed: force a full re-truncation below.
                self.text = ''
        # Truncate to what fits in the line width; the third argument
        # lets the renderer skip measuring the already-known prefix.
        self.text, _ = self.renderer.truncate(text,
                                              self.rect.width,
                                              len(self.text))
        if text[len(self.text):]:
            self.full = True
        else:
            self.full = False
            self.dirty = 1
        self.visible = 1  # Show line
        self.renderer.draw_text(self.image, self.text)
        return text[len(self.text):]
class VTextInput(object):
    """Handles the text input box: a stack of ``VLine`` sprites growing
    upward from ``position``, plus a blinking ``VCursor``.
    """
    def __init__(self,
                 position,
                 size,
                 border=2,
                 renderer=VKeyboardRenderer.DEFAULT):
        """Default constructor.
        Parameters
        ----------
        position:
            Position tuple (x, y)
        size:
            Size tuple (width, height) of the text input.
        border:
            Border thickness.
        renderer:
            Text input renderer instance, using VTextInputRenderer.DEFAULT
            if not specified.
        """
        self.state = 0
        self.selected = 0
        self.position = position
        self.size = size  # Size of a single text line (width, height)
        self.text = ''
        self.text_margin = border
        self.renderer = renderer
        # Define background sprites
        self.eraser = None
        self.background = VBackground(size, renderer)
        self.background.set_rect(self.position[0],
                                 self.position[1] - 2 * self.text_margin,
                                 self.size[0],
                                 self.size[1] + 2 * self.text_margin)
        self.sprites = pygame.sprite.LayeredDirty(self.background)
        # Initialize first line (always visible so the box is never empty)
        line = VLine((self.size[0] - 2 * self.text_margin,
                      self.size[1]), renderer, True)
        line.set_position((
            self.position[0] + self.text_margin,
            self.position[1] - self.text_margin))
        self.sprites.add(line, layer=1)
        # Initialize cursor
        self.cursor = VCursor((2, size[1] - self.text_margin * 2), renderer)
        self.cursor.set_position((
            self.position[0] + self.text_margin,
            self.position[1]))
        self.sprites.add(self.cursor, layer=2)
        # Start hidden until explicitly enabled.
        self.disable()
    def enable(self):
        """Set this text input as active."""
        self.state = 1
        self.cursor.visible = 1
        self.background.visible = 1
        # Only the first (bottom) line needs to be shown initially.
        self.sprites.get_sprites_from_layer(1)[0].visible = 1
    def is_enabled(self):
        """Return True if this text input is active."""
        return self.state == 1
    def disable(self):
        """Set this text input as non active."""
        self.state = 0
        self.cursor.visible = 0
        self.background.visible = 0
        for line in self.sprites.get_sprites_from_layer(1):
            line.visible = 0
    def set_selected(self, state):
        """Set the input box selection state (1 for selected else 0)
        and redraws it.
        Parameters
        ----------
        state:
            New key state.
        """
        self.selected = int(state)
        self.cursor.set_selected(state)
    def get_rect(self):
        """Return text input rect."""
        return self.background.rect
    def draw(self, surface, force):
        """Draw the text input box.
        Parameters
        ----------
        surface:
            Surface on which the VTextInput is drawn.
        force:
            Force the drawing of the entire surface (time consuming).
        """
        # Setup the surface used to hide/clear the text input
        if surface and surface != self.eraser:
            self.eraser = surface
            self.sprites.clear(surface, self.eraser.copy())
            # Clip drawing to the column above the box: lines grow upward.
            self.sprites.set_clip(pygame.Rect(self.position[0], 0,
                                              self.size[0],
                                              self.position[1] + self.size[1]))
        if force:
            self.sprites.repaint_rect(self.background.rect)
        return self.sprites.draw(surface)
    def update(self, events):
        """Pygame events processing callback method.
        Parameters
        ----------
        events:
            List of events to process.
        """
        if self.state > 0:
            self.sprites.update(events)
            for event in events:
                if event.type == pygame.KEYUP and self.cursor.selected:
                    if event.key == pygame.K_LEFT:
                        self.increment_cursor(-1)
                    elif event.key == pygame.K_RIGHT:
                        self.increment_cursor(1)
                    elif event.key == pygame.K_HOME:
                        # Reset then step 0: increment_cursor clamps/redraws.
                        self.cursor.index = 0
                        self.increment_cursor(0)
                    elif event.key == pygame.K_END:
                        self.cursor.index = 0
                        self.increment_cursor(len(self.text))
                if event.type == pygame.MOUSEBUTTONDOWN\
                        and event.button in (1, 2, 3):
                    # Don't consider the mouse wheel (button 4 & 5):
                    # NOTE(review): set_cursor is not defined in this excerpt;
                    # presumably implemented elsewhere in the class.
                    self.set_cursor(event.pos)
                if event.type == pygame.FINGERDOWN:
                    # Touch coordinates are normalized; scale to pixels.
                    display_size = pygame.display.get_surface().get_size()
                    finger_pos = (event.x * display_size[0], event.y * display_size[1])
                    self.set_cursor(finger_pos)
    def update_lines(self):
        """Update lines content with the current text."""
        if self.state > 0:
            remain = self.text
            # Update existing line with text
            for line in self.sprites.get_sprites_from_layer(1):
                remain = line.feed(remain)
            # Create new lines if necessary
            while remain:
                line = VLine((self.size[0] - 2 * self.text_margin,
                              self.size[1]), self.renderer)
                self.sprites.add(line, layer=1)
                remain = line.feed(remain)
            # Update lines positions (stacked upward from the box origin)
            i = 0
            for line in reversed(self.sprites.get_sprites_from_layer(1)):
                if line.visible:
                    x = self.position[0] + self.text_margin
                    y = self.position[1] - i * self.size[1] - self.text_margin
                    line.set_position((x, y))
                    i += 1
            # Grow the background to cover all visible lines; relies on at
            # least one visible line (the first line is always_visible).
            self.background.set_rect(self.position[0],
                                     line.rect.y - self.text_margin,
                                     self.size[0],
                                     i * self.size[1] + 2 * self.text_margin)
    def set_text(self, text):
        """Overwrite the current text with the given one. The cursor is
        moved at the end of the text.
        Parameters
        ----------
        text:
            New text.
        """
        self.text = text
        self.update_lines()
        self.cursor.index = 0
        self.increment_cursor(len(text))
    def add_at_cursor(self, text):
        """Add a text wherever the cursor is currently located.
        Parameters
        ----------
        text:
            Single char or text to append.
        """
        if self.cursor.index < len(self.text):
            # Inserting in the text
            prefix = self.text[:self.cursor.index]
            suffix = self.text[self.cursor.index:]
            self.text = prefix + text + suffix
        else:
            self.text += text
        self.update_lines()
        # NOTE(review): advances by one regardless of len(text); looks
        # intended for single-character key presses — confirm for pastes.
        self.increment_cursor(1)
    def delete_at_cursor(self):
        """Delete a character before the cursor position."""
        if self.cursor.index == 0:
            return  # Nothing left of the cursor to delete.
        prefix = self.text[:self.cursor.index - 1]
        suffix = self.text[self.cursor.index:]
        self.text = prefix + suffix
        self.update_lines()
        self.increment_cursor(-1)
    def increment_cursor(self, step):
        """Move the cursor of one or more steps (but not beyond the
        text length).
        Parameters
        ----------
        step:
            From how many characters the cursor shall move.
        """
        # Clamp the new index into [0, len(text)].
        pos = max(0, self.cursor.index + step)
        self.cursor.set_index(min(pos, len(self.text)))
        # Update cursor position: find the line containing the cursor index.
        chars_counter = 0
        for line in self.sprites.get_sprites_from_layer(1):
            if chars_counter + len(line) >= self.cursor.index:
                idx = self.cursor.index - chars_counter
                x = self.text_margin + self.renderer.get_text_width(
                    line.text[:idx])
                self.cursor.set_position((x, line.rect.y + self.text_margin))
                break
            # Bug fix: advance the running character count past this line;
            # without this, the cursor was mis-positioned on every line
            # after the first (chars_counter stayed 0 and was dead code).
            chars_counter += len(line)
# ---- (section separator) ----
from datetime import datetime
import tensorflow as tf
import os
import numpy as np
from typing import List
class TransferVGG(object):
    """Binary-segmentation model: ImageNet-pretrained VGG19 encoder with a
    U-Net-style transposed-convolution decoder and skip connections."""
    @classmethod
    def get_model(cls, img_w=256, img_h=256, decoding_start_f=512, keep_last_max_pooling=True, fine_tuning=True):
        """Build and compile the model.
        Parameters
        ----------
        img_w, img_h:
            Input image width/height in pixels (3-channel input).
        decoding_start_f:
            Filters of the first decoder stage (halved at each stage).
        keep_last_max_pooling:
            If True, decode from the final (max-pooled) VGG output, which
            requires one extra initial upsampling stage; otherwise start
            one layer earlier.
        fine_tuning:
            If False, freeze all VGG19 encoder weights.
        """
        vgg: tf.keras.Model = tf.keras.applications.VGG19(include_top=False, weights="imagenet",
                                                          input_shape=(img_h, img_w, 3))
        vgg_layers: List[tf.keras.layers.Layer] = vgg.layers
        if not fine_tuning:
            for layer in vgg_layers:
                layer.trainable = False
        k_init = "he_normal"
        # Decoder
        decoder_depth = 4
        if keep_last_max_pooling:
            # Indices of encoder layers whose outputs feed skip connections;
            # assumes VGG19 (include_top=False) layer ordering — TODO confirm.
            layer_idx = [-1, -10, 8, 4, 1]
            prev_layer = vgg_layers[layer_idx[0]].output
            up_sampling = tf.keras.layers.Conv2DTranspose(filters=decoding_start_f, strides=(2, 2),
                                                          padding="same", activation="relu",
                                                          kernel_size=(3, 3),
                                                          kernel_initializer=k_init)(prev_layer)
            prev_layer = up_sampling
        else:
            layer_idx = [-2, -10, 8, 4, 1]
            prev_layer = vgg_layers[layer_idx[0]].output
        for i in range(0, decoder_depth):
            # Upsample x2, then fuse with the matching encoder feature map.
            up_sampling = tf.keras.layers.Conv2DTranspose(filters=decoding_start_f // (2 ** i), strides=(2, 2),
                                                          padding="same", activation="relu",
                                                          kernel_size=(3, 3),
                                                          kernel_initializer=k_init)(prev_layer)
            merge = tf.keras.layers.concatenate([up_sampling, vgg_layers[layer_idx[i + 1]].output])
            merge = tf.keras.layers.Conv2D(filters=decoding_start_f // (2 ** i), strides=(1, 1),
                                           padding="same", activation="relu", kernel_size=(3, 3),
                                           kernel_initializer=k_init)(merge)
            prev_layer = merge
        # Output: single-channel per-pixel sigmoid probability mask.
        output = tf.keras.layers.Conv2DTranspose(filters=1,
                                                 kernel_size=(3, 3), activation='sigmoid',
                                                 padding='same',
                                                 kernel_initializer='glorot_normal')(prev_layer)
        model = tf.keras.Model(inputs=vgg_layers[0].input, outputs=output)
        # bce_dice_loss and map_iou are module-level functions defined below.
        optimizer = 'adam'
        loss = bce_dice_loss
        metrics = [map_iou]
        model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
        return model
class TransferResNet50V2(object):
    """Binary-segmentation model: ImageNet-pretrained ResNet50V2 encoder
    with a U-Net-style transposed-convolution decoder."""
    @classmethod
    def get_model(cls, img_w=256, img_h=256, fine_tuning=True):
        """Build and compile the model.
        Parameters
        ----------
        img_w, img_h:
            Input image width/height in pixels (3-channel input).
        fine_tuning:
            If False, freeze all encoder weights.
        """
        decoding_start_f = 256
        k_init = 'he_normal'  # kernel initializer
        # Encoder
        inception_resnet: tf.keras.Model = tf.keras.applications.ResNet50V2(include_top=False,
                                                                            weights='imagenet',
                                                                            input_shape=(img_h, img_w, 3))
        encoder_layers: List[tf.keras.layers.Layer] = inception_resnet.layers.copy()
        if not fine_tuning:
            for layer in encoder_layers:
                layer.trainable = False
        # Decoder: each stage upsamples x2 and concatenates an intermediate
        # encoder output. The indices -46/-112/-158/2 presumably pick the
        # ResNet50V2 stage boundaries with matching spatial size — TODO confirm.
        up_sampling_1 = tf.keras.layers.Conv2DTranspose(filters=decoding_start_f, strides=(2, 2),
                                                        padding="same", activation="relu",
                                                        kernel_size=(3, 3),
                                                        kernel_initializer=k_init)(encoder_layers[-1].output)
        merge_1 = tf.keras.layers.concatenate([encoder_layers[-46].output, up_sampling_1])
        merge_1 = tf.keras.layers.Conv2D(kernel_size=(3, 3), kernel_initializer=k_init, activation="relu",
                                         padding="same", filters=decoding_start_f)(merge_1)
        up_sampling_2 = tf.keras.layers.Conv2DTranspose(filters=decoding_start_f // 2, strides=(2, 2),
                                                        padding="same", activation="relu",
                                                        kernel_size=(3, 3), kernel_initializer=k_init)(merge_1)
        merge_2 = tf.keras.layers.concatenate([encoder_layers[-112].output, up_sampling_2])
        merge_2 = tf.keras.layers.Conv2D(kernel_size=(3, 3), kernel_initializer=k_init, activation="relu",
                                         padding="same", filters=decoding_start_f // 2)(merge_2)
        up_sampling_3 = tf.keras.layers.Conv2DTranspose(filters=decoding_start_f // 4, strides=(2, 2),
                                                        padding="same", activation="relu",
                                                        kernel_size=(3, 3), kernel_initializer=k_init)(merge_2)
        merge_3 = tf.keras.layers.concatenate([encoder_layers[-158].output, up_sampling_3])
        merge_3 = tf.keras.layers.Conv2D(kernel_size=(3, 3), kernel_initializer=k_init, activation="relu",
                                         padding="same", filters=decoding_start_f // 4)(merge_3)
        up_sampling_4 = tf.keras.layers.Conv2DTranspose(filters=decoding_start_f // 4, strides=(2, 2),
                                                        padding="same", activation="relu",
                                                        kernel_initializer=k_init, kernel_size=(3, 3))(merge_3)
        merge_4 = tf.keras.layers.concatenate([encoder_layers[2].output, up_sampling_4])
        merge_4 = tf.keras.layers.Conv2D(filters=decoding_start_f // 4, strides=(1, 1),
                                         padding="same", activation="relu", kernel_initializer=k_init,
                                         kernel_size=(3, 3))(merge_4)
        # Final upsampling back to the input resolution (no skip connection).
        up_sampling_5 = tf.keras.layers.Conv2DTranspose(filters=decoding_start_f // 8, strides=(2, 2),
                                                        padding="same", activation="relu",
                                                        kernel_size=(3, 3), kernel_initializer=k_init)(merge_4)
        up_sampling_5 = tf.keras.layers.Conv2D(filters=decoding_start_f // 16, strides=(1, 1),
                                               padding="same", activation="relu",
                                               kernel_size=(3, 3), kernel_initializer=k_init)(up_sampling_5)
        # Output layer: single-channel per-pixel sigmoid probability mask.
        output = tf.keras.layers.Conv2DTranspose(filters=1, kernel_size=(3, 3), activation='sigmoid', padding='same',
                                                 kernel_initializer='glorot_normal')(up_sampling_5)
        model = tf.keras.Model(inputs=encoder_layers[0].input, outputs=output)
        # bce_dice_loss and map_iou are module-level functions defined below.
        optimizer = 'adam'
        loss = bce_dice_loss
        metrics = [map_iou]
        model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
        return model
class Unet(object):
    """Classic U-Net encoder/decoder for binary segmentation.
    Adapted from
    https://www.kaggle.com/weiji14/yet-another-keras-u-net-data-augmentation#Part-2---Build-model
    """
    @classmethod
    def get_model(cls, img_w=256, img_h=256):
        """Build and compile the U-Net.
        Parameters
        ----------
        img_w, img_h:
            Input image width/height in pixels (3-channel input).
        Returns
        -------
        A compiled ``tf.keras.Model`` emitting a single-channel sigmoid mask.
        """
        n_ch_exps = [4, 5, 6, 7, 8, 9]  # the n-th deep channel's exponent i.e. 2**n 16,32,64,128,256
        k_size = (3, 3)  # size of filter kernel
        k_init = 'he_normal'  # kernel initializer
        ch_axis = 3  # channels-last concatenation axis
        input_shape = (img_w, img_h, 3)
        inp = tf.keras.layers.Input(shape=input_shape)
        encodeds = []
        # Encoder: conv / dropout / conv per level; keep each level's output
        # for the decoder skip connections. (Leftover debug prints removed.)
        enc = inp
        for l_idx, n_ch in enumerate(n_ch_exps):
            enc = tf.keras.layers.Conv2D(filters=2 ** n_ch, kernel_size=k_size, activation='relu', padding='same',
                                         kernel_initializer=k_init)(enc)
            enc = tf.keras.layers.Dropout(0.1 * l_idx)(enc)
            enc = tf.keras.layers.Conv2D(filters=2 ** n_ch, kernel_size=k_size, activation='relu', padding='same',
                                         kernel_initializer=k_init)(enc)
            encodeds.append(enc)
            if n_ch < n_ch_exps[-1]:  # do not run max pooling on the last encoding/downsampling step
                enc = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(enc)
        # Decoder: transposed-conv upsampling, concatenate matching encoder
        # level, then conv / dropout / conv.
        dec = enc
        decoder_n_chs = n_ch_exps[::-1][1:]
        for l_idx, n_ch in enumerate(decoder_n_chs):
            l_idx_rev = len(n_ch_exps) - l_idx - 2  # index of the matching encoder level
            dec = tf.keras.layers.Conv2DTranspose(filters=2 ** n_ch, kernel_size=k_size, strides=(2, 2),
                                                  activation='relu', padding='same', kernel_initializer=k_init)(dec)
            dec = tf.keras.layers.concatenate([dec, encodeds[l_idx_rev]], axis=ch_axis)
            dec = tf.keras.layers.Conv2D(filters=2 ** n_ch, kernel_size=k_size, activation='relu', padding='same',
                                         kernel_initializer=k_init)(dec)
            dec = tf.keras.layers.Dropout(0.1 * l_idx)(dec)
            dec = tf.keras.layers.Conv2D(filters=2 ** n_ch, kernel_size=k_size, activation='relu', padding='same',
                                         kernel_initializer=k_init)(dec)
        # Output: single-channel per-pixel sigmoid probability mask.
        outp = tf.keras.layers.Conv2DTranspose(filters=1, kernel_size=k_size, activation='sigmoid', padding='same',
                                               kernel_initializer='glorot_normal')(dec)
        model = tf.keras.Model(inputs=[inp], outputs=[outp])
        # Same compile settings as the other models in this module.
        optimizer = 'adam'
        loss = bce_dice_loss
        metrics = [map_iou]
        model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
        return model
class TransposeSkipConn(object):
    """
    Model improving from TransponseConvModel in the sense that now skip connections are present,
    in order to combine local and global information about the image.
    This model is implemented with functional API, instead of Sequential model
    """
    model_name = "TransponseConvolutionSkipConnections"
    @classmethod
    def get_model(cls, start_f, img_h=256, img_w=256):
        """Build and compile the model.
        Parameters
        ----------
        start_f:
            Base number of filters; deeper stages use multiples of it.
        img_h, img_w:
            Input image height/width in pixels (single-channel input here).
        """
        x = tf.keras.Input(shape=(img_w, img_h, 1))  # Input layer
        # Encoder module: strided convs and max-pooling downsample the input.
        conv1 = tf.keras.layers.Conv2D(filters=start_f, kernel_size=(4, 4), strides=(2, 2),
                                       padding="same", activation="relu")(x)
        conv2 = tf.keras.layers.Conv2D(filters=start_f * 2, kernel_size=(3, 3), strides=(1, 1),
                                       padding="same", activation="relu")(conv1)
        maxpool1 = tf.keras.layers.MaxPool2D(pool_size=(2, 2))(conv2)
        conv3 = tf.keras.layers.Conv2D(filters=start_f * 4, kernel_size=(3, 3), strides=(2, 2),
                                       padding="same", activation="relu")(maxpool1)
        maxpool2 = tf.keras.layers.MaxPool2D(pool_size=(2, 2))(conv3)
        conv4 = tf.keras.layers.Conv2D(filters=start_f * 8, kernel_size=(3, 3), strides=(2, 2),
                                       padding="same", activation="relu")(maxpool2)
        # Decoder module: each Add() fuses an upsampled map with the encoder
        # map of the same shape (element-wise skip connection, not concat).
        up_sampling1 = tf.keras.layers.Conv2DTranspose(filters=(start_f * 4), strides=(2, 2),
                                                       padding="same", activation="relu",
                                                       kernel_size=(3, 3))(conv4)
        mix1 = tf.keras.layers.Add()([up_sampling1, maxpool2])
        up_sampling2 = tf.keras.layers.Conv2DTranspose(filters=(start_f * 2), strides=(4, 4),
                                                       padding="same", activation="relu",
                                                       kernel_size=(3, 3))(mix1)
        mix2 = tf.keras.layers.Add()([up_sampling2, maxpool1])
        up_sampling3 = tf.keras.layers.Conv2DTranspose(filters=(start_f * 2), strides=(2, 2),
                                                       padding="same", activation="relu", kernel_size=(3, 3))(mix2)
        mix3 = tf.keras.layers.Add()([up_sampling3, conv2])
        up_sampling4 = tf.keras.layers.Conv2DTranspose(filters=start_f, strides=(1, 1), padding="same",
                                                       activation="relu", kernel_size=(4, 4))(mix3)
        mix4 = tf.keras.layers.Add()([up_sampling4, conv1])
        up_sampling5 = tf.keras.layers.Conv2DTranspose(filters=start_f, strides=(2, 2), padding="same",
                                                       activation="relu", kernel_size=(3, 3))(mix4)
        # Output layer: single-channel per-pixel sigmoid probability mask.
        output_layer = tf.keras.layers.Conv2DTranspose(filters=1,
                                                       kernel_size=(3, 3),
                                                       strides=(1, 1),
                                                       padding='same',
                                                       activation='sigmoid')(up_sampling5)
        model = tf.keras.Model(inputs=x, outputs=output_layer)
        # compile_model is a module-level helper defined below.
        compile_model(model)
        return model
class TransposeConvModel(object):
    """
    Model using transpose convolution layers:
    It is a first improvement over the model provided in the notebook:
    - Number of filters improve with 2^n
    - Up-sampling layers are substituted with transpose convolutions, in order to improve the un-learnability
    of the up-sampling filters
    The model as 1M of parameter (with depth = 4, start_f=8).
    """
    model_name = "TransposeConvolution"
    @classmethod
    def get_model(cls, depth, start_f, img_h=256, img_w=256):
        """Build and compile a sequential encoder/decoder model.
        Parameters
        ----------
        depth:
            Number of encoder stages (decoder has depth - 1 stages).
        start_f:
            Filters of the first convolution.
        img_h, img_w:
            Input image height/width in pixels (3-channel input).
        """
        model = tf.keras.Sequential(name=cls.model_name)
        # Encoder
        for i in range(depth):
            input_shape = [img_h, img_w, 3] if i == 0 else [None]
            model.add(tf.keras.layers.Conv2D(filters=start_f,
                                             kernel_size=(3, 3),
                                             strides=(1, 1),
                                             padding="same",
                                             input_shape=input_shape,
                                             activation="relu"))
            if i >= 1:
                model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
            # NOTE(review): filters grow by (i + 1) ** 2 per stage, not 2^n
            # as the class docstring claims — confirm which is intended.
            start_f = start_f * ((i + 1) ** 2)
        # Decoder (mirrors the encoder growth by dividing by (depth - i) ** 2)
        for i in range(depth - 1):
            start_f = start_f // ((depth - i) ** 2)
            model.add(tf.keras.layers.Conv2DTranspose(filters=start_f,
                                                      kernel_size=(3, 3),
                                                      strides=(2, 2),
                                                      padding="same",
                                                      activation="relu"))
        # Prediction layer: single-channel per-pixel sigmoid mask.
        model.add(tf.keras.layers.Conv2DTranspose(filters=1,
                                                  kernel_size=(3, 3),
                                                  strides=(1, 1),
                                                  padding='same',
                                                  activation='sigmoid'))
        # compile_model is a module-level helper defined below.
        compile_model(model)
        return model
class NotebookModel(object):
    """
    Lab notebook model adapted for this Kaggle competition
    - First submission done with {'depth': 6, 'start_f': 16} on 11th epoch
    """
    model_name = "NotebookModel"
    @classmethod
    def get_model(cls, depth, start_f, img_h=256, img_w=256):
        """Build and compile the baseline sequential encoder/decoder.
        Parameters
        ----------
        depth:
            Number of encoder (and decoder) stages.
        start_f:
            Filters of the first convolution; doubled at each encoder stage.
        img_h, img_w:
            Input image height/width in pixels (3-channel input).
        """
        model = tf.keras.Sequential(name=cls.model_name)
        n_filters = start_f
        # Encoder: conv + ReLU + 2x2 max-pooling, doubling filters per stage.
        for level in range(depth):
            if level == 0:
                shape = [img_h, img_w, 3]
            else:
                shape = [None]
            model.add(tf.keras.layers.Conv2D(filters=n_filters,
                                             kernel_size=(3, 3),
                                             strides=(1, 1),
                                             padding='same',
                                             input_shape=shape))
            model.add(tf.keras.layers.ReLU())
            model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
            n_filters *= 2
        # Decoder: bilinear upsampling + conv + ReLU, halving filters per stage.
        for _ in range(depth):
            n_filters //= 2
            model.add(tf.keras.layers.UpSampling2D(2, interpolation='bilinear'))
            model.add(tf.keras.layers.Conv2D(filters=n_filters,
                                             kernel_size=(3, 3),
                                             strides=(1, 1),
                                             padding='same'))
            model.add(tf.keras.layers.ReLU())
        # Prediction layer: single-channel per-pixel sigmoid mask.
        model.add(tf.keras.layers.Conv2D(filters=1,
                                         kernel_size=(1, 1),
                                         strides=(1, 1),
                                         padding='same',
                                         activation='sigmoid'))
        compile_model(model)
        return model
def bce_dice_loss(y_true, y_pred):
    """Combined segmentation loss: half the binary cross-entropy minus the
    soft dice coefficient (dice is maximized by minimizing its negative)."""
    bce = tf.keras.losses.binary_crossentropy(y_true, y_pred)
    dice = dice_coef(y_true, y_pred)
    return 0.5 * bce - dice
def dice_coef(y_true, y_pred):
    """Soft dice coefficient over flattened masks, with additive smoothing
    so the ratio is defined (and equals 1) when both masks are empty."""
    smooth = 1.
    true_flat = tf.keras.backend.flatten(y_true)
    pred_flat = tf.keras.backend.flatten(y_pred)
    overlap = tf.keras.backend.sum(true_flat * pred_flat)
    total = tf.keras.backend.sum(true_flat) + tf.keras.backend.sum(pred_flat)
    return (2. * overlap + smooth) / (total + smooth)
def iou_single(y_true, y_pred):
    """Intersection-over-Union for a single binary mask pair.
    ``y_pred`` is a probability map (sigmoid output) binarized at 0.5.
    Returns 0 instead of NaN when both masks are empty (union == 0).
    """
    # from pobability to predicted class {0, 1}
    y_pred = tf.cast(y_pred > 0.5, tf.float32)  # for sigmoid only
    # A and B
    intersection = tf.reduce_sum(y_true * y_pred)
    # A or B
    union = tf.reduce_sum(y_true) + tf.reduce_sum(y_pred) - intersection
    # IoU; divide_no_nan guards the 0/0 case (both masks empty), which the
    # plain division turned into NaN and poisoned any metric averaging.
    return tf.math.divide_no_nan(intersection, union)
def map_iou(y_true, y_pred):
    """Kaggle-style mean average precision over IoU thresholds 0.50:0.05:0.95.
    Per image: a true positive counts proportionally to how many thresholds
    its IoU clears; images empty in both truth and prediction count as true
    negatives. The result is averaged over the batch.
    """
    def cast_float(x):
        return tf.keras.backend.cast(x, tf.keras.backend.floatx())
    def cast_bool(x):
        return tf.keras.backend.cast(x, bool)
    def iou_loss_core(true, pred):
        # Per-image soft IoU over the flattened pixel axis; epsilon keeps
        # the ratio defined for empty unions.
        intersection = true * pred
        notTrue = 1 - true
        union = true + (notTrue * pred)
        return (tf.keras.backend.sum(intersection, axis=-1) +
                tf.keras.backend.epsilon()) / (tf.keras.backend.sum(union, axis=-1) + tf.keras.backend.epsilon())
    thresholds = np.linspace(start=0.5, stop=0.95, num=10)
    # flattened images (batch, pixels)
    true = tf.keras.backend.batch_flatten(y_true)
    pred = tf.keras.backend.batch_flatten(y_pred)
    pred = cast_float(tf.keras.backend.greater(pred, 0.5))  # consider class 1 when it is greater than 0.5
    # total white pixels - (batch,)
    true_sum = tf.keras.backend.sum(true, axis=-1)
    pred_sum = tf.keras.backend.sum(pred, axis=-1)
    # Images with a (non-trivial) mask in truth / prediction respectively.
    true1 = cast_float(tf.keras.backend.greater(true_sum, 1))
    pred1 = cast_float(tf.keras.backend.greater(pred_sum, 1))
    true_positive_mask = cast_bool(true1 * pred1)
    # separating only the possible true positives to check iou
    test_true = tf.boolean_mask(true, true_positive_mask)
    test_pred = tf.boolean_mask(pred, true_positive_mask)
    # getting iou and threshold comparisons
    iou = iou_loss_core(test_true, test_pred)
    true_positives = [cast_float(tf.keras.backend.greater(iou, thres)) for thres in thresholds]
    # mean of thresholds for true positives and total sum
    true_positives = tf.keras.backend.mean(tf.keras.backend.stack(true_positives, axis=-1), axis=-1)
    true_positives = tf.keras.backend.sum(true_positives)
    # to get images that don't have mask in both true and pred
    true_negatives = (1 - true1) * (1 - pred1)  # = 1 -true1 - pred1 + true1*pred1
    true_negatives = tf.keras.backend.sum(true_negatives)
    # Average the per-image scores over the batch size.
    return (true_positives + true_negatives) / cast_float(tf.keras.backend.shape(y_true)[0])
def compile_model(model, lr=0.001):
loss | |
1)
# Machine-generated constraints over the binary pairing variables m.b###.
# Each follows the pattern b_i - b_j + b_k <= 1, which appears to encode
# transitivity/triangle cuts for the pairing relation — do not edit by hand.
m.c1916 = Constraint(expr= m.b203 - m.b204 + m.b280 <= 1)
m.c1917 = Constraint(expr= m.b203 - m.b205 + m.b281 <= 1)
m.c1918 = Constraint(expr= m.b203 - m.b206 + m.b282 <= 1)
m.c1919 = Constraint(expr= m.b203 - m.b207 + m.b283 <= 1)
m.c1920 = Constraint(expr= m.b203 - m.b208 + m.b284 <= 1)
m.c1921 = Constraint(expr= m.b203 - m.b209 + m.b285 <= 1)
m.c1922 = Constraint(expr= m.b204 - m.b205 + m.b286 <= 1)
m.c1923 = Constraint(expr= m.b204 - m.b206 + m.b287 <= 1)
m.c1924 = Constraint(expr= m.b204 - m.b207 + m.b288 <= 1)
m.c1925 = Constraint(expr= m.b204 - m.b208 + m.b289 <= 1)
m.c1926 = Constraint(expr= m.b204 - m.b209 + m.b290 <= 1)
m.c1927 = Constraint(expr= m.b205 - m.b206 + m.b291 <= 1)
m.c1928 = Constraint(expr= m.b205 - m.b207 + m.b292 <= 1)
m.c1929 = Constraint(expr= m.b205 - m.b208 + m.b293 <= 1)
m.c1930 = Constraint(expr= m.b205 - m.b209 + m.b294 <= 1)
m.c1931 = Constraint(expr= m.b206 - m.b207 + m.b295 <= 1)
m.c1932 = Constraint(expr= m.b206 - m.b208 + m.b296 <= 1)
m.c1933 = Constraint(expr= m.b206 - m.b209 + m.b297 <= 1)
m.c1934 = Constraint(expr= m.b207 - m.b208 + m.b298 <= 1)
m.c1935 = Constraint(expr= m.b207 - m.b209 + m.b299 <= 1)
m.c1936 = Constraint(expr= m.b208 - m.b209 + m.b300 <= 1)
m.c1937 = Constraint(expr= m.b210 - m.b211 + m.b223 <= 1)
m.c1938 = Constraint(expr= m.b210 - m.b212 + m.b224 <= 1)
m.c1939 = Constraint(expr= m.b210 - m.b213 + m.b225 <= 1)
m.c1940 = Constraint(expr= m.b210 - m.b214 + m.b226 <= 1)
m.c1941 = Constraint(expr= m.b210 - m.b215 + m.b227 <= 1)
m.c1942 = Constraint(expr= m.b210 - m.b216 + m.b228 <= 1)
m.c1943 = Constraint(expr= m.b210 - m.b217 + m.b229 <= 1)
m.c1944 = Constraint(expr= m.b210 - m.b218 + m.b230 <= 1)
m.c1945 = Constraint(expr= m.b210 - m.b219 + m.b231 <= 1)
m.c1946 = Constraint(expr= m.b210 - m.b220 + m.b232 <= 1)
m.c1947 = Constraint(expr= m.b210 - m.b221 + m.b233 <= 1)
m.c1948 = Constraint(expr= m.b210 - m.b222 + m.b234 <= 1)
m.c1949 = Constraint(expr= m.b211 - m.b212 + m.b235 <= 1)
m.c1950 = Constraint(expr= m.b211 - m.b213 + m.b236 <= 1)
m.c1951 = Constraint(expr= m.b211 - m.b214 + m.b237 <= 1)
m.c1952 = Constraint(expr= m.b211 - m.b215 + m.b238 <= 1)
m.c1953 = Constraint(expr= m.b211 - m.b216 + m.b239 <= 1)
m.c1954 = Constraint(expr= m.b211 - m.b217 + m.b240 <= 1)
m.c1955 = Constraint(expr= m.b211 - m.b218 + m.b241 <= 1)
m.c1956 = Constraint(expr= m.b211 - m.b219 + m.b242 <= 1)
m.c1957 = Constraint(expr= m.b211 - m.b220 + m.b243 <= 1)
m.c1958 = Constraint(expr= m.b211 - m.b221 + m.b244 <= 1)
m.c1959 = Constraint(expr= m.b211 - m.b222 + m.b245 <= 1)
m.c1960 = Constraint(expr= m.b212 - m.b213 + m.b246 <= 1)
m.c1961 = Constraint(expr= m.b212 - m.b214 + m.b247 <= 1)
m.c1962 = Constraint(expr= m.b212 - m.b215 + m.b248 <= 1)
m.c1963 = Constraint(expr= m.b212 - m.b216 + m.b249 <= 1)
m.c1964 = Constraint(expr= m.b212 - m.b217 + m.b250 <= 1)
m.c1965 = Constraint(expr= m.b212 - m.b218 + m.b251 <= 1)
m.c1966 = Constraint(expr= m.b212 - m.b219 + m.b252 <= 1)
m.c1967 = Constraint(expr= m.b212 - m.b220 + m.b253 <= 1)
m.c1968 = Constraint(expr= m.b212 - m.b221 + m.b254 <= 1)
m.c1969 = Constraint(expr= m.b212 - m.b222 + m.b255 <= 1)
m.c1970 = Constraint(expr= m.b213 - m.b214 + m.b256 <= 1)
m.c1971 = Constraint(expr= m.b213 - m.b215 + m.b257 <= 1)
m.c1972 = Constraint(expr= m.b213 - m.b216 + m.b258 <= 1)
m.c1973 = Constraint(expr= m.b213 - m.b217 + m.b259 <= 1)
m.c1974 = Constraint(expr= m.b213 - m.b218 + m.b260 <= 1)
m.c1975 = Constraint(expr= m.b213 - m.b219 + m.b261 <= 1)
m.c1976 = Constraint(expr= m.b213 - m.b220 + m.b262 <= 1)
m.c1977 = Constraint(expr= m.b213 - m.b221 + m.b263 <= 1)
m.c1978 = Constraint(expr= m.b213 - m.b222 + m.b264 <= 1)
m.c1979 = Constraint(expr= m.b214 - m.b215 + m.b265 <= 1)
m.c1980 = Constraint(expr= m.b214 - m.b216 + m.b266 <= 1)
m.c1981 = Constraint(expr= m.b214 - m.b217 + m.b267 <= 1)
m.c1982 = Constraint(expr= m.b214 - m.b218 + m.b268 <= 1)
m.c1983 = Constraint(expr= m.b214 - m.b219 + m.b269 <= 1)
m.c1984 = Constraint(expr= m.b214 - m.b220 + m.b270 <= 1)
m.c1985 = Constraint(expr= m.b214 - m.b221 + m.b271 <= 1)
m.c1986 = Constraint(expr= m.b214 - m.b222 + m.b272 <= 1)
m.c1987 = Constraint(expr= m.b215 - m.b216 + m.b273 <= 1)
m.c1988 = Constraint(expr= m.b215 - m.b217 + m.b274 <= 1)
m.c1989 = Constraint(expr= m.b215 - m.b218 + m.b275 <= 1)
m.c1990 = Constraint(expr= m.b215 - m.b219 + m.b276 <= 1)
m.c1991 = Constraint(expr= m.b215 - m.b220 + m.b277 <= 1)
m.c1992 = Constraint(expr= m.b215 - m.b221 + m.b278 <= 1)
m.c1993 = Constraint(expr= m.b215 - m.b222 + m.b279 <= 1)
m.c1994 = Constraint(expr= m.b216 - m.b217 + m.b280 <= 1)
m.c1995 = Constraint(expr= m.b216 - m.b218 + m.b281 <= 1)
m.c1996 = Constraint(expr= m.b216 - m.b219 + m.b282 <= 1)
m.c1997 = Constraint(expr= m.b216 - m.b220 + m.b283 <= 1)
m.c1998 = Constraint(expr= m.b216 - m.b221 + m.b284 <= 1)
m.c1999 = Constraint(expr= m.b216 - m.b222 + m.b285 <= 1)
m.c2000 = Constraint(expr= m.b217 - m.b218 + m.b286 <= 1)
m.c2001 = Constraint(expr= m.b217 - m.b219 + m.b287 <= 1)
m.c2002 = Constraint(expr= m.b217 - m.b220 + m.b288 <= 1)
m.c2003 = Constraint(expr= m.b217 - m.b221 + m.b289 <= 1)
m.c2004 = Constraint(expr= m.b217 - m.b222 + m.b290 <= 1)
m.c2005 = Constraint(expr= m.b218 - m.b219 + m.b291 <= 1)
m.c2006 = Constraint(expr= m.b218 - m.b220 + m.b292 <= 1)
m.c2007 = Constraint(expr= m.b218 - m.b221 + m.b293 <= 1)
m.c2008 = Constraint(expr= m.b218 - m.b222 + m.b294 <= 1)
m.c2009 = Constraint(expr= m.b219 - m.b220 + m.b295 <= 1)
m.c2010 = Constraint(expr= m.b219 - m.b221 + m.b296 <= 1)
m.c2011 = Constraint(expr= m.b219 - m.b222 + m.b297 <= 1)
m.c2012 = Constraint(expr= m.b220 - m.b221 + m.b298 <= 1)
m.c2013 = Constraint(expr= m.b220 - m.b222 + m.b299 <= 1)
m.c2014 = Constraint(expr= m.b221 - m.b222 + m.b300 <= 1)
m.c2015 = Constraint(expr= m.b223 - m.b224 + m.b235 <= 1)
m.c2016 = Constraint(expr= m.b223 - m.b225 + m.b236 <= 1)
m.c2017 = Constraint(expr= m.b223 - m.b226 + m.b237 <= 1)
m.c2018 = Constraint(expr= m.b223 - m.b227 + m.b238 <= 1)
m.c2019 = Constraint(expr= m.b223 - m.b228 + m.b239 <= 1)
m.c2020 = Constraint(expr= m.b223 - m.b229 + m.b240 <= 1)
m.c2021 = Constraint(expr= m.b223 - m.b230 + m.b241 <= 1)
m.c2022 = Constraint(expr= m.b223 - m.b231 + m.b242 <= 1)
m.c2023 = Constraint(expr= m.b223 - m.b232 + m.b243 <= 1)
m.c2024 = Constraint(expr= m.b223 - m.b233 + m.b244 <= 1)
m.c2025 = Constraint(expr= m.b223 - m.b234 + m.b245 <= 1)
m.c2026 = Constraint(expr= m.b224 - m.b225 + m.b246 <= 1)
m.c2027 = Constraint(expr= m.b224 - m.b226 + m.b247 <= 1)
m.c2028 = Constraint(expr= m.b224 - m.b227 + m.b248 <= 1)
m.c2029 = Constraint(expr= m.b224 - m.b228 + m.b249 <= 1)
m.c2030 = Constraint(expr= m.b224 - m.b229 + m.b250 <= 1)
m.c2031 = Constraint(expr= m.b224 - m.b230 + m.b251 <= 1)
m.c2032 = Constraint(expr= m.b224 - m.b231 + m.b252 <= 1)
m.c2033 = Constraint(expr= m.b224 - m.b232 + m.b253 <= 1)
m.c2034 = Constraint(expr= m.b224 - m.b233 + m.b254 <= 1)
m.c2035 = Constraint(expr= m.b224 - m.b234 + m.b255 <= 1)
m.c2036 = Constraint(expr= m.b225 - m.b226 + m.b256 <= 1)
m.c2037 = Constraint(expr= m.b225 - m.b227 + m.b257 <= 1)
m.c2038 = Constraint(expr= m.b225 - m.b228 + m.b258 <= 1)
m.c2039 = Constraint(expr= m.b225 - m.b229 + m.b259 <= 1)
m.c2040 = Constraint(expr= m.b225 - m.b230 + m.b260 <= 1)
m.c2041 = Constraint(expr= m.b225 - m.b231 + m.b261 <= 1)
m.c2042 = Constraint(expr= m.b225 - m.b232 + m.b262 <= 1)
m.c2043 = Constraint(expr= m.b225 - m.b233 + m.b263 <= 1)
m.c2044 = Constraint(expr= m.b225 - m.b234 + m.b264 <= 1)
m.c2045 = Constraint(expr= m.b226 - m.b227 + m.b265 <= 1)
m.c2046 = Constraint(expr= m.b226 - m.b228 + m.b266 <= 1)
m.c2047 = Constraint(expr= m.b226 - m.b229 + m.b267 <= 1)
m.c2048 = Constraint(expr= m.b226 - m.b230 + m.b268 <= 1)
m.c2049 = Constraint(expr= m.b226 - m.b231 + m.b269 <= 1)
m.c2050 = Constraint(expr= m.b226 - m.b232 + m.b270 <= 1)
m.c2051 = Constraint(expr= m.b226 - m.b233 + m.b271 <= 1)
m.c2052 = Constraint(expr= m.b226 - m.b234 + m.b272 <= 1)
m.c2053 = Constraint(expr= m.b227 - m.b228 + m.b273 <= 1)
m.c2054 = Constraint(expr= m.b227 - m.b229 + m.b274 <= 1)
m.c2055 = Constraint(expr= m.b227 - m.b230 + m.b275 <= 1)
m.c2056 = Constraint(expr= m.b227 - m.b231 + m.b276 <= 1)
m.c2057 = Constraint(expr= m.b227 - m.b232 + m.b277 <= 1)
m.c2058 = | |
# Source repo: zhammer/dd-trace-py (dataset artifact line converted to a comment)
import grpc
from grpc._grpcio_metadata import __version__ as _GRPC_VERSION
import time
from grpc.framework.foundation import logging_pool
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.grpc import patch, unpatch
from ddtrace.contrib.grpc import constants
from ddtrace.contrib.grpc.patch import _unpatch_server
from ddtrace.ext import errors
from ddtrace import Pin
from ...base import BaseTracerTestCase
from ...utils import assert_is_measured
from .hello_pb2 import HelloRequest, HelloReply
from .hello_pb2_grpc import add_HelloServicer_to_server, HelloStub, HelloServicer
# Port the test Hello server listens on (see _start_server below).
_GRPC_PORT = 50531
# Parse the grpcio version string into a comparable tuple, e.g. '1.14.0' -> (1, 14, 0).
_GRPC_VERSION = tuple([int(i) for i in _GRPC_VERSION.split('.')])
class GrpcTestCase(BaseTracerTestCase):
    def setUp(self):
        """Patch grpc and point both client and server pins at the dummy
        tracer so spans generated during each test can be inspected."""
        super(GrpcTestCase, self).setUp()
        patch()
        Pin.override(constants.GRPC_PIN_MODULE_SERVER, tracer=self.tracer)
        Pin.override(constants.GRPC_PIN_MODULE_CLIENT, tracer=self.tracer)
        self._start_server()
    def tearDown(self):
        """Stop the server and undo all instrumentation installed by setUp."""
        self._stop_server()
        # Remove any remaining spans so they don't leak into the next test
        self.tracer.writer.pop()
        # Unpatch grpc
        unpatch()
        super(GrpcTestCase, self).tearDown()
def get_spans_with_sync_and_assert(self, size=0, retry=20):
# testing instrumentation with grpcio < 1.14.0 presents a problem for
# checking spans written to the dummy tracer
# see https://github.com/grpc/grpc/issues/14621
spans = super(GrpcTestCase, self).get_spans()
if _GRPC_VERSION >= (1, 14):
assert len(spans) == size
return spans
for _ in range(retry):
if len(spans) == size:
assert len(spans) == size
return spans
time.sleep(0.1)
return spans
def _start_server(self):
self._server = grpc.server(logging_pool.pool(2))
self._server.add_insecure_port('[::]:%d' % (_GRPC_PORT))
add_HelloServicer_to_server(_HelloServicer(), self._server)
self._server.start()
    def _stop_server(self):
        """Stop the gRPC server immediately (grace period of 0 seconds)."""
        self._server.stop(0)
def _check_client_span(self, span, service, method_name, method_kind):
assert_is_measured(span)
assert span.name == 'grpc'
assert span.resource == '/helloworld.Hello/{}'.format(method_name)
assert span.service == service
assert span.error == 0
assert span.span_type == 'grpc'
assert span.get_tag('grpc.method.path') == '/helloworld.Hello/{}'.format(method_name)
assert span.get_tag('grpc.method.package') == 'helloworld'
assert span.get_tag('grpc.method.service') == 'Hello'
assert span.get_tag('grpc.method.name') == method_name
assert span.get_tag('grpc.method.kind') == method_kind
assert span.get_tag('grpc.status.code') == 'StatusCode.OK'
assert span.get_tag('grpc.host') == 'localhost'
assert span.get_tag('grpc.port') == '50531'
def _check_server_span(self, span, service, method_name, method_kind):
assert_is_measured(span)
assert span.name == 'grpc'
assert span.resource == '/helloworld.Hello/{}'.format(method_name)
assert span.service == service
assert span.error == 0
assert span.span_type == 'grpc'
assert span.get_tag('grpc.method.path') == '/helloworld.Hello/{}'.format(method_name)
assert span.get_tag('grpc.method.package') == 'helloworld'
assert span.get_tag('grpc.method.service') == 'Hello'
assert span.get_tag('grpc.method.name') == method_name
assert span.get_tag('grpc.method.kind') == method_kind
def test_insecure_channel_using_args_parameter(self):
def insecure_channel_using_args(target):
return grpc.insecure_channel(target)
self._test_insecure_channel(insecure_channel_using_args)
def test_insecure_channel_using_kwargs_parameter(self):
def insecure_channel_using_kwargs(target):
return grpc.insecure_channel(target=target)
self._test_insecure_channel(insecure_channel_using_kwargs)
def _test_insecure_channel(self, insecure_channel_function):
target = 'localhost:%d' % (_GRPC_PORT)
with insecure_channel_function(target) as channel:
stub = HelloStub(channel)
stub.SayHello(HelloRequest(name='test'))
spans = self.get_spans_with_sync_and_assert(size=2)
server_span, client_span = spans
self._check_client_span(client_span, 'grpc-client', 'SayHello', 'unary')
self._check_server_span(server_span, 'grpc-server', 'SayHello', 'unary')
def test_secure_channel_using_args_parameter(self):
def secure_channel_using_args(target, **kwargs):
return grpc.secure_channel(target, **kwargs)
self._test_secure_channel(secure_channel_using_args)
def test_secure_channel_using_kwargs_parameter(self):
def secure_channel_using_kwargs(target, **kwargs):
return grpc.secure_channel(target=target, **kwargs)
self._test_secure_channel(secure_channel_using_kwargs)
def _test_secure_channel(self, secure_channel_function):
target = 'localhost:%d' % (_GRPC_PORT)
with secure_channel_function(target, credentials=grpc.ChannelCredentials(None)) as channel:
stub = HelloStub(channel)
stub.SayHello(HelloRequest(name='test'))
spans = self.get_spans_with_sync_and_assert(size=2)
server_span, client_span = spans
self._check_client_span(client_span, 'grpc-client', 'SayHello', 'unary')
self._check_server_span(server_span, 'grpc-server', 'SayHello', 'unary')
def test_pin_not_activated(self):
self.tracer.configure(enabled=False)
with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel:
stub = HelloStub(channel)
stub.SayHello(HelloRequest(name='test'))
spans = self.get_spans_with_sync_and_assert()
assert len(spans) == 0
def test_pin_tags_are_put_in_span(self):
# DEV: stop and restart server to catch overriden pin
self._stop_server()
Pin.override(constants.GRPC_PIN_MODULE_SERVER, service='server1')
Pin.override(constants.GRPC_PIN_MODULE_SERVER, tags={'tag1': 'server'})
Pin.override(constants.GRPC_PIN_MODULE_CLIENT, tags={'tag2': 'client'})
self._start_server()
with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel:
stub = HelloStub(channel)
stub.SayHello(HelloRequest(name='test'))
spans = self.get_spans_with_sync_and_assert(size=2)
assert spans[0].service == 'server1'
assert spans[0].get_tag('tag1') == 'server'
assert spans[1].get_tag('tag2') == 'client'
def test_pin_can_be_defined_per_channel(self):
Pin.override(constants.GRPC_PIN_MODULE_CLIENT, service='grpc1')
channel1 = grpc.insecure_channel('localhost:%d' % (_GRPC_PORT))
Pin.override(constants.GRPC_PIN_MODULE_CLIENT, service='grpc2')
channel2 = grpc.insecure_channel('localhost:%d' % (_GRPC_PORT))
stub1 = HelloStub(channel1)
stub1.SayHello(HelloRequest(name='test'))
channel1.close()
# DEV: make sure we have two spans before proceeding
spans = self.get_spans_with_sync_and_assert(size=2)
stub2 = HelloStub(channel2)
stub2.SayHello(HelloRequest(name='test'))
channel2.close()
spans = self.get_spans_with_sync_and_assert(size=4)
# DEV: Server service default, client services override
self._check_server_span(spans[0], 'grpc-server', 'SayHello', 'unary')
self._check_client_span(spans[1], 'grpc1', 'SayHello', 'unary')
self._check_server_span(spans[2], 'grpc-server', 'SayHello', 'unary')
self._check_client_span(spans[3], 'grpc2', 'SayHello', 'unary')
def test_analytics_default(self):
with grpc.secure_channel('localhost:%d' % (_GRPC_PORT), credentials=grpc.ChannelCredentials(None)) as channel:
stub = HelloStub(channel)
stub.SayHello(HelloRequest(name='test'))
spans = self.get_spans_with_sync_and_assert(size=2)
assert spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None
assert spans[1].get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None
def test_analytics_with_rate(self):
with self.override_config(
'grpc_server',
dict(analytics_enabled=True, analytics_sample_rate=0.75)
):
with self.override_config(
'grpc',
dict(analytics_enabled=True, analytics_sample_rate=0.5)
):
with grpc.secure_channel(
'localhost:%d' % (_GRPC_PORT),
credentials=grpc.ChannelCredentials(None)
) as channel:
stub = HelloStub(channel)
stub.SayHello(HelloRequest(name='test'))
spans = self.get_spans_with_sync_and_assert(size=2)
assert spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 0.75
assert spans[1].get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 0.5
def test_analytics_without_rate(self):
with self.override_config(
'grpc_server',
dict(analytics_enabled=True)
):
with self.override_config(
'grpc',
dict(analytics_enabled=True)
):
with grpc.secure_channel(
'localhost:%d' % (_GRPC_PORT),
credentials=grpc.ChannelCredentials(None)
) as channel:
stub = HelloStub(channel)
stub.SayHello(HelloRequest(name='test'))
spans = self.get_spans_with_sync_and_assert(size=2)
assert spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 1.0
assert spans[1].get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 1.0
def test_server_stream(self):
with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel:
stub = HelloStub(channel)
responses_iterator = stub.SayHelloTwice(HelloRequest(name='test'))
assert len(list(responses_iterator)) == 2
spans = self.get_spans_with_sync_and_assert(size=2)
server_span, client_span = spans
self._check_client_span(client_span, 'grpc-client', 'SayHelloTwice', 'server_streaming')
self._check_server_span(server_span, 'grpc-server', 'SayHelloTwice', 'server_streaming')
def test_client_stream(self):
requests_iterator = iter(
HelloRequest(name=name) for name in
['first', 'second']
)
with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel:
stub = HelloStub(channel)
response = stub.SayHelloLast(requests_iterator)
assert response.message == 'first;second'
spans = self.get_spans_with_sync_and_assert(size=2)
server_span, client_span = spans
self._check_client_span(client_span, 'grpc-client', 'SayHelloLast', 'client_streaming')
self._check_server_span(server_span, 'grpc-server', 'SayHelloLast', 'client_streaming')
def test_bidi_stream(self):
requests_iterator = iter(
HelloRequest(name=name) for name in
['first', 'second', 'third', 'fourth', 'fifth']
)
with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel:
stub = HelloStub(channel)
responses = stub.SayHelloRepeatedly(requests_iterator)
messages = [r.message for r in responses]
assert list(messages) == ['first;second', 'third;fourth', 'fifth']
spans = self.get_spans_with_sync_and_assert(size=2)
server_span, client_span = spans
self._check_client_span(client_span, 'grpc-client', 'SayHelloRepeatedly', 'bidi_streaming')
self._check_server_span(server_span, 'grpc-server', 'SayHelloRepeatedly', 'bidi_streaming')
def test_priority_sampling(self):
# DEV: Priority sampling is enabled by default
# Setting priority sampling reset the writer, we need to re-override it
with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel:
stub = HelloStub(channel)
response = stub.SayHello(HelloRequest(name='propogator'))
spans = self.get_spans_with_sync_and_assert(size=2)
server_span, client_span = spans
assert 'x-datadog-trace-id={}'.format(client_span.trace_id) in response.message
assert 'x-datadog-parent-id={}'.format(client_span.span_id) in response.message
assert 'x-datadog-sampling-priority=1' in response.message
def test_unary_abort(self):
with grpc.secure_channel('localhost:%d' % (_GRPC_PORT), credentials=grpc.ChannelCredentials(None)) as channel:
stub = HelloStub(channel)
with self.assertRaises(grpc.RpcError):
stub.SayHello(HelloRequest(name='abort'))
spans = self.get_spans_with_sync_and_assert(size=2)
server_span, client_span = spans
assert client_span.resource == '/helloworld.Hello/SayHello'
assert client_span.error == 1
assert client_span.get_tag(errors.ERROR_MSG) == 'aborted'
assert client_span.get_tag(errors.ERROR_TYPE) == 'StatusCode.ABORTED'
assert client_span.get_tag('grpc.status.code') == 'StatusCode.ABORTED'
def test_custom_interceptor_exception(self):
# add an interceptor that raises a custom exception and check error tags
# are added to spans
raise_exception_interceptor = _RaiseExceptionClientInterceptor()
with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel:
with self.assertRaises(_CustomException):
intercept_channel = grpc.intercept_channel(
channel,
raise_exception_interceptor
)
stub = HelloStub(intercept_channel)
stub.SayHello(HelloRequest(name='custom-exception'))
spans = self.get_spans_with_sync_and_assert(size=2)
server_span, client_span = spans
assert client_span.resource == '/helloworld.Hello/SayHello'
assert client_span.error == 1
assert client_span.get_tag(errors.ERROR_MSG) == 'custom'
assert client_span.get_tag(errors.ERROR_TYPE) == 'tests.contrib.grpc.test_grpc._CustomException'
assert client_span.get_tag(errors.ERROR_STACK) is not None
assert client_span.get_tag('grpc.status.code') == 'StatusCode.INTERNAL'
# no exception on server end
assert server_span.resource == '/helloworld.Hello/SayHello'
assert server_span.error == 0
assert server_span.get_tag(errors.ERROR_MSG) is None
assert server_span.get_tag(errors.ERROR_TYPE) is None
assert server_span.get_tag(errors.ERROR_STACK) is None
def test_client_cancellation(self):
# unpatch and restart server since we are only testing here caller cancellation
self._stop_server()
_unpatch_server()
self._start_server()
# have servicer sleep whenever request is handled to ensure we can cancel before server responds
# to requests
requests_iterator = iter(
HelloRequest(name=name) for name in
['sleep']
)
with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel:
with self.assertRaises(grpc.RpcError):
stub = HelloStub(channel)
responses = stub.SayHelloRepeatedly(requests_iterator)
responses.cancel()
next(responses)
spans = self.get_spans_with_sync_and_assert(size=1)
client_span = spans[0]
assert client_span.resource == '/helloworld.Hello/SayHelloRepeatedly'
assert client_span.error == 1
assert client_span.get_tag(errors.ERROR_MSG) == 'Locally cancelled by application!'
assert client_span.get_tag(errors.ERROR_TYPE) == 'StatusCode.CANCELLED'
assert client_span.get_tag(errors.ERROR_STACK) is None
assert client_span.get_tag('grpc.status.code') == 'StatusCode.CANCELLED'
def test_unary_exception(self):
with grpc.secure_channel('localhost:%d' % (_GRPC_PORT), credentials=grpc.ChannelCredentials(None)) as channel:
stub = HelloStub(channel)
with self.assertRaises(grpc.RpcError):
stub.SayHello(HelloRequest(name='exception'))
spans = self.get_spans_with_sync_and_assert(size=2)
server_span, client_span = spans
assert client_span.resource == '/helloworld.Hello/SayHello'
assert client_span.error == 1
assert client_span.get_tag(errors.ERROR_MSG) == 'exception'
assert client_span.get_tag(errors.ERROR_TYPE) == 'StatusCode.INVALID_ARGUMENT'
assert client_span.get_tag('grpc.status.code') == 'StatusCode.INVALID_ARGUMENT'
assert server_span.resource == '/helloworld.Hello/SayHello'
assert server_span.error == 1
assert server_span.get_tag(errors.ERROR_MSG) == 'exception'
assert server_span.get_tag(errors.ERROR_TYPE) == 'StatusCode.INVALID_ARGUMENT'
assert 'Traceback' in server_span.get_tag(errors.ERROR_STACK)
assert 'grpc.StatusCode.INVALID_ARGUMENT' in server_span.get_tag(errors.ERROR_STACK)
def test_client_stream_exception(self):
requests_iterator = iter(
HelloRequest(name=name) for name in
['first', 'exception']
)
with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel:
stub = HelloStub(channel)
with self.assertRaises(grpc.RpcError):
stub.SayHelloLast(requests_iterator)
spans = self.get_spans_with_sync_and_assert(size=2)
server_span, client_span = spans
assert client_span.resource == '/helloworld.Hello/SayHelloLast'
assert client_span.error == 1
assert client_span.get_tag(errors.ERROR_MSG) == 'exception'
assert client_span.get_tag(errors.ERROR_TYPE) == 'StatusCode.INVALID_ARGUMENT'
assert client_span.get_tag('grpc.status.code') == 'StatusCode.INVALID_ARGUMENT'
assert server_span.resource == '/helloworld.Hello/SayHelloLast'
assert server_span.error == 1
assert server_span.get_tag(errors.ERROR_MSG) == 'exception'
assert server_span.get_tag(errors.ERROR_TYPE) == 'StatusCode.INVALID_ARGUMENT'
assert 'Traceback' in server_span.get_tag(errors.ERROR_STACK)
assert 'grpc.StatusCode.INVALID_ARGUMENT' in server_span.get_tag(errors.ERROR_STACK)
def test_server_stream_exception(self):
with grpc.secure_channel('localhost:%d' % (_GRPC_PORT), credentials=grpc.ChannelCredentials(None)) as channel:
stub = HelloStub(channel)
with self.assertRaises(grpc.RpcError):
list(stub.SayHelloTwice(HelloRequest(name='exception')))
spans = self.get_spans_with_sync_and_assert(size=2)
server_span, client_span = spans
assert client_span.resource == '/helloworld.Hello/SayHelloTwice'
assert client_span.error == 1
assert client_span.get_tag(errors.ERROR_MSG) == 'exception'
assert client_span.get_tag(errors.ERROR_TYPE) == 'StatusCode.RESOURCE_EXHAUSTED'
assert client_span.get_tag('grpc.status.code') == 'StatusCode.RESOURCE_EXHAUSTED'
assert server_span.resource == '/helloworld.Hello/SayHelloTwice'
assert server_span.error == 1
assert server_span.get_tag(errors.ERROR_MSG) == 'exception'
assert server_span.get_tag(errors.ERROR_TYPE) == 'StatusCode.RESOURCE_EXHAUSTED'
assert 'Traceback' in server_span.get_tag(errors.ERROR_STACK)
assert 'grpc.StatusCode.RESOURCE_EXHAUSTED' in server_span.get_tag(errors.ERROR_STACK)
class _HelloServicer(HelloServicer):
def SayHello(self, request, context):
if request.name == 'propogator':
metadata = context.invocation_metadata()
context.set_code(grpc.StatusCode.OK)
message = ';'.join(
w.key + '=' + w.value
for w in metadata
if w.key.startswith('x-datadog')
)
return HelloReply(message=message)
if request.name == 'abort':
context.abort(grpc.StatusCode.ABORTED, 'aborted')
if request.name == 'exception':
context.abort(grpc.StatusCode.INVALID_ARGUMENT, 'exception')
return HelloReply(message='Hello {}'.format(request.name))
    def SayHelloTwice(self, request, context):
        """Server-streaming handler: yields a first reply, then either aborts
        (when the request name is 'exception') or yields a second reply."""
        yield HelloReply(message='first response')
        if request.name == 'exception':
            # Abort after the first message to exercise mid-stream errors.
            context.abort(grpc.StatusCode.RESOURCE_EXHAUSTED, 'exception')
        yield HelloReply(message='secondresponse')
def SayHelloLast(self, request_iterator, context):
names = [r.name for r in | |
import pytest
from textwrap import dedent
import attr
import numpy as np
import re
from bionic.descriptors.parsing import dnode_from_descriptor
from bionic.exception import CodeVersioningError
from bionic.utils.misc import single_element, single_unique_element
import bionic as bn
from ..helpers import import_code
class ModelFlowHarness:
    """
    Manages a Bionic flow and tests its behavior against a "model" implementation.
    This class wraps both a Bionic flow and a simplified model implementation of the
    same flow. The flows are kept in sync, allowing their behavior to be checked against
    each other. This allows large flows to be constructed and validated
    programmatically.
    The model flow implementation supports the following functionality:
    - creating string-valued entities derived from a variable number of other entities
    - making functional changes to the definition of any entity
    - testing the value of an entity and the set of (user-provided) functions computed
    in the process
    - persisted and non-persisted entities
    - memoized and non-memoized entities
    - functions with multiple entity outputs
    This class is similar to the ``SimpleFlowModel`` ("SFM") in ``test_persisted_fuzz``,
    but with the following differences:
    - SFM does not support non-persisted entities or multiple entity outputs
    - this class does not support the following:
    - non-deterministic (``changes_per_run``) entities
    - non-functional updates
    - cloud caching
    This class has a more sophisticated implementation than SFM, so it should be
    easier to port SFM's features to this class than vice versa. If we can port all of
    the above features, we can remove SFM altogether.
    """
    def __init__(
        self,
        builder,
        make_list,
        parallel_execution_enabled=False,
    ):
        """Initializes the harness.
        Args:
            builder: The Bionic flow builder wrapped by this harness.
            make_list: Factory returning a list-like object used to record computed
                descriptors (presumably so callers can supply a process-shared list
                for parallel runs -- confirm against callers).
            parallel_execution_enabled: Whether persisted entities are modeled as
                being computed in separate subprocesses.
        """
        self._builder = builder
        self._versioning_mode = "manual"
        self._parallel_execution_enabled = parallel_execution_enabled
        self._expect_exact_call_count_matches = True
        self._query_caching_enabled = True
        # TODO Would it make sense to factor all the model state into a single ModelFlow
        # object?
        self._entities_by_name = {}
        self._descriptors_computed_by_flow = make_list()
        self._descriptors_computed_by_model = make_list()
        self._clear_active_flow()
    def set_versioning_mode(self, mode):
        """Sets the code versioning mode on both the real flow and the model."""
        assert mode in ("manual", "assist", "auto")
        self._versioning_mode = mode
        self._builder.set("core__versioning_mode", mode)
    def disable_query_caching(self):
        """Disables per-query caching in both the real flow and the model."""
        self._builder.set("core__temp_memoize_if_uncached", False)
        self._query_caching_enabled = False
    def disable_exact_call_counting(self):
        """Relaxes checking so only the *set* of computed descriptors must match."""
        self._expect_exact_call_count_matches = False
    def get_all_entity_names(self):
        """Returns the names of all entities created so far."""
        return list(self._entities_by_name.keys())
    def create_entity(self, should_persist=True, should_memoize=True):
        """Creates a new model entity with a unique name and returns its name."""
        name = f"e{len(self._entities_by_name) + 1}"
        entity = ModelEntity(
            name=name,
            should_persist=should_persist,
            should_memoize=should_memoize,
        )
        assert name not in self._entities_by_name
        self._entities_by_name[name] = entity
        return name
    def add_binding(self, out_descriptor, dep_entity_names, use_tuples_for_output=True):
        """Adds a function ("binding") computing ``out_descriptor`` from the given
        dependency entities, to both the model and the real flow."""
        self._clear_active_flow()
        out_dnode = dnode_from_descriptor(out_descriptor)
        out_entities = list(
            map(self._entities_by_name.get, out_dnode.all_entity_names())
        )
        dep_entities = list(map(self._entities_by_name.get, dep_entity_names))
        # All output entities of one binding must agree on persistence;
        # unpacking a one-element set enforces that.
        should_persists = set(entity.should_persist for entity in out_entities)
        (should_persist,) = should_persists
        binding = ModelBinding(
            out_dnode=out_dnode,
            out_entities=out_entities,
            dep_entities=dep_entities,
            use_tuples_for_output=use_tuples_for_output,
        )
        for out_entity in out_entities:
            assert out_entity.binding is None
            out_entity.binding = binding
        for dep_entity_name in dep_entity_names:
            dep_entity = self._entities_by_name[dep_entity_name]
            # NOTE(review): ``out_entity`` here is the last output entity from the
            # loop above; since all outputs share one binding, registering a single
            # representative appears sufficient for ModelBinding's traversals --
            # confirm this is intentional.
            dep_entity.dependent_entities.append(out_entity)
        self._add_binding_to_flow(binding)
    def update_binding(
        self,
        entity_name,
        change_func_version=True,
        update_version_annotation=True,
    ):
        """Simulates a code change to the binding that outputs ``entity_name``,
        in both the model and the real flow."""
        self._clear_active_flow()
        entity = self._entities_by_name[entity_name]
        entity.binding.update(
            change_func=change_func_version,
            update_annotation=update_version_annotation,
            versioning_mode=self._versioning_mode,
        )
        self._add_binding_to_flow(entity.binding)
    def query_and_check_entity(self, entity_name, expect_exact_call_count_matches=None):
        """Computes ``entity_name`` in both the real flow and the model, checking
        that their values, exceptions, and computed descriptors agree."""
        if expect_exact_call_count_matches is None:
            expect_exact_call_count_matches = self._expect_exact_call_count_matches
        self._build_active_flow_if_missing()
        flow_exception = None
        try:
            flow_value = self._active_flow.get(entity_name)
        except CodeVersioningError as e:
            flow_exception = e
        context = ModelExecutionContext(
            parallel_execution_enabled=self._parallel_execution_enabled,
            query_caching_enabled=self._query_caching_enabled,
            versioning_mode=self._versioning_mode,
            memoized_flow_values_by_entity_name=self._active_model_memoized_values_by_entity_name,
            memoized_query_values_by_entity_name={},
            computed_descriptors=self._descriptors_computed_by_model,
        )
        model_exception = None
        try:
            model_value = self._compute_model_value(entity_name, context)
        except CodeVersioningError as e:
            model_exception = e
        # Both flows must agree on whether the query failed.
        assert (flow_exception is None) == (model_exception is None)
        if model_exception is None:
            assert flow_value == model_value
            if expect_exact_call_count_matches:
                assert sorted(self._descriptors_computed_by_flow) == sorted(
                    self._descriptors_computed_by_model
                )
            else:
                assert set(self._descriptors_computed_by_flow) == set(
                    self._descriptors_computed_by_model
                )
            self._clear_called_descriptors()
        else:
            assert flow_exception.__class__ == model_exception.__class__
            # Now we know that a CodeVersioningError has been thrown, we'll try to get
            # the flow back into a good state by fixing the versioning error. We can
            # pick any of the entities mentioned in the error, since they're all set
            # by the same binding. We'll keep recursively retrying until all the errors
            # are fixed and the query succeeds. At that point both the real flow and the
            # model flow should have computed and persisted the same set of descriptors,
            # so we'll be back in a known-good state.
            # (That's why we do the fixing inside this function instead of leaving it
            # to the caller: it's a little awkward, but it lets us guarantee that both
            # flows will be in sync by the time we return.)
            # It would be nice if we could also assert that both exceptions have the
            # same `bad_descriptor` field, but if there are multiple bad descriptors,
            # which one we get first is not defined. In theory we could, after fixing
            # everything, check that the sets of bad descriptors matched, but I'm not
            # sure it's worth the extra bookkeeping.
            bad_entity_name = dnode_from_descriptor(
                model_exception.bad_descriptor
            ).all_entity_names()[0]
            self.update_binding(
                entity_name=bad_entity_name,
                change_func_version=False,
                update_version_annotation=True,
            )
            self.query_and_check_entity(
                entity_name,
                # Since our query was interrupted, some extra non-persisted entities
                # may have been computed, so we can't expect the final counts to line
                # up exactly.
                expect_exact_call_count_matches=False,
            )
    # This is just used for debugging.
    def save_dag(self, filename):
        """Renders the real flow's DAG and saves it to ``filename``."""
        self._builder.build().render_dag().save(filename)
    def _clear_active_flow(self):
        """Discards the built flow and its associated model memoization cache."""
        self._active_flow = None
        self._active_model_memoized_values_by_entity_name = None
    def _build_active_flow_if_missing(self):
        """Builds the real flow (with a fresh model memoization cache) if needed."""
        if self._active_flow is None:
            self._active_flow = self._builder.build()
            self._active_model_memoized_values_by_entity_name = {}
    def _add_binding_to_flow(self, binding):
        """Registers ``binding``'s generated function with the real flow builder."""
        binding.add_to_builder(self._builder, self._descriptors_computed_by_flow)
    def _compute_model_value(self, entity_name, context):
        """Computes ``entity_name``'s value using the model implementation."""
        binding = self._entities_by_name[entity_name].binding
        return binding.compute_entity_value(entity_name, context)
    def _clear_called_descriptors(self):
        """Empties both call records in place, so shared references stay valid."""
        self._descriptors_computed_by_flow[:] = []
        self._descriptors_computed_by_model[:] = []
@attr.s
class ModelExecutionContext:
    """
    Represents the state associated with a modeled ``flow.get()`` call.
    """
    # Whether persisted entities are modeled as computed in a subprocess
    # (with fresh, empty caches for their dependencies).
    parallel_execution_enabled = attr.ib()
    # Whether non-persisted, non-memoized values may be cached for the
    # duration of a single query.
    query_caching_enabled = attr.ib()
    # Code versioning mode: 'manual', 'assist', or 'auto'.
    versioning_mode = attr.ib()
    # Entity name -> value cache shared across queries of the active flow.
    memoized_flow_values_by_entity_name = attr.ib()
    # Entity name -> value cache local to this single query.
    memoized_query_values_by_entity_name = attr.ib()
    # Record of descriptors whose functions were (model-)executed.
    computed_descriptors = attr.ib()
    def evolve(self, **kwargs):
        """Returns a copy of this context with the given fields replaced."""
        return attr.evolve(self, **kwargs)
@attr.s
class ModelEntity:
    """Represents an entity in a Bionic flow."""
    # Entity name (e.g. "e1"); also the variable name used in generated code.
    name = attr.ib()
    should_persist = attr.ib()
    should_memoize = attr.ib()
    # The ModelBinding that produces this entity's value, if any.
    binding = attr.ib(default=None)
    # Entities whose bindings depend on this entity.
    dependent_entities = attr.ib(default=attr.Factory(list))
    def value_from_deps(self, dep_values):
        """Returns this entity's model value, given its dependencies' values."""
        joined_deps = ",".join(dep_values)
        return "{}.{}({})".format(self.name, self.binding.func_version, joined_deps)
    def value_code_fragment(self):
        """Returns a Python expression computing this entity's value from its deps."""
        opening = f"'{self.name}.{self.binding.func_version}('"
        dep_terms = [f"+ {entity.name}" for entity in self.binding.dep_entities]
        return opening + " + ',' ".join(dep_terms) + " + ')'"
@attr.s
class ModelBinding:
    """
    Represents a user-provided function that computes the value of one or more entities.
    """
    # Descriptor node for the binding's output (an entity or a tuple of entities).
    out_dnode = attr.ib()
    # ModelEntity objects produced by this binding.
    out_entities = attr.ib()
    # ModelEntity objects this binding's function depends on.
    dep_entities = attr.ib()
    # If True, generated flow code uses @bn.returns; otherwise @bn.outputs.
    use_tuples_for_output = attr.ib()
    # Current version of the (modeled) user function.
    func_version = attr.ib(default=1)
    # Version recorded in the generated version annotation; may lag func_version.
    annotated_func_version = attr.ib(default=1)
    has_persisted_values = attr.ib(default=False)
    # Entity name -> persisted value, when has_persisted_values is True.
    persisted_values_by_entity_name = attr.ib(default=None)
    # Name of the entity whose change made the persisted values stale, if any.
    persisted_values_stale_ancestor = attr.ib(default=None)
    # Short-circuit flags: when True, traversals of this binding's subtree
    # can stop early (see clear_persisted_values / mark_persisted_values_stale).
    no_descendant_can_have_persisted_values = attr.ib(default=True)
    no_descendant_can_have_stale_persisted_values = attr.ib(default=True)
    def update(self, change_func, update_annotation, versioning_mode):
        """Models a user edit to this binding's function.

        Args:
            change_func: Whether the function's behavior (version) changes.
            update_annotation: Whether the version annotation is brought in sync
                with the current function version.
            versioning_mode: 'manual', 'assist', or 'auto'; determines how
                persisted values are invalidated.
        """
        func_changed = False
        annotation_changed = False
        if change_func:
            self.func_version += 1
            func_changed = True
        # The annotation only "changes" if it was actually out of date.
        if update_annotation and self.annotated_func_version != self.func_version:
            self.annotated_func_version = self.func_version
            annotation_changed = True
        if versioning_mode == "manual":
            # Manual mode trusts the annotation: only an annotation change
            # invalidates persisted values.
            if annotation_changed:
                self.clear_persisted_values()
        elif versioning_mode == "assist":
            if annotation_changed:
                self.clear_persisted_values()
            elif func_changed:
                # Function changed without an annotation update: persisted values
                # here and downstream become stale, which raises a
                # CodeVersioningError when they are later accessed.
                stale_ancestor = self.out_entities[0].name
                self.mark_persisted_values_stale(stale_ancestor)
        elif versioning_mode == "auto":
            # Auto mode detects any change to the function or its annotation.
            if func_changed or annotation_changed:
                self.clear_persisted_values()
        else:
            assert False
def clear_persisted_values(self):
if self.no_descendant_can_have_persisted_values:
return
if self.has_persisted_values:
self.has_persisted_values = False
self.persisted_values_by_entity_name = None
for out_entity in self.out_entities:
for dependent_entity in out_entity.dependent_entities:
dependent_entity.binding.clear_persisted_values()
self.no_descendant_can_have_persisted_values = True
self.no_descendant_can_have_stale_persisted_values = True
def mark_persisted_values_stale(self, stale_ancestor):
if self.no_descendant_can_have_stale_persisted_values:
return
if self.has_persisted_values and self.persisted_values_stale_ancestor is None:
self.persisted_values_stale_ancestor = stale_ancestor
for out_entity in self.out_entities:
for dependent_entity in out_entity.dependent_entities:
dependent_entity.binding.mark_persisted_values_stale(stale_ancestor)
self.no_descendant_can_have_stale_persisted_values = True
def value_code_fragment_for_outputs(self):
return " ".join(
f"({entity.value_code_fragment()})," for entity in self.out_entities
)
def value_code_fragment_for_returns(self, out_dnode=None):
if out_dnode is None:
out_dnode = self.out_dnode
if out_dnode.is_entity():
entity_name = out_dnode.assume_entity().name
entity = single_element(
entity for entity in self.out_entities if entity.name == entity_name
)
return entity.value_code_fragment()
elif out_dnode.is_tuple():
return " ".join(
f"({self.value_code_fragment_for_returns(child_dnode)}),"
for child_dnode in out_dnode.children
)
else:
assert False
def get_and_save_values_from_deps(self, dep_values):
values_by_entity_name = {
entity.name: entity.value_from_deps(dep_values)
for entity in self.out_entities
}
if self.should_persist:
self.persisted_values_by_entity_name = values_by_entity_name
self.persisted_values_stale_ancestor = None
self.has_persisted_values = True
self.no_descendant_can_have_persisted_values = False
self.no_descendant_can_have_stale_persisted_values = False
return values_by_entity_name
    def compute_entity_value(self, entity_name, context):
        """Returns the model value of ``entity_name``, mimicking Bionic's caching.

        Lookup order: per-query cache, then per-flow memoization cache, then
        persisted values, then recomputation from dependencies.
        """
        if entity_name in context.memoized_query_values_by_entity_name:
            return context.memoized_query_values_by_entity_name[entity_name]
        if entity_name in context.memoized_flow_values_by_entity_name:
            return context.memoized_flow_values_by_entity_name[entity_name]
        if not self.has_persisted_values:
            running_in_subprocess = (
                context.parallel_execution_enabled and self.should_persist
            )
            if running_in_subprocess:
                # A subprocess doesn't share the parent's in-memory caches, so
                # dependencies are computed against fresh (empty) caches.
                dep_context = context.evolve(
                    memoized_flow_values_by_entity_name={},
                    memoized_query_values_by_entity_name={},
                )
            else:
                dep_context = context
            dep_values = [
                dep_entity.binding.compute_entity_value(dep_entity.name, dep_context)
                for dep_entity in self.dep_entities
            ]
            values_by_entity_name = self.get_and_save_values_from_deps(dep_values)
            # Record that this binding's function was (model-)executed.
            context.computed_descriptors.append(self.out_descriptor)
        else:
            if self.persisted_values_stale_ancestor is not None:
                # Only "assist" mode leaves stale persisted values in place;
                # accessing them is a versioning error.
                assert context.versioning_mode == "assist"
                stale_ancestor = self.persisted_values_stale_ancestor
                raise CodeVersioningError("Binding is stale", stale_ancestor)
            values_by_entity_name = self.persisted_values_by_entity_name
        if self.should_memoize:
            context.memoized_flow_values_by_entity_name.update(values_by_entity_name)
        elif not self.should_persist and context.query_caching_enabled:
            # Non-memoized, non-persisted values are still cached for the
            # duration of a single query when query caching is enabled.
            context.memoized_query_values_by_entity_name.update(values_by_entity_name)
        return values_by_entity_name[entity_name]
    def add_to_builder(self, builder, descriptors_computed_by_flow):
        """Generates and registers the real Bionic function for this binding.

        Builds Python source for a decorated function matching this binding's
        outputs, dependencies, version, and persistence/memoization settings,
        then executes it against ``builder``. The generated function appends
        this binding's output descriptor to ``descriptors_computed_by_flow``
        each time it actually runs.
        """
        out_entity_names = [entity.name for entity in self.out_entities]
        joined_dep_entity_names = ", ".join(self.dep_entity_names)
        if self.use_tuples_for_output:
            output_decorator_fragment = f"""
            @bn.returns({self.out_descriptor!r})
            """.strip()
            output_value_fragment = self.value_code_fragment_for_returns()
        else:
            output_decorator_fragment = f"""
            @bn.outputs({', '.join(repr(name) for name in out_entity_names)})
            """.strip()
            output_value_fragment = self.value_code_fragment_for_outputs()
        # Names the generated code is allowed to reference.
        vars_dict = {
            "bn": bn,
            "builder": builder,
            "record_call": descriptors_computed_by_flow.append,
        }
        raw_func_code = f"""
        @builder
        {output_decorator_fragment}
        @bn.version_no_warnings({self.annotated_func_version})
        @bn.persist({self.should_persist})
        @bn.memoize({self.should_memoize})
        def _({joined_dep_entity_names}):
            record_call({self.out_descriptor!r})
            return {output_value_fragment}
        """
        import_code(dedent(raw_func_code), vars_dict=vars_dict)
@property
def dep_entity_names(self):
return [entity.name for entity in | |
from math import gcd
from ..utils.math import cartesian
import numpy as np
import warnings
import copy
from .array_elements import ISOTROPIC_SCALAR_SENSOR
from .perturbations import LocationErrors, GainErrors, PhaseErrors, \
MutualCoupling
class ArrayDesign:
    """Base class for all array designs.
    Arrays can be 1D, 2D, or 3D. Consider the standard cartesian coordinate
    system. We define 1D, 2D, and 3D arrays as follows:
    * 1D arrays are linear arrays along the x-axis.
    * 2D arrays are planar arrays lying within the xy-plane.
    * 3D arrays are not restricted, whose element can exist anywhere in the 3D
      space.
    We store the element locations with an m x d array, where m denotes the
    number of elements (size) of the array.
    * For 1D arrays, d equals one, and the i-th row of the m x 1 array stores
      the x coordinate of the i-th element.
    * For 2D arrays, d equals two, and the i-th row of the m x 2 array stores
      the x and y coordinates of the i-th element.
    * For 3D arrays, d equals three, and the i-th row of the m x 3 array stores
      the x, y, and z coordinates of the i-th element.
    While this class is generally intended for internal use, you can also use
    this class to create custom arrays. Just make sure that you do not modify
    ``locations`` after creating the array.
    Args:
        locations: A list or ndarray specifying the element locations. For
            1D arrays, ``locations`` can be either a 1D list/ndarray, or
            an m x 1 list/ndarray, where m is the number of elements. For 2D
            or 3D arrays, ``locations`` must be a 2D list/ndarray of shape
            m x d, where d is 2 or 3. If the input is an ndarray, it will
            not be copied and should not be changed after creating the
            array design.
        name (str): Name of the array design.
        perturbations (list or dict): If a list is given, it should be a list of
            :class:`~doatools.model.perturbations.ArrayPerturbation`.
            If a dictionary is given, it should be a dictionary containing the
            perturbation definitions. Corresponding
            :class:`~doatools.model.perturbations.ArrayPerturbation` will be
            created automatically. The keys should be among the following:
            * ``'location_errors'``
            * ``'gain_errors'`` (relative, -0.2 means 0.8 * original gain)
            * ``'phase_errors'`` (in radians)
            * ``'mutual_coupling'``
            The values in the dictionary are two-element tuples. The first
            element is an :class:`~numpy.ndarray` representing the parameters,
            and the second element is a boolean specifying whether these
            parameters are known in prior.
        element (~doatools.model.array_elements.ArrayElement): Array element
            (sensor) used in this array. Default value is an instance of
            :class:`~doatools.model.array_elements.IsotropicScalarSensor`.
    Notes:
        Array designs are generally not changed after creation. Because array
        design objects are passed around when computing steering matrices,
        weight functions, etc., having a mutable internal state leads to more
        complexities and potential unexpected results. Although the internal
        states are generally accessible in Python, please refrain from modifying
        them unless you are aware of the side effects.
    """
def __init__(self, locations, name, perturbations=[],
element=ISOTROPIC_SCALAR_SENSOR):
if not isinstance(locations, np.ndarray):
locations = np.array(locations)
if locations.ndim > 2:
raise ValueError('Expecting a 1D vector or a 2D matrix.')
if locations.ndim == 1:
locations = locations.reshape((-1, 1))
elif locations.shape[1] > 3:
raise ValueError('Array can only be 1D, 2D or 3D.')
self._locations = locations
self._name = name
self._element = element
# Validate and add perturbations
self._perturbations = {}
self._add_perturbation_from_list(self._parse_input_perturbations(perturbations))
    @property
    def name(self):
        """str: Name given to this array design at construction time."""
        return self._name
    @property
    def size(self):
        """int: Number of elements (rows of the location matrix)."""
        return self._locations.shape[0]
    @property
    def output_size(self):
        """Retrieves the output size of the array.
        In general, the output size should be equal to ``size``. However, for
        vector sensor arrays, the output size is greater than the array size.
        """
        return self.size
    @property
    def element_locations(self):
        """Retrieves the nominal element locations.
        Returns:
            An M x d matrix (a copy, so the caller may modify it freely),
            where M is the number of elements and d is the number of
            dimensions of the nominal array.
        """
        return self._locations.copy()
    @property
    def actual_element_locations(self):
        """Retrieves the actual element locations, considering location errors.
        Each registered perturbation is given a chance to transform the
        nominal locations via ``perturb_sensor_locations``.
        Returns:
            An M x d matrix, where M is the number of elements and d is the
            maximum of the following two:
            1. number of dimensions of the nominal array;
            2. number of dimensions of the sensor location errors.
        """
        locations = self._locations
        for p in self._perturbations.values():
            locations = p.perturb_sensor_locations(locations)
        return locations
    @property
    def element(self):
        """Retrieves the array element (sensor model) shared by all sensors."""
        return self._element
    @property
    def is_perturbed(self):
        """bool: Whether at least one perturbation is registered."""
        return len(self._perturbations) > 0
    @property
    def ndim(self):
        """Retrieves the number of dimensions of the nominal array.
        The number of dimensions is defined as the number of columns of the
        ndarray storing the nominal array element locations. It does not
        reflect the number of dimensions of the minimal subspace in which the
        nominal array lies. For instance, if the element locations are given by
        ``[[0, 0], [1, 1], [2, 2]]``, ``ndim`` equals to 2 instead of 1, despite
        the fact that this array is a linear array.
        Perturbations do not affect this value.
        """
        return self._locations.shape[1]
    @property
    def actual_ndim(self):
        """Retrieves the number of dimensions of the array, considering location errors."""
        return self.actual_element_locations.shape[1]
    def has_perturbation(self, ptype):
        """Checks if the array has the given type of perturbation.
        Args:
            ptype: Perturbation class used as the lookup key.
        """
        return ptype in self._perturbations
    def is_perturbation_known(self, ptype):
        """Checks if the specified perturbation is known in prior.
        Raises ``KeyError`` if no perturbation of type ``ptype`` is registered.
        """
        return self._perturbations[ptype].is_known
    def get_perturbation_params(self, ptype):
        """Retrieves the parameters for the specified perturbation type.
        Raises ``KeyError`` if no perturbation of type ``ptype`` is registered.
        """
        return self._perturbations[ptype].params
    @property
    def perturbations(self):
        """Retrieves a list of all perturbations."""
        # NOTE: this is a shallow copy — the list is new, but the
        # perturbation objects themselves are shared with this design.
        # (The original comment claimed a deep copy, which is incorrect.)
        return list(self._perturbations.values())
def _add_perturbation_from_list(self, perturbations, raise_on_override=True):
"""Adds perturbations from a list of perturbations.
Args:
perturbations (list): A list of
:class:`~doatools.model.perturbations.ArrayPerturbation.`.
raise_on_override: Specifies whether an error should be raised when
a new perturbation of the same type overrides the existing one.
"""
for p in perturbations:
applicable, msg = p.is_applicable_to(self)
if not applicable:
raise RuntimeError(msg)
p_class = p.__class__
if p_class in self._perturbations and raise_on_override:
raise RuntimeError(
'Cannot have more than one perturbations of the same type. '
'Attempting to add another perturbation of the type {0}.'
.format(p_class.__name__)
)
self._perturbations[p_class] = p
def _parse_input_perturbations(self, perturbations):
if isinstance(perturbations, dict):
factories = {
'location_errors': (lambda p, k: LocationErrors(p, k)),
'gain_errors': (lambda p, k: GainErrors(p, k)),
'phase_errors': (lambda p, k: PhaseErrors(p, k)),
'mutual_coupling': (lambda p, k: MutualCoupling(p, k))
}
perturbations = [factories[k](v[0], v[1]) for k, v in perturbations.items()]
return perturbations
    def get_perturbed_copy(self, perturbations, new_name=None):
        """Returns a copy of this array design but with the specified
        perturbations.
        The specified perturbations will replace the existing ones.
        Notes:
            The default implementation performs a shallow copy of all existing
            fields using :func:`copy.copy`. Override this method if special
            operations are required.
        Args:
            perturbations (list or dict): If a list is given, it should be a
                list of
                :class:`~doatools.model.perturbations.ArrayPerturbation`.
                If a dictionary is given, it should be a dictionary containing
                the perturbation definitions. Corresponding
                :class:`~doatools.model.perturbations.ArrayPerturbation` will be
                created automatically. The keys should be among the following:
                * ``'location_errors'``
                * ``'gain_errors'`` (relative, -0.2 means 0.8 * original gain)
                * ``'phase_errors'`` (in radians)
                * ``'mutual_coupling'``
                The values in the dictionary are two-element tuples. The first
                element is an :class:`~numpy.ndarray` representing the
                parameters, and the second element is a boolean specifying
                whether these parameters are known in prior.
            new_name (str): An optional new name for the resulting array design.
                If not provided, the name of the original array design will be
                used.
        """
        array = self.get_perturbation_free_copy(new_name)
        # Merge perturbation parameters.
        new_perturbations = self._parse_input_perturbations(perturbations)
        array._perturbations = self._perturbations.copy()
        # raise_on_override=False: new perturbations silently replace any
        # existing perturbation of the same type.
        array._add_perturbation_from_list(new_perturbations, False)
        return array
def get_perturbation_free_copy(self, new_name=None):
"""Returns a perturbation-free copy of this array design.
Notes:
The default implementation performs a shallow copy of all existing
fields using :meth:``~copy.copy``. Override this method if special
operations are required.
Args:
new_name (str): An optional new name for the resulting array design.
If not provided, the name of the original array design will be
used.
"""
if new_name is None:
new_name = self._name
array | |
import datanator.config.core
from datanator.util import mongo_util
from datanator.util import file_util, chem_util
from datanator.util import molecule_util
import requests
from xml import etree
import libsbml
import re
import datetime
import bs4
import html
import csv
import pubchempy
import sys
import Bio.Alphabet
import Bio.SeqUtils
import math
import logging
import pymongo
class SabioRk:
def __init__(self, cache_dirname=None, MongoDB=None, replicaSet=None, db=None,
verbose=False, max_entries=float('inf'), username=None,
password=<PASSWORD>, authSource='admin', webservice_batch_size=50,
excel_batch_size=50):
self.cache_dirname = cache_dirname
self.MongoDB = MongoDB
self.replicaSet = replicaSet
self.db = db
self.verbose = verbose
self.max_entries = max_entries
self.client, self.db_obj, self.collection = mongo_util.MongoUtil(
MongoDB=MongoDB, db=db, username=username, password=password,
authSource=authSource).con_db('sabio_rk')
self.client, self.db_obj, self.collection_compound = mongo_util.MongoUtil(
MongoDB=MongoDB, db=db, username=username, password=password,
authSource=authSource).con_db('sabio_compound')
self.excel_batch_size = excel_batch_size
self.ENDPOINT_DOMAINS = {
'sabio_rk': 'http://sabiork.h-its.org',
'uniprot': 'http://www.uniprot.org',
}
self.ENDPOINT_KINETIC_LAWS_SEARCH = self.ENDPOINT_DOMAINS['sabio_rk'] + \
'/sabioRestWebServices/searchKineticLaws/entryIDs'
self.ENDPOINT_WEBSERVICE = self.ENDPOINT_DOMAINS['sabio_rk'] + \
'/sabioRestWebServices/kineticLaws'
self.ENDPOINT_EXCEL_EXPORT = self.ENDPOINT_DOMAINS['sabio_rk'] + \
'/entry/exportToExcelCustomizable'
self.ENDPOINT_COMPOUNDS_PAGE = self.ENDPOINT_DOMAINS['sabio_rk'] + \
'/compdetails.jsp'
self.ENDPOINT_KINETIC_LAWS_PAGE = self.ENDPOINT_DOMAINS['sabio_rk'] + \
'/kindatadirectiframe.jsp'
self.SKIP_KINETIC_LAW_IDS = (51286,)
self.PUBCHEM_MAX_TRIES = 10
self.PUBCHEM_TRY_DELAY = 0.25
self.webservice_batch_size = webservice_batch_size
self.file_manager = file_util.FileUtil()
self.chem_manager = chem_util.ChemUtil()
    def load_content(self):
        """ Download the content of SABIO-RK and store it to a remote mongoDB. """
        def ids_to_process(total_ids, query):
            # NOTE(review): despite the name, this returns the intersection of
            # total_ids with the ids already stored that match `query`, i.e.
            # previously loaded laws selected for (re-)processing — confirm
            # that this is the intended semantics.
            exisitng_ids = self.collection.distinct('kinlaw_id', filter=query)
            exisitng_ids.sort()
            loaded_new_ids = list(set(total_ids).intersection(exisitng_ids))
            loaded_new_ids.sort()
            return loaded_new_ids
        ##################################
        ##################################
        # determine ids of kinetic laws
        if self.verbose:
            print('Downloading the IDs of the kinetic laws ...')
        ids = self.load_kinetic_law_ids()
        if self.verbose:
            print(' Downloaded {} IDs'.format(len(ids)))
        ##################################
        ##################################
        # remove bad IDs
        ids = list(filter(lambda id: id not in self.SKIP_KINETIC_LAW_IDS, ids))
        # sort ids
        ids.sort()
        # load only `max_entries` IDs
        if len(ids) > self.max_entries:
            ids = ids[0:self.max_entries]
        ##################################
        ##################################
        # download kinetic laws
        # exisitng_ids = self.collection.distinct('kinlaw_id')
        # query = {'parameters.norm_units': {'$exists': False}}
        # docs = self.collection.find(filter=query, projection={'kinlaw_id': 1})
        # exisitng_ids = []
        # for doc in docs:
        #     exisitng_ids.append(doc['kinlaw_id'])
        # exisitng_ids.sort()
        # if exisitng_ids is None:
        #     exisitng_ids = []
        # new_ids = list(set(ids).difference(set(exisitng_ids)))
        # if len(exisitng_ids) != 0:
        #     new_ids.append(exisitng_ids[-1])
        # new_ids.sort()
        new_ids = []  # NOTE(review): assigned but never used below.
        # Select laws whose parameters were modified no later than the most
        # recently inserted law's first-parameter timestamp.
        doc = self.collection.find_one(filter={}, projection={'kinlaw_id': 1,'parameters': 1}, sort=[('kinlaw_id', -1)], limit=1)
        # NOTE(review): debugging print left in place.
        print(doc['kinlaw_id'])
        last_modified = doc['parameters'][0]['modified']
        query = {'parameters.modified': {'$lte': last_modified}}
        loaded_new_ids = ids_to_process(ids, query)
        if self.verbose:
            print('Downloading {} kinetic laws ...'.format(len(loaded_new_ids)))
        # NOTE(review): missing_ids is never consumed afterwards.
        missing_ids = self.load_kinetic_laws(loaded_new_ids)
        if self.verbose:
            print(' done')
        ###################################
        ###################################
        # fill in missing information from Excel export
        # query = {'mechanism': {'$exists': False}}
        doc = self.collection.find_one(filter={}, projection={'kinlaw_id': 1,'parameters': 1}, sort=[('kinlaw_id', -1)], limit=1)
        last_modified = doc['parameters'][0]['modified']
        query = {'parameters.modified': {'$lte': last_modified}}
        loaded_new_ids = ids_to_process(ids, query)
        if self.verbose:
            print('Updating {} kinetic laws ...'.format(len(loaded_new_ids)))
        self.load_missing_kinetic_law_information_from_tsv(loaded_new_ids)
        if self.verbose:
            print(' done')
        # ##################################
        # ##################################
        # # fill in missing information from HTML pages
        # constraint_0 = {'enzyme.subunits.uniprot': {'$exists': True}}
        # constraint_1 = {'enzyme.subunits.coefficient': {'$exists': False}}
        # query = {'$and':[constraint_0, constraint_1]}
        # loaded_new_ids = ids_to_process(ids, query)
        # if self.verbose:
        #     print('Updating {} kinetic laws ...'.format(len(loaded_new_ids)))
        # self.load_missing_enzyme_information_from_html(loaded_new_ids)
        # if self.verbose:
        #     print(' done')
        ##################################
        ##################################
        # Normalize parameter values for laws that lack norm_value.
        query = {'parameters.norm_value': {'$exists': False}}
        loaded_new_ids = ids_to_process(ids, query)
        if self.verbose:
            print('Normalizing {} parameter values ...'.format(len(loaded_new_ids)))
        self.normalize_kinetic_laws(loaded_new_ids)
        if self.verbose:
            print(' done')
        # ##################################
        # ##################################
        # constraint_0 = {'products.structures': {'$exists': True}}
        # # constraint_1 = {'products.structures.InChI_Key': {'$exists': True}}
        # constraint_1 = {'reactants.structures': {'$exists': True}}
        # query = {'$or': [constraint_0, constraint_1]}
        # loaded_new_ids = ids_to_process(ids, query)
        # if self.verbose:
        #     print('Adding {} inchikey values ...'.format(len(loaded_new_ids)))
        # self.add_inchi_hash(loaded_new_ids)
        # if self.verbose:
        #     print(' Completed')
def load_kinetic_law_ids(self):
""" Download the IDs of all of the kinetic laws stored in SABIO-RK
Returns:
:obj:`list` of :obj:`int`: list of kinetic law IDs
"""
# create session
response = requests.get(self.ENDPOINT_KINETIC_LAWS_SEARCH, params={
'q': 'DateSubmitted:01/01/2000',
})
response.raise_for_status()
# get IDs of kinetic laws
root = etree.ElementTree.fromstring(response.text)
ids = [int(float(node.text)) for node in root.findall('SabioEntryID')]
# sort ids
ids.sort()
# return IDs
return ids
    def load_kinetic_laws(self, ids):
        """ Download kinetic laws from SABIO-RK
        Args:
            ids (:obj:`list` of :obj:`int`): list of IDs of kinetic laws to download
        Returns:
            :obj:`list` of :obj:`int`: IDs that were requested but could not
                be loaded (also logged as a warning)
        Raises:
            :obj:`Error`: if an HTTP request fails
        """
        # todo: scrape strain, recombinant, experiment type information from web pages
        session = requests
        batch_size = self.webservice_batch_size
        loaded_ids = []
        for i_batch in range(int(math.ceil(float(len(ids)) / batch_size))):
            # NOTE(review): the float modulo only equals 0 reliably when
            # batch_size divides 100; confirm the intended print cadence.
            if self.verbose and (i_batch % max(1, 100. / batch_size) == 0):
                print(' Downloading kinetic laws {}-{} of {} in SBML format'.format(
                    i_batch * batch_size + 1,
                    min(len(ids), i_batch * batch_size + max(100, batch_size)),
                    len(ids)))
            # One web-service request per batch of law ids.
            batch_ids = ids[i_batch *
                            batch_size:min((i_batch + 1) * batch_size, len(ids))]
            response = session.get(self.ENDPOINT_WEBSERVICE, params={
                'kinlawids': ','.join(str(id) for id in batch_ids),
            })
            # An empty body means the service returned nothing for the batch.
            if not response.text:
                raise Exception('Unable to download kinetic laws with ids {}'.format(
                    ', '.join([str(id) for id in batch_ids])))
            response.raise_for_status()
            loaded_ids += self.create_kinetic_laws_from_sbml(batch_ids, response.text)
        # Report any requested ids that never made it into the database.
        not_loaded_ids = list(set(ids).difference(loaded_ids))
        if not_loaded_ids:
            not_loaded_ids.sort()
            warning = 'Several kinetic laws were not found:\n {}'.format(
                '\n '.join([str(id) for id in not_loaded_ids]))
            logging.warning(warning)
        return not_loaded_ids
def create_kinetic_laws_from_sbml(self, ids, sbml):
""" Add kinetic laws defined in an SBML file to the local mongodb database
Args:
ids (:obj:`list` of :obj:`int`): list kinetic law IDs
sbml (:obj:`str`): SBML representation of one or more kinetic laws (root)
Returns:
:obj:`tuple`:
* :obj:`list` of :obj:`KineticLaw`: list of kinetic laws
* :obj:`list` of :obj:`Compound` or :obj:`Enzyme`: list of species (compounds or enzymes)
* :obj:`list` of :obj:`Compartment`: list of compartments
"""
reader = libsbml.SBMLReader()
doc = reader.readSBMLFromString(sbml)
model = doc.getModel()
functions = {}
functions_sbml = model.getListOfFunctionDefinitions()
for i_function in range(functions_sbml.size()):
function_sbml = functions_sbml.get(i_function)
math_sbml = function_sbml.getMath()
if math_sbml.isLambda() and math_sbml.getNumChildren():
eq = libsbml.formulaToL3String(
math_sbml.getChild(math_sbml.getNumChildren() - 1))
else:
eq = None
if eq in ('', 'NaN'):
eq = None
functions[function_sbml.getId()] = eq
units = {}
units_sbml = model.getListOfUnitDefinitions()
for i_unit in range(units_sbml.size()):
unit_sbml = units_sbml.get(i_unit)
units[unit_sbml.getId()] = unit_sbml.getName()
# species
specie_properties = {}
species_sbml = model.getListOfSpecies()
species = []
for i_specie in range(species_sbml.size()):
specie_sbml = species_sbml.get(i_specie)
specie, properties = self.get_specie_from_sbml(specie_sbml)
species.append(specie)
specie_properties[specie_sbml.getId()] = properties
# kinetic laws
reactions_sbml = model.getListOfReactions()
if reactions_sbml.size() != len(ids):
raise ValueError('{} reactions {} is different from the expected {}'.format(
reaction_sbml.size(), len(ids)))
# kinetic_laws = []
loaded_ids = []
for i_reaction, _id in enumerate(ids):
reaction_sbml = reactions_sbml.get(i_reaction)
if reaction_sbml is None:
print("No reactions parsed from kinlaw_id {}".format(_id))
kinetic_law = self.create_kinetic_law_from_sbml(
_id, reaction_sbml, species, specie_properties, functions, units)
try:
# self.collection.update_one({'kinlaw_id': _id},
# {'$set': kinetic_law},
# upsert=True)
self.collection.update_one({'kinlaw_id': _id},
{'$set': {'parameters': kinetic_law['parameters']}},
upsert=True)
except pymongo.errors.WriteError as err:
logging.error(err)
except TypeError:
print('Issue loading kinlaw_id {}'.format(_id))
loaded_ids.append(_id)
return loaded_ids
def create_kinetic_law_from_sbml(self, id, sbml, root_species, specie_properties, functions, units):
""" Make a kinetic law doc for mongoDB
Args:
id (:obj:`int`): identifier
sbml (:obj:`libsbml.KineticLaw`): SBML-representation of a reaction (reaction_sbml)
species (:obj:`list`): list of species in root sbml
specie_properties (:obj:`dict`): additional properties of the compounds/enzymes
* `is_wildtype` (:obj:`bool`): indicates if the enzyme is wildtype or mutant
* `variant` (:obj:`str`): description of the variant of the eznyme
* `modifier_type` (:obj:`str`): type of the enzyme (e.g. Modifier-Catalyst)
functions (:obj:`dict` of :obj:`str`: :obj:`str`): dictionary of rate law equations (keys = IDs in SBML, values = equations)
units (:obj:`dict` of :obj:`str`: :obj:`str`): dictionary of units (keys = IDs in SBML, values = names)
Returns:
:obj:`dictionary`: kinetic law
Raises:
:obj:`ValueError`: if the temperature is expressed in an unsupported unit
"""
law = sbml.getKineticLaw()
x_refs = self.create_cross_references_from_sbml(law)
reaction_x_refs = self.create_cross_references_from_sbml(sbml)
kinetic_law = {}
# stop if kinetic law entry is empty
if not law.getMetaId():
return None
""" participants """
kinetic_law['reactants'] = []
reactants = sbml.getListOfReactants()
for i_part in range(reactants.size()):
part_sbml = reactants.get(i_part)
compound, compartment = self.get_specie_reference_from_sbml(
part_sbml.getSpecies(), root_species)
compound = self.load_compounds(compound)
if 'structures' not in compound[0].keys():
compound = self.infer_compound_structures_from_names(compound)
part = {
'compartment': compartment,
'coefficient': part_sbml.getStoichiometry()}
react = {**compound[0], **part}
kinetic_law['reactants'].append(react)
kinetic_law['products'] = []
products = sbml.getListOfProducts()
for i_part in range(products.size()):
part_sbml = products.get(i_part)
compound, compartment = self.get_specie_reference_from_sbml(
part_sbml.getSpecies(), root_species)
compound = self.load_compounds(compound)
if 'structures' not in compound[0].keys():
compound = self.infer_compound_structures_from_names(compound)
part = {
'compartment': compartment,
'coefficient': part_sbml.getStoichiometry()}
prod = {**compound[0], **part}
kinetic_law['products'].append(prod)
""" cross references """
# Note: these are stored KineticLaws rather than under Reactions because this seems to how SABIO-RK stores this information.
# For example, kinetic laws 16016 and 28003 are associated with reaction 9930, but they have different EC numbers 172.16.58.3 and
# 192.168.127.12, respectively.
kinetic_law['cross_references'] = list(
filter(lambda x_ref: list(x_ref.keys())[0] not in ['taxonomy'], reaction_x_refs))
# rate_law
kinetic_law['equation'] = functions[law.getMetaId()[5:]]
# parameters
kinetic_law['parameters'] = []
params = law.getListOfLocalParameters()
for i_param in range(params.size()):
param = params.get(i_param)
match = re.match(
| |
# algorithm.py
import copy
import numpy as np
class FFDAlgorithm(object):
    """Free-form deformation (FFD) of a point cloud driven by a cubic
    uniform B-spline control lattice.

    A regular grid of control points is wrapped around the object's bounding
    box; displacing control points deforms nearby object points through the
    B-spline basis functions.

    Args:
        num_x, num_y, num_z: Number of control points along each axis.
        filename: Unused; kept for interface compatibility.
        object_points: Sequence of [x, y, z] coordinates of the object.
    """

    def __init__(self, num_x, num_y, num_z, filename, object_points):
        # Number of control points along each coordinate axis.
        self.cp_num_x = num_x
        self.cp_num_y = num_y
        self.cp_num_z = num_z
        self.object_points_initial = object_points

    def cover_obj(self, initial=True):
        """Wraps the object's point cloud in a control lattice.

        Computes the bounding box of the object points and the grid spacing.
        When ``initial`` is True, also creates zero control-point offsets and
        the control-point coordinates. Finally, every object point is indexed
        by the lattice cell that contains it.
        """
        # Bounding box of the object determines the lattice extent.
        points = np.array(self.object_points_initial)
        self.min_x, self.min_y, self.min_z = points.min(axis=0)
        self.max_x, self.max_y, self.max_z = points.max(axis=0)
        # Spacing: (cp_num - 1) intervals span the extent along each axis.
        self.nx = (self.max_x - self.min_x) / (self.cp_num_x - 1)
        self.ny = (self.max_y - self.min_y) / (self.cp_num_y - 1)
        self.nz = (self.max_z - self.min_z) / (self.cp_num_z - 1)
        self.changed = {}
        if initial:
            # Control-point offsets (displacements), all zero initially.
            self.control_points = [
                [
                    [np.array([0.0, 0.0, 0.0]) for z in range(self.cp_num_z)]
                    for y in range(self.cp_num_y)
                ]
                for x in range(self.cp_num_x)
            ]
            # Control-point coordinates laid out on the regular grid.
            self.cp_locations = [
                [
                    [
                        np.array(
                            [
                                self.min_x + x * self.nx,
                                self.min_y + y * self.ny,
                                self.min_z + z * self.nz,
                            ]
                        )
                        for z in range(self.cp_num_z)
                    ]
                    for y in range(self.cp_num_y)
                ]
                for x in range(self.cp_num_x)
            ]
            # Deep copy so later edits leave the initial layout available.
            self.init_cp_locations = copy.deepcopy(self.cp_locations)
        # Rebuild the cell -> object-point index from scratch.
        try:
            del self.object_points
        except AttributeError:
            # First call: the index does not exist yet. (The original used a
            # bare `except:`, which would also have hidden real errors.)
            pass
        self.object_points = {}
        for x in range(self.cp_num_x):
            for y in range(self.cp_num_y):
                for z in range(self.cp_num_z):
                    self.object_points[(x, y, z)] = set()
        for point_index in range(len(self.object_points_initial)):
            [x, y, z] = self.object_points_initial[point_index]
            i = int((x - self.min_x) / self.nx)
            j = int((y - self.min_y) / self.ny)
            k = int((z - self.min_z) / self.nz)
            self.object_points[(i, j, k)].add((point_index, x, y, z))

    def read_ffd(self, path):
        """Reads control-point offsets from an FFD file written by
        :meth:`save_cp` and accumulates them into ``new_cp_locations``.

        The parsed offsets are stored in ``new_control_points``; each offset
        is scaled by ``nx + ny + nz`` (written as ``3 * (...) / 3`` to match
        the original file convention) before being added to the initial
        control-point coordinates.
        """
        self.new_control_points = copy.deepcopy(self.control_points)
        self.new_cp_locations = copy.deepcopy(self.init_cp_locations)
        begin = False
        # `with` guarantees the file is closed (the original leaked the handle).
        with open(path, "r") as f:
            while True:
                line = f.readline()
                if not begin:
                    # Header section: look for the marker lines.
                    if line.startswith("#"):
                        if "#dimension#" in line:
                            line = f.readline()
                            self.dimension = int(line.split("\n")[0])
                            continue
                        if "#offsets of the control points#" in line:
                            begin = True
                            x = 0
                            y = 0
                            continue
                        elif "#control grid size#" in line:
                            size = []
                            for _ in range(self.dimension):
                                line = f.readline()
                                size.append(int(line.split("\n")[0]))
                            continue
                        else:
                            continue
                    else:
                        continue
                else:
                    # Data section: one tab-separated row per y index; a blank
                    # line separates consecutive x layers.
                    if line == "\n":
                        x += 1
                        y = 0
                        if x == size[0]:
                            break
                        else:
                            continue
                    else:
                        line = line.split("\t")[:-1]
                        for z in range(len(line)):
                            try:
                                # Fixed: np.float was removed in NumPy >= 1.20;
                                # the builtin float is the documented drop-in.
                                self.new_control_points[x][y][z] = np.array(
                                    [float(i) for i in line[z].split(" ")]
                                )
                            except IndexError:
                                raise
                        y += 1
        for x in range(len(self.new_control_points)):
            for y in range(len(self.new_control_points[x])):
                for z in range(len(self.new_control_points[x][y])):
                    self.new_cp_locations[x][y][z] += (
                        self.new_control_points[x][y][z]
                        * 3
                        * (self.nx + self.ny + self.nz)
                        / 3
                    )
        return

    def save_cp(self, filename):
        """Writes the current control-point offsets to an FFD file.

        The format is the one parsed by :meth:`read_ffd`: marker headers
        followed by tab-separated offset triples, with a blank line between
        x layers.
        """
        # `with` guarantees the file is flushed and closed on any exit path.
        with open(filename, "w") as f:
            f.write("#dimension#\n")
            f.write("3\n")
            f.write("#one to one#\n")
            f.write("1\n")
            f.write("#control grid size#\n")
            f.write(str(self.cp_num_x) + "\n")
            f.write(str(self.cp_num_y) + "\n")
            f.write(str(self.cp_num_z) + "\n")
            f.write("#control grid spacing#\n")
            f.write(str(self.nx) + "\n")
            f.write(str(self.ny) + "\n")
            f.write(str(self.nz) + "\n")
            f.write("#offsets of the control points#\n")
            for x in range(len(self.control_points)):
                for y in range(len(self.control_points[x])):
                    for z in range(len(self.control_points[x][y])):
                        f.write(
                            str(self.control_points[x][y][z][0])
                            + " "
                            + str(self.control_points[x][y][z][1])
                            + " "
                            + str(self.control_points[x][y][z][2])
                            + "\t"
                        )
                    f.write("\n")
                f.write("\n")
        return

    def B(self, i, u):
        """Cubic uniform B-spline basis function B_i(u) for i in {0,1,2,3}.

        The four polynomials form a partition of unity; any other index
        returns None (no final else, matching the original behavior).
        """
        if i == 0:
            return (1 - u) ** 3 / 6
        elif i == 1:
            return (3 * u ** 3 - 6 * u ** 2 + 4) / 6
        elif i == 2:
            return (-3 * u ** 3 + 3 * u ** 2 + 3 * u + 1) / 6
        elif i == 3:
            return u ** 3 / 6

    def T_local(self, object_point):
        """Evaluates the B-spline deformation (displacement) at a point.

        Returns the basis-weighted sum over the 4x4x4 neighborhood of
        control-point offsets; with all offsets zero this is the zero vector.
        """
        [x, y, z] = object_point
        # Cell index shifted by -1 so the 4-wide support window covers the
        # point's neighborhood.
        i = int((x - self.min_x) / self.nx) - 1
        j = int((y - self.min_y) / self.ny) - 1
        k = int((z - self.min_z) / self.nz) - 1
        # Fractional position of the point within its cell.
        u = (x - self.min_x) / self.nx - int((x - self.min_x) / self.nx)
        v = (y - self.min_y) / self.ny - int((y - self.min_y) / self.ny)
        w = (z - self.min_z) / self.nz - int((z - self.min_z) / self.nz)
        result = np.array([0.0, 0.0, 0.0])
        for l in range(4):
            if 0 <= i + l < self.cp_num_x:
                for m in range(4):
                    if 0 <= j + m < self.cp_num_y:
                        for n in range(4):
                            if 0 <= k + n < self.cp_num_z:
                                result = (
                                    result
                                    + self.B(l, u)
                                    * self.B(m, v)
                                    * self.B(n, w)
                                    * self.control_points[i + l][j + m][k + n]
                                )
        return result

    def changed_reset(self):
        """Clears the pending control-point edits."""
        del self.changed
        self.changed = {}

    def changed_update(self, id, location):
        """Records a pending edit: lattice index ``id`` moved to ``location``."""
        self.changed[id] = location

    def update_control_point(self):
        """Converts pending absolute locations into offsets from the grid."""
        for (u, v, w), new_location in self.changed.items():
            self.control_points[u][v][w] = (
                new_location - self.cp_locations[u][v][w]
            )
class FFD_Bezier(object):
def __init__(self, num_x, num_y, num_z, filename, object_points): #定义三坐标轴上控制点个数
self.cp_num_x = num_x
self.cp_num_y = num_y
self.cp_num_z = num_z
self.object_points_initial = object_points
def cover_obj(self, initial=True): #对obj物体做一层网格覆盖
points = np.array(self.object_points_initial) #找到三坐标轴上最大、最小值,来生成最小网格
self.min_x, self.min_y, self.min_z = points.min(axis=0)
self.max_x, self.max_y, self.max_z = points.max(axis=0)
self.nx = (self.max_x - self.min_x) / (self.cp_num_x - 1)
self.ny = (self.max_y - self.min_y) / (self.cp_num_y - 1)
self.nz = (self.max_z - self.min_z) / (self.cp_num_z - 1)
self.changed = {}
if initial:
self.control_points = [#初始化控制点位置
[
[np.array([0.0, 0.0, 0.0]) for z in range(self.cp_num_z)]
for y in range(self.cp_num_y)
]
for x in range(self.cp_num_x)
]
self.cp_locations = [#依据网格大小生成控制点坐标
[
[
np.array(
[
self.min_x + x * self.nx,
self.min_y + y * self.ny,
self.min_z + z * self.nz,
]
)
for z in range(self.cp_num_z)
]
for y in range(self.cp_num_y)
]
for x in range(self.cp_num_x)
]
self.init_cp_locations = copy.deepcopy(
self.cp_locations #深拷贝一份控制点坐标
)
try:
del self.object_points
except:
pass
self.object_points = {}
for x in range(self.cp_num_x):
for y in range(self.cp_num_y):
for z in range(self.cp_num_z):
self.object_points[(x, y, z)] = set()
for point_index in range(len(self.object_points_initial)):
[x, y, z] = self.object_points_initial[point_index]
i = int((x - self.min_x) / self.nx)
j = int((y - self.min_y) / self.ny)
k = int((z - self.min_z) / self.nz)
self.object_points[(i, j, k)].add((point_index, x, y, z))
def read_ffd(self, path):
f = open(path, "r")
self.new_control_points = copy.deepcopy(self.control_points)
self.new_cp_locations = copy.deepcopy(
self.init_cp_locations
)
begin = False
while True:
line = f.readline() #按行读取ffd文件
if not begin:
if line.startswith("#"):
if "#dimension#" in line: #提取维度
line = f.readline()
self.dimension = int(line.split("\n")[0])
continue
if "#offsets of the control points#" in line: #提取偏移量
begin = True
x = 0
y = 0
continue
elif "#control grid size#" in line: #提取控制点个数
size = []
for _ in range(self.dimension):
line = f.readline()
size.append(int(line.split("\n")[0]))
continue
else:
continue
else:
continue
else:
if line == "\n":
x += 1
y = 0
if x == size[0]:
break
else:
continue
else:
line = line.split("\t")[:-1]
for z in range(len(line)):
try:
self.new_control_points[x][y][z] = np.array(
[np.float(i) for i in line[z].split(" ")]
)
except IndexError:
raise
y += 1
for x in range(len(self.new_control_points)):
for y in range(len(self.new_control_points[x])):
for z in range(len(self.new_control_points[x][y])):
self.new_cp_locations[x][y][z] += (
self.new_control_points[x][y][z]
* 3
* (self.nx + self.ny + self.nz)
/ 3 #偏移量较小时按照网格单位长度*偏移量
)
return
def save_cp(self, filename): #保存ffd文件
f = open(filename, "w")
f.write("#dimension#\n")
f.write("3\n")
f.write("#one to one#\n")
f.write("1\n")
f.write("#control grid size#\n")
f.write(str(self.cp_num_x) + "\n")
f.write(str(self.cp_num_y) + "\n")
f.write(str(self.cp_num_z) + "\n")
f.write("#control grid spacing#\n")
f.write(str(self.nx) + "\n")
f.write(str(self.ny) + "\n")
f.write(str(self.nz) + "\n")
f.write("#offsets of the control points#\n")
for x in range(len(self.control_points)):
for y in range(len(self.control_points[x])):
for z in range(len(self.control_points[x][y])):
f.write(
str(self.control_points[x][y][z][0])
+ " "
+ str(self.control_points[x][y][z][1])
+ " "
+ str(self.control_points[x][y][z][2])
+ "\t"
)
f.write("\n")
f.write("\n")
f.close()
return
def Bezier(self, i, u):
if i == 0:
return (1 - u) ** 3
elif i == 1:
return 3 * u ** 3 - 6 * u ** 2 + 3 * u
elif i == 2:
return -3 * u ** 3 + 3 * u ** 2
elif i == 3:
return u ** 3
def T_local(self, object_point):
[x, y, z] = object_point
i = int((x - self.min_x) / self.nx) - 1
j = int((y - self.min_y) / self.ny) - 1
k = int((z - self.min_z) / self.nz) - 1
u = (x - self.min_x) / self.nx - int((x - self.min_x) / self.nx)
v = (y - self.min_y) / self.ny - int((y - self.min_y) / self.ny)
w = (z - self.min_z) / self.nz - int((z - self.min_z) / self.nz)
result = np.array([0.0, 0.0, 0.0])
for l in range(4):
if 0 <= i + l < self.cp_num_x:
for m in range(4):
if 0 <= j + m < self.cp_num_y:
for n in range(4):
if 0 <= k + n < self.cp_num_z:
result = (
result
+ self.Bezier(l, u)
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import base64
import json
import logging
import os
import urllib
import urlparse
import uuid
from contextlib import closing
from datetime import datetime
from types import NoneType
from zipfile import ZipFile
from google.appengine.api import images, search
from google.appengine.ext import db, deferred, ndb
from mcfw.imaging import recolor_png
from typing import Optional, Tuple, List
from mcfw.cache import cached
from mcfw.consts import MISSING
from mcfw.properties import azzert
from mcfw.rpc import arguments, returns, serialize_complex_value
from mcfw.utils import normalize_search_string, chunks
from rogerthat.bizz.communities.communities import get_community
from rogerthat.bizz.embedded_applications import get_embedded_application, EmbeddedApplicationNotFoundException
from rogerthat.bizz.features import Features, mobile_supports_feature
from rogerthat.bizz.forms import get_form
from rogerthat.bizz.friend_helper import FriendHelper
from rogerthat.bizz.friends import userCode, invited_response_receiver, process_invited_response, \
create_accept_decline_buttons, INVITE_SERVICE_ADMIN
from rogerthat.bizz.i18n import check_i18n_status_of_message_flows, get_translator
from rogerthat.bizz.job.update_friends import schedule_update_a_friend_of_service_user, \
schedule_update_all_friends_of_service_user, create_update_friend_requests, convert_friend, \
do_update_friend_request
from rogerthat.bizz.maps.services import cleanup_map_index, add_map_index, \
save_map_service, SearchTag
from rogerthat.bizz.maps.services.places import get_place_details
from rogerthat.bizz.messaging import BrandingNotFoundException, sendMessage, sendForm, ReservedTagException
from rogerthat.bizz.profile import update_friends, create_user_profile, update_password_hash, _validate_name, \
create_service_profile
from rogerthat.bizz.qrtemplate import store_template
from rogerthat.bizz.rtemail import generate_auto_login_url, EMAIL_REGEX
from rogerthat.bizz.service.mfd import get_message_flow_by_key_or_name
from rogerthat.bizz.service.mfd.gen import MessageFlowRun
from rogerthat.bizz.system import unregister_mobile
from rogerthat.capi.services import receiveApiCallResult, updateUserData
from rogerthat.consts import MC_DASHBOARD, OFFICIALLY_SUPPORTED_LANGUAGES, MC_RESERVED_TAG_PREFIX, FA_ICONS
from rogerthat.dal import parent_key, put_and_invalidate_cache, app
from rogerthat.dal.app import get_app_by_user, get_app_by_id, get_apps_by_keys
from rogerthat.dal.friend import get_friends_map, get_friends_map_key_by_user
from rogerthat.dal.messaging import get_message, get_branding
from rogerthat.dal.mfd import get_service_message_flow_design_key_by_name
from rogerthat.dal.mobile import get_user_active_mobiles, get_mobile_key_by_account
from rogerthat.dal.profile import get_search_config, get_service_profile, get_user_profile, \
get_profile_infos, get_profile_info, is_service_identity_user, get_search_locations, \
get_service_or_user_profile, get_profile_key
from rogerthat.dal.roles import get_service_identities_via_user_roles, get_service_roles_by_ids
from rogerthat.dal.service import get_api_keys, get_api_key, get_api_key_count, get_sik, get_service_interaction_def, \
get_service_menu_item_by_coordinates, get_service_identity, get_friend_serviceidentity_connection, \
get_default_service_identity, get_service_identity_not_cached, get_service_identities, get_child_identities, \
get_service_interaction_defs, get_users_connected_to_service_identity, log_service_activity, \
get_service_identities_by_service_identity_users
from rogerthat.models import Profile, APIKey, SIKKey, ServiceInteractionDef, ShortURL, \
QRTemplate, Message, MFRSIKey, ServiceMenuDef, Branding, PokeTagMap, ServiceProfile, UserProfile, ServiceIdentity, \
SearchConfigLocation, ProfilePointer, FacebookProfilePointer, MessageFlowDesign, ServiceTranslation, \
ServiceMenuDefTagMap, UserData, FacebookUserProfile, App, MessageFlowRunRecord, \
FriendServiceIdentityConnection, UserContext, UserContextLink, \
ServiceCallBackSettings, ServiceCallBackConfig
from rogerthat.models.properties.friend import FriendDetailTO
from rogerthat.models.properties.keyvalue import KVStore, InvalidKeyError
from rogerthat.models.settings import ServiceInfo
from rogerthat.models.utils import allocate_id, allocate_ids
from rogerthat.rpc import users
from rogerthat.rpc.models import ServiceAPICallback, ServiceLog, RpcCAPICall, Mobile
from rogerthat.rpc.rpc import mapping, logError
from rogerthat.rpc.service import logServiceError, ServiceApiException, ERROR_CODE_UNKNOWN_ERROR, \
ERROR_CODE_WARNING_THRESHOLD, BusinessException, SERVICE_API_CALLBACK_MAPPING
from rogerthat.service.api.friends import invited
from rogerthat.service.api.test import test
from rogerthat.settings import get_server_settings
from rogerthat.to.activity import GeoPointWithTimestampTO, GEO_POINT_FACTOR
from rogerthat.to.friends import UpdateFriendRequestTO, FriendTO, ServiceMenuDetailTO, ServiceMenuItemLinkTO, \
FRIEND_TYPE_SERVICE
from rogerthat.to.messaging import BaseMemberTO, UserMemberTO
from rogerthat.to.messaging.service_callback_results import PokeCallbackResultTO, FlowCallbackResultTypeTO, \
MessageCallbackResultTypeTO, FormCallbackResultTypeTO
from rogerthat.to.profile import SearchConfigTO
from rogerthat.to.qr import QRDetailsTO
from rogerthat.to.service import ServiceConfigurationTO, APIKeyTO, LibraryMenuIconTO, FindServiceResponseTO, \
FindServiceItemTO, FindServiceCategoryTO, ServiceIdentityDetailsTO, ServiceLanguagesTO, UserDetailsTO, \
ReceiveApiCallResultResponseTO, ReceiveApiCallResultRequestTO, SendApiCallCallbackResultTO, \
ServiceCallbackConfigurationTO, UpdateUserDataResponseTO, ServiceCallbackConfigurationRegexTO, \
UpdateUserDataRequestTO
from rogerthat.translations import localize, DEFAULT_LANGUAGE
from rogerthat.utils import now, channel, generate_random_key, parse_color, slog, \
is_flag_set, get_full_language_string, get_officially_supported_languages, try_or_defer, \
bizz_check, base38
from rogerthat.utils.app import get_human_user_from_app_user, get_app_id_from_app_user, create_app_user_by_email
from rogerthat.utils.crypto import md5_hex, sha256_hex
from rogerthat.utils.languages import convert_web_lang_to_iso_lang
from rogerthat.utils.location import haversine, VERY_FAR
from rogerthat.utils.service import get_service_user_from_service_identity_user, create_service_identity_user, \
get_service_identity_tuple, is_valid_service_identifier, remove_slash_default
from rogerthat.utils.transactions import run_in_transaction, run_in_xg_transaction, on_trans_committed, \
on_trans_rollbacked
from solutions.common.integrations.cirklo.models import CirkloMerchant, VoucherProviderId
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Filesystem locations for bundled resources.
CURRENT_DIR = os.path.dirname(__file__)
ICON_LIBRARY_PATH = os.path.join(CURRENT_DIR, 'icons.zip')
# Names of the full-text search indexes for services.
SERVICE_INDEX = "SERVICE_INDEX"
SERVICE_LOCATION_INDEX = "SERVICE_LOCATION_INDEX"
# Tag/button identifiers used in the "service in trouble" warning flow.
SERVICE_IN_TROUBLE_TAG = u"service_trouble"
IGNORE_SERVICE_TROUBLE_ID = u"ignore_service_trouble"
DISABLE_SERVICE_TROUBLE_ID = u"disable_service_trouble"
# ServiceIdentity attributes that hold translatable menu-item labels.
MENU_ITEM_LABEL_ATTRS = ['aboutMenuItemLabel', 'messagesMenuItemLabel', 'callMenuItemLabel', 'shareMenuItemLabel']
# Display names of the built-in QR code templates.
QR_TEMPLATE_BLUE_PACIFIC = u"Blue Pacific"
QR_TEMPLATE_BROWN_BAG = u"Brown Bag"
QR_TEMPLATE_PINK_PANTHER = u"Pink Panther"
QR_TEMPLATE_BLACK_HAND = u"Black Hand"
class TestCallbackFailedException(ServiceApiException):
    """Raised when the service's callback endpoint fails the test call."""

    def __init__(self):
        super(TestCallbackFailedException, self).__init__(
            ServiceApiException.BASE_CODE_TEST + 0, "Test callback failed")
class ServiceIdentityDoesNotExistException(ServiceApiException):
    """Raised when a referenced service identity cannot be found."""

    def __init__(self, service_identity):
        super(ServiceIdentityDoesNotExistException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 0,
            u"Service identity does not exist",
            service_identity=service_identity)
class InvalidValueException(ServiceApiException):
    """Raised when a supplied value is invalid for a given property."""

    def __init__(self, property_, reason):
        super(InvalidValueException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 1,
            u"Invalid value", property=property_, reason=reason)
class InvalidMenuItemCoordinatesException(ServiceApiException):
    """Raised for menu item coordinates outside the allowed 4x4 grid."""

    def __init__(self):
        super(InvalidMenuItemCoordinatesException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 2,
            u"A menu item has an x, y and page coordinate, with x and y smaller than 4")
class ReservedMenuItemException(ServiceApiException):
    """Raised when attempting to modify a reserved menu item."""

    def __init__(self):
        super(ReservedMenuItemException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 3,
            u"This menu item is reserved")
class InvalidNameException(ServiceApiException):
    """Raised when a name fails validation (e.g. exceeds 50 characters)."""

    def __init__(self, message):
        super(InvalidNameException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 8,
            "Invalid name, it must be shorter than 50 characters.", reason=message)
class ServiceAlreadyExistsException(ServiceApiException):
    """Raised when creating a service whose e-mail address is already taken."""

    def __init__(self):
        super(ServiceAlreadyExistsException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 9,
            u"Service with that e-mail address already exists")
class UnsupportedLanguageException(ServiceApiException):
    """Raised for a language that is not in the supported set."""

    def __init__(self):
        super(UnsupportedLanguageException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 10,
            u"This language is not supported")
class FriendNotFoundException(ServiceApiException):
    """Raised when a user is not present in the friends list."""

    def __init__(self):
        super(FriendNotFoundException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 11,
            u"User not in friends list")
class InvalidJsonStringException(ServiceApiException):
    """Raised when input data cannot be parsed as a JSON object."""

    def __init__(self):
        super(InvalidJsonStringException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 12,
            u"Can not parse data as json object")
class AvatarImageNotSquareException(ServiceApiException):
    """Raised when a provided avatar image is not square."""

    def __init__(self):
        super(AvatarImageNotSquareException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 13,
            u"Expected a square input image")
class CategoryNotFoundException(ServiceApiException):
    """Raised when a referenced category does not exist."""

    def __init__(self):
        super(CategoryNotFoundException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 14,
            u"Category not found")
class CallbackNotDefinedException(ServiceApiException):
    """Raised when a required service callback function is not configured."""

    def __init__(self, function):
        super(CallbackNotDefinedException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 15,
            u"Callback not defined", function=function)
class InvalidAppIdException(ServiceApiException):
    """Raised for an app id that does not exist or is malformed."""

    def __init__(self, app_id):
        super(InvalidAppIdException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 17,
            u"Invalid app_id", app_id=app_id)
class UnsupportedAppIdException(ServiceApiException):
    """Raised for an app id the service does not support."""

    def __init__(self, app_id):
        super(UnsupportedAppIdException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 18,
            u"Unsupported app_id", app_id=app_id)
class RoleNotFoundException(ServiceApiException):
    """Raised when a referenced role does not exist."""

    def __init__(self, role_id):
        super(RoleNotFoundException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 19,
            u"Role does not exist", role_id=role_id)
class RoleAlreadyExistsException(ServiceApiException):
    """Raised when creating a role whose name is already in use."""

    def __init__(self, name):
        super(RoleAlreadyExistsException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 20,
            u"Role with this name already exists", name=name)
class InvalidRoleTypeException(ServiceApiException):
    """Raised for an unrecognized role type."""

    def __init__(self, type_):
        super(InvalidRoleTypeException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 21,
            u"Invalid role type", type=type_)
class DeleteRoleFailedHasMembersException(ServiceApiException):
    """Raised when deleting a role that is still granted to users."""

    def __init__(self, role_id):
        super(DeleteRoleFailedHasMembersException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 22,
            u"Cannot delete role which is still granted to people.", role_id=role_id)
class DeleteRoleFailedHasSMDException(ServiceApiException):
    """Raised when deleting a role still linked to a service menu item."""

    def __init__(self, role_id):
        super(DeleteRoleFailedHasSMDException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 23,
            u"Cannot delete role which is still connected to a service menu item",
            role_id=role_id)
class UserWithThisEmailAddressAlreadyExistsException(ServiceApiException):
    """Raised when an account with the given e-mail address already exists."""

    def __init__(self, email):
        super(UserWithThisEmailAddressAlreadyExistsException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 24,
            u"An account with this e-mail address already exists", email=email)
class AppOperationDeniedException(ServiceApiException):
    """Raised when the caller lacks permission to manage the given app."""

    def __init__(self, app_id):
        super(AppOperationDeniedException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 25,
            u"No permission to manage app", app_id=app_id)
class ServiceWithEmailDoesNotExistsException(ServiceApiException):
    """Raised when no service exists for the given identity e-mail."""

    def __init__(self, service_identity_email):
        super(ServiceWithEmailDoesNotExistsException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 26,
            u"There is no service with this email",
            service_identity_email=service_identity_email)
class MyDigiPassNotSupportedException(ServiceApiException):
    """Raised for any use of the discontinued Mydigipass integration."""

    def __init__(self):
        super(MyDigiPassNotSupportedException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 28,
            u'Mydigipass is no longer supported',
            unsupported_app_ids=[])
class AppFailedToResovelUrlException(ServiceApiException):
    """Raised when a URL could not be resolved.

    NOTE(review): class name contains a typo ("Resovel"); kept as-is because
    callers reference it by this name.
    """

    def __init__(self, url):
        super(AppFailedToResovelUrlException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 32,
            u"Failed to resolve url", url=url)
class AppFailedToCreateUserProfileWithExistingServiceException(ServiceApiException):
    """Raised when a user profile would clash with a service account e-mail."""

    def __init__(self, email):
        super(AppFailedToCreateUserProfileWithExistingServiceException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 33,
            u"Failed to create user profile with the same email as a service account",
            email=email)
class InvalidKeyException(ServiceApiException):
    """Raised for a malformed or unknown key."""

    def __init__(self, key):
        super(InvalidKeyException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 34,
            u"Invalid key", key=key)
class DuplicateCategoryIdException(ServiceApiException):
    """Raised when two categories share the same id."""

    def __init__(self, category_id):
        super(DuplicateCategoryIdException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 35,
            u"Duplicate category id", category_id=category_id)
class DuplicateItemIdException(ServiceApiException):
    """Raised when two items within one category share the same id."""

    def __init__(self, category_id, item_id):
        super(DuplicateItemIdException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 36,
            u"Duplicate item id", category_id=category_id, item_id=item_id)
class SigningNotSupportedException(ServiceApiException):
    """Raised for any use of the discontinued signing feature."""

    def __init__(self):
        super(SigningNotSupportedException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 37,
            u'The signing feature is no longer supported',
            unsupported_app_ids=[])
class InvalidSignPayloadException(ServiceApiException):
    """Raised when a signing payload is not valid base64."""

    def __init__(self):
        super(InvalidSignPayloadException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 38,
            u'Invalid payload. Make sure the payload is base64 encoded properly.')
class ExportBrandingsException(ServiceApiException):
    """Raised when brandings cannot be exported because some items still
    reference an outdated branding version."""

    def __init__(self, errors):
        details = ' \n'.join(errors)
        message = ('The following items must be updated to use the newest version of the '
                   'branding with the same description:\n%s' % details)
        super(ExportBrandingsException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 39, message)
class InvalidGroupTypeException(ServiceApiException):
    """Raised for an unrecognized group type."""

    def __init__(self, group_type):
        super(InvalidGroupTypeException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 40,
            u"Invalid group type", group_type=group_type)
class ModulesNotSupportedException(ServiceApiException):
    """Raised when one or more requested modules are no longer supported."""

    def __init__(self, modules):
        super(ModulesNotSupportedException, self).__init__(
            ServiceApiException.BASE_CODE_SERVICE + 41,
            u"Some modules are no longer supported: %s" % ', '.join(modules),
            modules=modules)
@returns(users.User)
@arguments(service_user=users.User, service_identity=unicode)
def get_and_validate_service_identity_user(service_user, service_identity):
    """Combine *service_user* and *service_identity* into a service identity user.

    Falls back to the default identity when none is supplied. For a
    non-default identity the combined identity must already exist, otherwise
    ServiceIdentityDoesNotExistException is raised.
    """
    identity = service_identity
    if identity == MISSING or not identity:
        identity = ServiceIdentity.DEFAULT
    azzert(':' not in service_user.email(), "service_user.email() should not contain :")
    service_identity_user = create_service_identity_user(service_user, identity)
    if identity != ServiceIdentity.DEFAULT and get_service_identity(service_identity_user) is None:
        raise ServiceIdentityDoesNotExistException(service_identity=identity)
    return service_identity_user
@returns(ServiceConfigurationTO)
@arguments(service_user=users.User)
def get_configuration(service_user):
    """Assemble the ServiceConfigurationTO for *service_user*.

    Lazily provisions the service identification key (sik) on first request,
    and, when the profile is wired to mobidick, exposes a mobidick test
    session URL instead of the normal list of configuration actions.
    """
    profile = get_service_profile(service_user)
    conf = ServiceConfigurationTO()
    conf.callBackURI = profile.callBackURI
    if not profile.sik:
        # First configuration request: create the sik and its SIKKey lookup
        # record atomically in a cross-group (xg) transaction.
        def trans():
            profile = get_service_profile(service_user, cached=False)
            profile.sik = unicode(generate_random_key())
            sik = SIKKey(key_name=profile.sik)
            sik.user = service_user
            sik.put()
            profile.put()
            return profile
        xg_on = db.create_transaction_options(xg=True)
        profile = db.run_in_transaction_options(xg_on, trans)
    conf.sik = profile.sik
    conf.apiKeys = [APIKeyTO.fromDBAPIKey(k) for k in get_api_keys(service_user)]
    conf.enabled = profile.enabled
    conf.callBackFromJid = u"<EMAIL>"
    conf.needsTestCall = profile.testCallNeeded
    conf.callbacks = profile.callbacks
    conf.autoUpdating = profile.autoUpdating
    conf.updatesPending = profile.updatesPending
    conf.autoLoginUrl = generate_auto_login_url(service_user)
    # A mobidick session URL only makes sense for an enabled profile that is
    # configured for mobidick and that already has at least one API key.
    if profile.enabled and profile.callBackURI == "mobidick" and conf.apiKeys:
        settings = get_server_settings()
        conf.mobidickUrl = u"%s/create_session?%s" % (
            settings.mobidickAddress, urllib.urlencode((("ak", conf.apiKeys[0].key), ("sik", conf.sik))))
    else:
        conf.mobidickUrl = None
    # While mobidick is in use no further setup actions are suggested.
    conf.actions = [] if conf.mobidickUrl else list(get_configuration_actions(service_user))
    conf.regexCallbackConfigurations = []
    callback_settings = ServiceCallBackSettings.create_key(service_user).get()
    if callback_settings:
        for config in callback_settings.configs:
            conf.regexCallbackConfigurations.append(ServiceCallbackConfigurationRegexTO.fromModel(config))
    return conf
@returns(ServiceLanguagesTO)
@arguments(service_user=users.User)
def get_service_translation_configuration(service_user):
    """Return the language/translation configuration for *service_user*.

    Returns:
        ServiceLanguagesTO carrying the default language, every officially
        supported language (plus their human readable names), and the
        non-default supported languages sorted by full language name.
    """
    service_profile = get_service_profile(service_user)
    translationTO = ServiceLanguagesTO()
    translationTO.defaultLanguage = service_profile.defaultLanguage
    translationTO.defaultLanguageStr = get_full_language_string(service_profile.defaultLanguage)
    translationTO.allLanguages = get_officially_supported_languages(iso_format=False)
    translationTO.allLanguagesStr = map(get_full_language_string, translationTO.allLanguages)
    # Sort by the full (display) language name. The previous `cmp` lambda
    # never returned 0 for equal names (an inconsistent comparator); a key
    # function is correct and also forward-compatible with Python 3.
    translationTO.nonDefaultSupportedLanguages = sorted(
        service_profile.supportedLanguages[1:], key=get_full_language_string)
    return translationTO
@returns(NoneType)
@arguments(service_profile=ServiceProfile, uri=unicode, callbacks=long)
def configure_profile(service_profile, uri, callbacks=0):
    """Point *service_profile* at a new callback URI.

    The profile is disabled and flagged as needing a test call until the new
    endpoint is verified. Mutates *service_profile* in place; the caller is
    responsible for persisting it.

    Args:
        service_profile (ServiceProfile): profile to reconfigure.
        uri (unicode): HTTP callback URI for service API callbacks.
        callbacks (long): bitmask of enabled callbacks; defaults to 0 (none).
            NOTE(review): the previous default was the builtin *type* ``long``
            itself — an apparent typo that would have stored a type object.
    """
    service_profile.testCallNeeded = True
    service_profile.enabled = False
    service_profile.callBackURI = uri
    service_profile.callbacks = callbacks
@returns(NoneType)
@arguments(service_profile=ServiceProfile)
def configure_profile_for_mobidick(service_profile):
    """Switch *service_profile* into mobidick test mode.

    Marks the profile enabled, clears the test-call requirement, points the
    callback URI at "mobidick" and turns on every known callback flag.
    Mutates *service_profile* in place without persisting it.
    """
    service_profile.testCallNeeded = False
    service_profile.enabled = True
    service_profile.callBackURI = "mobidick"
    # OR all defined callback flags together into one bitmask.
    enabled_callbacks = 0
    for flag in ServiceProfile.CALLBACKS:
        enabled_callbacks |= flag
    service_profile.callbacks = enabled_callbacks
@returns(NoneType)
@arguments(service_user=users.User)
def configure_mobidick(service_user):
    """Configure the service of *service_user* for mobidick testing.

    Ensures at least one API key exists, then switches the service profile
    into mobidick mode inside a datastore transaction.
    """
    if not list(get_api_keys(service_user)):
        generate_api_key(service_user, "mobidick")

    def _update_profile():
        # Bypass the cache so the transaction operates on fresh data.
        service_profile = get_service_profile(service_user, cached=False)
        configure_profile_for_mobidick(service_profile)
        service_profile.put()

    db.run_in_transaction(_update_profile)
@returns(NoneType)
@arguments(service_user=users.User, function=unicode, | |
None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `get_component_version_stages`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'entity' in local_var_params:
path_params['entity'] = local_var_params['entity'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/{owner}/hub/{entity}/versions/{name}/stages', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Stage', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_component_hub_names(self, owner, **kwargs): # noqa: E501
"""List hub component names # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_component_hub_names(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param bool bookmarks: Filter by bookmarks.
:param str pins: Pinned entities.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ListComponentHubsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_component_hub_names_with_http_info(owner, **kwargs) # noqa: E501
    def list_component_hub_names_with_http_info(self, owner, **kwargs):  # noqa: E501
        """List hub component names  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_component_hub_names_with_http_info(owner, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str owner: Owner of the namespace (required)
        :param int offset: Pagination offset.
        :param int limit: Limit size.
        :param str sort: Sort to order the search.
        :param str query: Query filter the search.
        :param bool bookmarks: Filter by bookmarks.
        :param str pins: Pinned entities.
        :param str mode: Mode of the search.
        :param bool no_page: No pagination.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1ListComponentHubsResponse, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot of this call's locals taken before any other local exists,
        # so it holds exactly {'self', 'owner', 'kwargs'}. Keyword arguments
        # are merged into this dict below; renaming locals would change keys.
        local_var_params = locals()
        # Parameters understood by the API endpoint itself.
        all_params = [
            'owner',
            'offset',
            'limit',
            'sort',
            'query',
            'bookmarks',
            'pins',
            'mode',
            'no_page'
        ]
        # Transport-level options accepted by every generated method.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then flatten kwargs into the
        # params dict so endpoint and transport options share one namespace.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_component_hub_names" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'owner' is set
        if self.api_client.client_side_validation and ('owner' not in local_var_params or  # noqa: E501
                                                       local_var_params['owner'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `owner` when calling `list_component_hub_names`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'owner' in local_var_params:
            path_params['owner'] = local_var_params['owner']  # noqa: E501
        # Only forward query parameters that were explicitly provided.
        query_params = []
        if 'offset' in local_var_params and local_var_params['offset'] is not None:  # noqa: E501
            query_params.append(('offset', local_var_params['offset']))  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'sort' in local_var_params and local_var_params['sort'] is not None:  # noqa: E501
            query_params.append(('sort', local_var_params['sort']))  # noqa: E501
        if 'query' in local_var_params and local_var_params['query'] is not None:  # noqa: E501
            query_params.append(('query', local_var_params['query']))  # noqa: E501
        if 'bookmarks' in local_var_params and local_var_params['bookmarks'] is not None:  # noqa: E501
            query_params.append(('bookmarks', local_var_params['bookmarks']))  # noqa: E501
        if 'pins' in local_var_params and local_var_params['pins'] is not None:  # noqa: E501
            query_params.append(('pins', local_var_params['pins']))  # noqa: E501
        if 'mode' in local_var_params and local_var_params['mode'] is not None:  # noqa: E501
            query_params.append(('mode', local_var_params['mode']))  # noqa: E501
        if 'no_page' in local_var_params and local_var_params['no_page'] is not None:  # noqa: E501
            query_params.append(('no_page', local_var_params['no_page']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['ApiKey']  # noqa: E501
        return self.api_client.call_api(
            '/api/v1/{owner}/hub/names', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1ListComponentHubsResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def list_component_hubs(self, owner, **kwargs): # noqa: E501
"""List hub components # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_component_hubs(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param bool bookmarks: Filter by bookmarks.
:param str pins: Pinned entities.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ListComponentHubsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_component_hubs_with_http_info(owner, **kwargs) # noqa: E501
def list_component_hubs_with_http_info(self, owner, **kwargs): # noqa: E501
"""List hub components # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_component_hubs_with_http_info(owner, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param bool bookmarks: Filter by bookmarks.
:param str pins: Pinned entities.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ListComponentHubsResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'offset',
'limit',
'sort',
'query',
'bookmarks',
'pins',
'mode',
'no_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_component_hubs" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `list_component_hubs`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
query_params = []
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'query' in local_var_params and local_var_params['query'] is not None: # noqa: E501
query_params.append(('query', local_var_params['query'])) # noqa: E501
if 'bookmarks' in local_var_params and | |
<gh_stars>0
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# pylint: disable=protected-access,too-many-lines
"""Integration tests for `WithSOX302Flow` logic."""
import collections
import ddt
from ggrc.converters import errors
from ggrc.models import all_models
from ggrc.models.mixins import statusable
from ggrc.models.mixins import with_sox_302
from integration import ggrc as integration_tests_ggrc
from integration.ggrc import api_helper
from integration.ggrc import query_helper
from integration.ggrc.models import factories as ggrc_factories
# Expected import response when the read-only "SOX 302 assessment workflow"
# column is supplied for an Assessment: the value is ignored and row 3 of the
# CSV gets a READONLY_WILL_BE_IGNORED warning.
READ_ONLY_WARNING = {
    "Assessment": {
        "row_warnings": {
            errors.READONLY_WILL_BE_IGNORED.format(
                line=3,
                value='',
                column_name='SOX 302 assessment workflow')
        }
    }
}
class BaseTestWithSOX302(integration_tests_ggrc.TestCase):
  """Base class for test cases for `WithSOX302Flow` logic."""
  def _login(self):
    """Make `self.client` to exec further requests as if issuer's logged in."""
    self.client.get("/login")
  @staticmethod
  def _get_query_by_audit_for(obj_type, audit_id):
    # type: (db.Model, int) -> sqlalchemy.Query
    """Build sqlalchemy query for specific object type filtered by audit.
    Build sqlalchemy query for objects of type `obj_type` which are related to
    audit with `audit_id` ID. This helper method allows to build DB query when
    audit ID is known and use this query later.
    Args:
      obj_type (db.Model): Class instance of objects to query.
      audit_id (int): ID of Audit instance whose related objects should be
        queried.
    Returns:
      Sqlalchemy.Query object which could be used for later queries.
    """
    return obj_type.query.filter(obj_type.audit_id == audit_id)
  @staticmethod
  def _refresh_object(obj_type, obj_id):
    # type: (db.Model, int) -> db.Model
    """Re-fetch an object from the DB and attach it to the current session.
    Args:
      obj_type (db.Model): Model class of the object to refresh.
      obj_id (int): ID of the object to re-query from the DB.
    Returns:
      Refreshed instance of db.Model class.
    """
    return obj_type.query.get(obj_id)
  @staticmethod
  def _get_asmt_tmpl_lcas(assessment_template):
    # type: (model.AssessmentTemplate) -> List[model.CustomAttributeDefinition]
    """Return list of local CADs of AssessmentTemplate.
    Return list of local CustomAttributeDefinition instances related to the
    given AssessmentTemplate.
    Args:
      assessment_template (model.AssessmentTemplate): Assessment template whose
        local custom attributes should be queried from DB.
    Returns:
      List of CustomAttributeDefinition instances.
    """
    cad = all_models.CustomAttributeDefinition
    return cad.query.filter(
        cad.definition_type == assessment_template._inflector.table_singular,
        cad.definition_id == assessment_template.id,
    ).all()
  def _assert_sox_302_enabled_flag(self, obj, expected_value):
    # type: (db.Model, bool) -> None
    """Assert that `sox_302_enabled` flag has expected value on object.
    For this assertion to pass, following conditions should be met:
      - Given object should be derived from `WithSOX302Flow` mixin;
      - Value of `sox_302_enabled` on object should match `expected_value`.
    Args:
      obj (db.Model): Instance of db.Model class on which value of
        `sox_302_enabled` flag should be checked.
      expected_value (bool): Expected value of `sox_302_enabled` flag on the
        given object.
    """
    self.assertTrue(isinstance(obj, with_sox_302.WithSOX302Flow))
    self.assertIsNotNone(obj)
    self.assertEqual(
        obj.sox_302_enabled,
        expected_value,
    )
  def _assert_negative_options(self, cads, expected_cad_count,
                               expected_options, expected_negatives):
    # type: (List[model.CustomAttributeDefinition], int, list, list) -> None
    """Assert that provided CADs have correct negative options.
    For this assertion to pass, following conditions should be met:
      - Number of CADs should match with `expected_cad_count`;
      - Options of each CAD should match with options from `expected_options`;
      - Options from `expected_negatives` should be negative ones on CAD;
    Args:
      cads (List[model.CustomAttributeDefinition]): List of CADs whose options
        should be checked.
      expected_cad_count (int): Expected number of CADs.
      expected_options (List[str]): List of expected options for each CAD.
      expected_negatives (List[str]): List of expected negative options for
        each CAD.
    """
    self.assertEqual(expected_cad_count, len(cads))
    for item in zip(cads, expected_options, expected_negatives):
      lca, exp_options, exp_negatives = item
      self.assertEqual(exp_options, lca.multi_choice_options)
      self.assertIn(exp_negatives, lca.negative_options)
@ddt.ddt
class TestImportWithSOX302(BaseTestWithSOX302):
"""Test import of `WithSOX302Flow` objects."""
@ddt.data(
{"imported_value": "yes", "exp_value": True},
{"imported_value": "no", "exp_value": False},
{"imported_value": "", "exp_value": False},
)
@ddt.unpack
def test_sox_302_tmpl_create(self, imported_value, exp_value):
"""Test SOX302 enabled={exp_value} when create asmt tmpl via import."""
audit = ggrc_factories.AuditFactory()
audit_id = audit.id
asmt_tmpl_data = collections.OrderedDict([
("object_type", "Assessment Template"),
("Code*", ""),
("Audit*", audit.slug),
("Title", "AssessmentTemplate Title"),
("Default Assessment Type*", "Control"),
("Default Assignees*", "Auditors"),
("SOX 302 assessment workflow", imported_value),
])
self._login()
response = self.import_data(asmt_tmpl_data)
self._check_csv_response(response, {})
tmpl = self._get_query_by_audit_for(
all_models.AssessmentTemplate, audit_id).one()
self._assert_sox_302_enabled_flag(tmpl, exp_value)
@ddt.data(
{"init_value": True, "imported_value": "yes", "exp_value": True},
{"init_value": True, "imported_value": "no", "exp_value": False},
{"init_value": True, "imported_value": "", "exp_value": True},
{"init_value": False, "imported_value": "yes", "exp_value": True},
{"init_value": False, "imported_value": "no", "exp_value": False},
{"init_value": False, "imported_value": "", "exp_value": False},
)
@ddt.unpack
def test_sox_302_tmpl_update(self, init_value, imported_value, exp_value):
"""Test SOX302 enabled={exp_value} when update asmt tmpl via import."""
tmpl = ggrc_factories.AssessmentTemplateFactory(
sox_302_enabled=init_value)
tmpl_id = tmpl.id
asmt_tmpl_data = collections.OrderedDict([
("object_type", "Assessment Template"),
("Code*", tmpl.slug),
("SOX 302 assessment workflow", imported_value),
])
self._login()
response = self.import_data(asmt_tmpl_data)
self._check_csv_response(response, {})
tmpl = self._refresh_object(tmpl.__class__, tmpl_id)
self._assert_sox_302_enabled_flag(tmpl, exp_value)
@ddt.data(
{
"imported_value": "yes",
"exp_value": False,
"expected_warnings": READ_ONLY_WARNING
},
{
"imported_value": "no",
"exp_value": False,
"expected_warnings": READ_ONLY_WARNING
},
{
"imported_value": "",
"exp_value": False,
"expected_warnings": {}
},
)
@ddt.unpack
def test_sox_302_immut_asmt_create(self, imported_value, exp_value,
expected_warnings):
# pylint: disable=invalid-name
"""Test SOX 302 enabled is immutable when create asmt via import.
Test `sox_302_enabled` on Assessment could not be set via import if there
isn't any AssessmentTemplate provided in import data. SOX 302 enabled flag
is read only on Assessment and could be set only from template.
Warning that 'SOX 302 assessment workflow' will be ignored
via import is shown.
"""
audit = ggrc_factories.AuditFactory()
audit_id = audit.id
asmt_data = collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Template", ""),
("Audit*", audit.slug),
("Title", "Assessment Title"),
("Assignees", "<EMAIL>"),
("Creators", "<EMAIL>"),
("SOX 302 assessment workflow", imported_value),
])
self._login()
response = self.import_data(asmt_data)
self._check_csv_response(response, expected_warnings)
asmt = self._get_query_by_audit_for(all_models.Assessment, audit_id).one()
self._assert_sox_302_enabled_flag(asmt, exp_value)
@ddt.data(
{
"tmpl_value": True,
"imported_value": "yes",
"exp_value": True,
"expected_warnings": READ_ONLY_WARNING
},
{
"tmpl_value": True,
"imported_value": "no",
"exp_value": True,
"expected_warnings": READ_ONLY_WARNING
},
{
"tmpl_value": True,
"imported_value": "",
"exp_value": True,
"expected_warnings": {}
},
{
"tmpl_value": False,
"imported_value": "yes",
"exp_value": False,
"expected_warnings": READ_ONLY_WARNING
},
{
"tmpl_value": False,
"imported_value": "no",
"exp_value": False,
"expected_warnings": READ_ONLY_WARNING
},
{
"tmpl_value": False,
"imported_value": "",
"exp_value": False,
"expected_warnings": {}
},
)
@ddt.unpack
def test_sox_302_asmt_with_tmpl_create(self, tmpl_value, imported_value,
exp_value, expected_warnings):
# pylint: disable=invalid-name
"""Test SOX 302 enabled is mutable when create asmt with tmpl via import.
Test `sox_302_enabled` on Assessment could be set via import if there is an
AssessmentTemplate provided in import data. SOX 302 enabled flag is read
only on Assessment and could be set only from template.
Warning that 'SOX 302 assessment workflow' will be ignored
via import is shown.
"""
with ggrc_factories.single_commit():
audit = ggrc_factories.AuditFactory()
tmpl = ggrc_factories.AssessmentTemplateFactory(
audit=audit,
sox_302_enabled=tmpl_value,
)
audit_id = audit.id
asmt_data = collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Template", tmpl.slug),
("Audit*", audit.slug),
("Title", "Assessment Title"),
("Assignees", "<EMAIL>"),
("Creators", "<EMAIL>"),
("SOX 302 assessment workflow", imported_value),
])
self._login()
response = self.import_data(asmt_data)
self._check_csv_response(response, expected_warnings)
asmt = self._get_query_by_audit_for(all_models.Assessment, audit_id).one()
self._assert_sox_302_enabled_flag(asmt, exp_value)
@ddt.data(
{
"init_value": True,
"imported_value": "yes",
"exp_value": True,
"expected_warnings": {}
},
{
"init_value": True,
"imported_value": "no",
"exp_value": True,
"expected_warnings": READ_ONLY_WARNING
},
{
"init_value": True,
"imported_value": "",
"exp_value": True,
"expected_warnings": {}
},
{
"init_value": False,
"imported_value": "yes",
"exp_value": False,
"expected_warnings": READ_ONLY_WARNING
},
{
"init_value": False,
"imported_value": "no",
"exp_value": False,
"expected_warnings": {}
},
{
"init_value": False,
"imported_value": "",
"exp_value": False,
"expected_warnings": {}
},
)
@ddt.unpack
def test_sox_302_immut_asmt_upd(self, init_value, imported_value,
exp_value, expected_warnings):
# pylint: disable=invalid-name
"""Test SOX 302 enabled is immutable when update asmt via import.
Test `sox_302_enabled` on Assessment could not be set via import during
Assessment update if there isn't any AssessmentTemplate provided in import
data. SOX 302 enabled flag is read only on Assessment and could not be
updated in any way.
Warning that 'SOX 302 assessment workflow' will be ignored
via import is shown.
"""
asmt = ggrc_factories.AssessmentFactory(sox_302_enabled=init_value)
asmt_id = asmt.id
asmt_data = collections.OrderedDict([
("object_type", "Assessment"),
("Code*", asmt.slug),
("SOX 302 assessment workflow", imported_value),
])
self._login()
response = self.import_data(asmt_data)
self._check_csv_response(response, expected_warnings)
asmt = self._refresh_object(asmt.__class__, asmt_id)
self._assert_sox_302_enabled_flag(asmt, exp_value)
@ddt.data(
{"init_value": True, "tmpl_value": True, "exp_value": True},
{"init_value": True, "tmpl_value": False, "exp_value": True},
{"init_value": False, "tmpl_value": True, "exp_value": False},
{"init_value": False, "tmpl_value": False, "exp_value": False},
)
@ddt.unpack
def test_sox_302_asmt_with_tmpl_upd(self, init_value, tmpl_value, exp_value):
"""Test SOX 302 enabled is immutable when update asmt with tmpl via import.
Test `sox_302_enabled` on Assessment could not be set via import during
Assessment update if there is an AssessmentTemplate provided in import
data. SOX 302 enabled flag is read only on Assessment and could not be
updated in noway.
"""
with ggrc_factories.single_commit():
asmt = ggrc_factories.AssessmentFactory(sox_302_enabled=init_value)
tmpl = ggrc_factories.AssessmentTemplateFactory(
audit=asmt.audit,
sox_302_enabled=tmpl_value,
)
asmt_id = asmt.id
asmt_data = collections.OrderedDict([
("object_type", "Assessment"),
("Code*", asmt.slug),
("Template", tmpl.slug),
])
self._login()
response = self.import_data(asmt_data)
self._check_csv_response(response, {})
asmt = self._refresh_object(asmt.__class__, asmt_id)
self._assert_sox_302_enabled_flag(asmt, exp_value)
@ddt.data(
{
"lca_to_import": "Dropdown, LCA with negative, yes, (n)no",
"expected_options": ["yes,no"],
"expected_negatives": ["no"],
"expected_lca_count": 1,
},
{
"lca_to_import": "Rich Text, LCA with negative, empty, (n)not empty",
"expected_options": ["empty,not empty"],
"expected_negatives": ["not empty"],
"expected_lca_count": 1,
},
{
"lca_to_import": "Text, LCA | |
"""
Dictionaries containing basic atomic data.
The periodic tabla data is from: http://periodic.lanl.gov/index.shtml
"""
import collections
import astropy.units as u
_PeriodicTable = collections.namedtuple(
"periodic_table", ['group', 'category', 'block', 'period']
)
_Elements = {
"H": {
"atomic number": 1,
"atomic mass": 1.008 * u.u,
"element name": "hydrogen",
"period": 1,
"group": 1,
"block": "s",
"category": "nonmetal",
},
"He": {
"atomic number": 2,
"atomic mass": 4.002602 * u.u,
"element name": "helium",
"period": 1,
"group": 18,
"block": "s",
"category": "noble gas",
},
"Li": {
"atomic number": 3,
"atomic mass": 6.94 * u.u,
"element name": "lithium",
"period": 2,
"group": 1,
"block": "s",
"category": "alkali metal",
},
"Be": {
"atomic number": 4,
"atomic mass": 9.0121831 * u.u,
"element name": "beryllium",
"period": 2,
"group": 2,
"block": "s",
"category": "alkaline earth metal",
},
"B": {
"atomic number": 5,
"atomic mass": 10.806 * u.u,
"element name": "boron",
"period": 2,
"group": 13,
"block": "p",
"category": "metalloid",
},
"C": {
"atomic number": 6,
"atomic mass": 12.011 * u.u,
"element name": "carbon",
"period": 2,
"group": 14,
"block": "p",
"category": "nonmetal",
},
"N": {
"atomic number": 7,
"atomic mass": 14.007 * u.u,
"element name": "nitrogen",
"period": 2,
"group": 15,
"block": "p",
"category": "nonmetal",
},
"O": {
"atomic number": 8,
"atomic mass": 15.999 * u.u,
"element name": "oxygen",
"period": 2,
"group": 16,
"block": "p",
"category": "nonmetal",
},
"F": {
"atomic number": 9,
"atomic mass": 18.998403163 * u.u,
"element name": "fluorine",
"period": 2,
"group": 17,
"block": "p",
"category": "halogen",
},
"Ne": {
"atomic number": 10,
"atomic mass": 20.1797 * u.u,
"element name": "neon",
"period": 2,
"group": 18,
"block": "p",
"category": "noble gas",
},
"Na": {
"atomic number": 11,
"atomic mass": 22.98976928 * u.u,
"element name": "sodium",
"period": 3,
"group": 1,
"block": "s",
"category": "alkali metal",
},
"Mg": {
"atomic number": 12,
"atomic mass": 24.305 * u.u,
"element name": "magnesium",
"period": 3,
"group": 2,
"block": "s",
"category": "alkaline earth metal",
},
"Al": {
"atomic number": 13,
"atomic mass": 26.9815385 * u.u,
"element name": "aluminium",
"period": 3,
"group": 13,
"block": "p",
"category": "post-transition metal",
},
"Si": {
"atomic number": 14,
"atomic mass": 28.085 * u.u,
"element name": "silicon",
"period": 3,
"group": 14,
"block": "p",
"category": "metalloid",
},
"P": {
"atomic number": 15,
"atomic mass": 30.973761998 * u.u,
"element name": "phosphorus",
"period": 3,
"group": 15,
"block": "p",
"category": "nonmetal",
},
"S": {
"atomic number": 16,
"symbol": "S",
"atomic mass": 32.06 * u.u,
"element name": "sulfur",
"period": 3,
"group": 16,
"block": "p",
"category": "nonmetal",
},
"Cl": {
"atomic number": 17,
"atomic mass": 35.45 * u.u,
"element name": "chlorine",
"period": 3,
"group": 17,
"block": "p",
"category": "halogen",
},
"Ar": {
"atomic number": 18,
"atomic mass": 39.948 * u.u,
"element name": "argon",
"period": 3,
"group": 18,
"block": "p",
"category": "noble gas",
},
"K": {
"atomic number": 19,
"atomic mass": 39.0983 * u.u,
"element name": "potassium",
"period": 4,
"group": 1,
"block": "s",
"category": "alkali metal",
},
"Ca": {
"atomic number": 20,
"atomic mass": 40.078 * u.u,
"element name": "calcium",
"period": 4,
"group": 2,
"block": "s",
"category": "alkaline earth metal",
},
"Sc": {
"atomic number": 21,
"atomic mass": 44.955908 * u.u,
"element name": "scandium",
"period": 4,
"group": 3,
"block": "d",
"category": "transition metal",
},
"Ti": {
"atomic number": 22,
"atomic mass": 47.867 * u.u,
"element name": "titanium",
"period": 4,
"group": 4,
"block": "d",
"category": "transition metal",
},
"V": {
"atomic number": 23,
"atomic mass": 50.9415 * u.u,
"element name": "vanadium",
"period": 4,
"group": 5,
"block": "d",
"category": "transition metal",
},
"Cr": {
"atomic number": 24,
"atomic mass": 51.9961 * u.u,
"element name": "chromium",
"period": 4,
"group": 6,
"block": "d",
"category": "transition metal",
},
"Mn": {
"atomic number": 25,
"atomic mass": 54.938044 * u.u,
"element name": "manganese",
"period": 4,
"group": 7,
"block": "d",
"category": "transition metal",
},
"Fe": {
"atomic number": 26,
"atomic mass": 55.845 * u.u,
"element name": "iron",
"period": 4,
"group": 8,
"block": "d",
"category": "transition metal",
},
"Co": {
"atomic number": 27,
"atomic mass": 58.933 * u.u,
"element name": "cobalt",
"period": 4,
"group": 9,
"block": "d",
"category": "transition metal",
},
"Ni": {
"atomic number": 28,
"atomic mass": 58.6934 * u.u,
"element name": "nickel",
"period": 4,
"group": 10,
"block": "d",
"category": "transition metal",
},
"Cu": {
"atomic number": 29,
"atomic mass": 63.546 * u.u,
"element name": "copper",
"period": 4,
"group": 11,
"block": "d",
"category": "transition metal",
},
"Zn": {
"atomic number": 30,
"atomic mass": 65.38 * u.u,
"element name": "zinc",
"period": 4,
"group": 12,
"block": "d",
"category": "transition metal",
},
"Ga": {
"atomic number": 31,
"atomic mass": 69.723 * u.u,
"element name": "gallium",
"period": 4,
"group": 13,
"block": "p",
"category": "post-transition metal",
},
"Ge": {
"atomic number": 32,
"atomic mass": 72.630 * u.u,
"element name": "germanium",
"period": 4,
"group": 14,
"block": "p",
"category": "metalloid",
},
"As": {
"atomic number": 33,
"atomic mass": 74.921595 * u.u,
"element name": "arsenic",
"period": 4,
"group": 15,
"block": "p",
"category": "metalloid",
},
"Se": {
"atomic number": 34,
"atomic mass": 78.971 * u.u,
"element name": "selenium",
"period": 4,
"group": 16,
"block": "p",
"category": "nonmetal",
},
"Br": {
"atomic number": 35,
"atomic mass": 79.904 * u.u,
"element name": "bromine",
"period": 4,
"group": 17,
"block": "p",
"category": "halogen",
},
"Kr": {
"atomic number": 36,
"atomic mass": 83.798 * u.u,
"element name": "krypton",
"period": 4,
"group": 18,
"block": "p",
"category": "noble gas",
},
"Rb": {
"atomic number": 37,
"atomic mass": 85.4678 * u.u,
"element name": "rubidium",
"period": 5,
"group": 1,
"block": "s",
"category": "alkali metal",
},
"Sr": {
"atomic number": 38,
"atomic mass": 87.62 * u.u,
"element name": "strontium",
"period": 5,
"group": 2,
"block": "s",
"category": "alkaline earth metal",
},
"Y": {
"atomic number": 39,
"atomic mass": 88.90584 * u.u,
"element name": "yttrium",
"period": 5,
"group": 3,
"block": "d",
"category": "transition metal",
},
"Zr": {
"atomic number": 40,
"atomic mass": 91.224 * u.u,
"element name": "zirconium",
"period": 5,
"group": 4,
"block": "d",
"category": "transition metal",
},
"Nb": {
"atomic number": 41,
"atomic mass": 92.90637 * u.u,
"element name": "niobium",
"period": 5,
"group": 5,
"block": "d",
"category": "transition metal",
},
"Mo": {
"atomic number": 42,
"atomic mass": 95.95 * u.u,
"element name": "molybdenum",
"period": 5,
"group": 6,
"block": "d",
"category": "transition metal",
},
"Tc": {
"atomic number": 43,
"element name": "technetium",
"period": 5,
"group": 7,
"block": "d",
"category": "transition metal",
},
"Ru": {
"atomic number": 44,
"atomic mass": 101.07 * u.u,
"element name": "ruthenium",
"period": 5,
"group": 8,
"block": "d",
"category": "transition metal",
},
"Rh": {
"atomic number": 45,
"atomic mass": 102.90550 * u.u,
"element name": "rhodium",
"period": 5,
"group": 9,
"block": "d",
"category": "transition metal",
},
"Pd": {
"atomic number": 46,
"atomic mass": 106.42 * u.u,
"element name": "palladium",
"period": 5,
"group": 10,
"block": "d",
"category": "transition metal",
},
"Ag": {
"atomic number": 47,
"atomic mass": 107.8682 * u.u,
"element name": "silver",
"period": 5,
"group": 11,
"block": "d",
"category": "transition metal",
},
"Cd": {
"atomic number": 48,
"atomic mass": 112.414 * u.u,
"element name": "cadmium",
"period": 5,
"group": 12,
"block": "d",
"category": "transition metal",
},
"In": {
"atomic number": 49,
"atomic mass": 114.818 * u.u,
"element name": "indium",
"period": 5,
"group": 13,
"block": "p",
"category": "post-transition metal",
},
"Sn": {
"atomic number": 50,
"atomic mass": 118.710 * u.u,
"element name": "tin",
"period": 5,
"group": 14,
"block": "p",
"category": "post-transition metal",
},
"Sb": {
"atomic number": 51,
"atomic mass": 121.760 * u.u,
"element name": "antimony",
"period": 5,
"group": 15,
"block": "p",
"category": "metalloid",
},
"Te": {
"atomic number": 52,
"atomic mass": 127.60 * u.u,
"element name": "tellurium",
"period": 5,
"group": 16,
"block": "p",
"category": "metalloid",
},
"I": {
"atomic number": 53,
"atomic mass": 126.90447 * u.u,
"element name": "iodine",
"period": 5,
"group": 17,
"block": "p",
"category": "halogen",
},
"Xe": {
"atomic number": 54,
"atomic mass": 131.293 * u.u,
"element name": "xenon",
"period": 5,
"group": 18,
"block": "p",
"category": "noble gas",
},
"Cs": {
"atomic number": 55,
"atomic mass": 132.90545196 * u.u,
"element name": "caesium",
"period": 6,
"group": 1,
"block": "s",
"category": "alkali metal",
},
"Ba": {
"atomic number": 56,
"atomic mass": | |
return _STEPConstruct.STEPConstruct_AP203Context_GetApproval(self, *args)
    def GetApprover(self, *args):
        """
        * Returns the approval person/organization entity held by this context
        :rtype: Handle_StepBasic_ApprovalPersonOrganization
        """
        # SWIG pass-through to the native C++ implementation.
        return _STEPConstruct.STEPConstruct_AP203Context_GetApprover(self, *args)
    def GetApprovalDateTime(self, *args):
        """
        * Returns the approval date/time entity held by this context
        :rtype: Handle_StepBasic_ApprovalDateTime
        """
        # SWIG pass-through to the native C++ implementation.
        return _STEPConstruct.STEPConstruct_AP203Context_GetApprovalDateTime(self, *args)
    def GetProductCategoryRelationship(self, *args):
        """
        * Return entities (roots) instantiated for the part by method Init
        :rtype: Handle_StepBasic_ProductCategoryRelationship
        """
        # SWIG pass-through to the native C++ implementation.
        return _STEPConstruct.STEPConstruct_AP203Context_GetProductCategoryRelationship(self, *args)
    def Clear(self, *args):
        """
        * Clears all fields describing entities specific to each part
        :rtype: None
        """
        # SWIG pass-through to the native C++ implementation.
        return _STEPConstruct.STEPConstruct_AP203Context_Clear(self, *args)
    def InitRoles(self, *args):
        """
        * Initializes constant fields (shared entities)
        :rtype: None
        """
        # SWIG pass-through to the native C++ implementation.
        return _STEPConstruct.STEPConstruct_AP203Context_InitRoles(self, *args)
    def InitAssembly(self, *args):
        """
        * Initializes all missing data which are required for assembly
        :param nauo:
        :type nauo: Handle_StepRepr_NextAssemblyUsageOccurrence &
        :rtype: None
        """
        # SWIG pass-through to the native C++ implementation.
        return _STEPConstruct.STEPConstruct_AP203Context_InitAssembly(self, *args)
    def InitSecurityRequisites(self, *args):
        """
        * Initializes ClassificationOfficer and ClassificationDate entities according to Security entity
        :rtype: None
        """
        # SWIG pass-through to the native C++ implementation.
        return _STEPConstruct.STEPConstruct_AP203Context_InitSecurityRequisites(self, *args)
    def InitApprovalRequisites(self, *args):
        """
        * Initializes Approver and ApprovalDateTime entities according to Approval entity
        :rtype: None
        """
        # SWIG pass-through to the native C++ implementation.
        return _STEPConstruct.STEPConstruct_AP203Context_InitApprovalRequisites(self, *args)
    # Destructor hook: frees the underlying C++ object when the proxy is
    # garbage-collected (and this proxy owns it).
    __swig_destroy__ = _STEPConstruct.delete_STEPConstruct_AP203Context
# SWIG-generated boilerplate: bind the flat C wrapper functions from the
# _STEPConstruct extension module as bound methods of the
# STEPConstruct_AP203Context proxy class, then register the proxy with the
# extension runtime so C++ objects of this type wrap into this class.
STEPConstruct_AP203Context.DefaultApproval = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_DefaultApproval,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.SetDefaultApproval = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_SetDefaultApproval,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.DefaultDateAndTime = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_DefaultDateAndTime,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.SetDefaultDateAndTime = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_SetDefaultDateAndTime,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.DefaultPersonAndOrganization = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_DefaultPersonAndOrganization,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.SetDefaultPersonAndOrganization = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_SetDefaultPersonAndOrganization,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.DefaultSecurityClassificationLevel = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_DefaultSecurityClassificationLevel,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.SetDefaultSecurityClassificationLevel = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_SetDefaultSecurityClassificationLevel,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.RoleCreator = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_RoleCreator,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.RoleDesignOwner = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_RoleDesignOwner,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.RoleDesignSupplier = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_RoleDesignSupplier,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.RoleClassificationOfficer = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_RoleClassificationOfficer,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.RoleCreationDate = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_RoleCreationDate,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.RoleClassificationDate = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_RoleClassificationDate,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.RoleApprover = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_RoleApprover,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.Init = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_Init,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.GetCreator = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_GetCreator,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.GetDesignOwner = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_GetDesignOwner,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.GetDesignSupplier = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_GetDesignSupplier,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.GetClassificationOfficer = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_GetClassificationOfficer,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.GetSecurity = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_GetSecurity,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.GetCreationDate = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_GetCreationDate,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.GetClassificationDate = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_GetClassificationDate,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.GetApproval = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_GetApproval,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.GetApprover = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_GetApprover,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.GetApprovalDateTime = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_GetApprovalDateTime,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.GetProductCategoryRelationship = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_GetProductCategoryRelationship,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.Clear = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_Clear,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.InitRoles = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_InitRoles,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.InitAssembly = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_InitAssembly,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.InitSecurityRequisites = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_InitSecurityRequisites,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context.InitApprovalRequisites = new_instancemethod(_STEPConstruct.STEPConstruct_AP203Context_InitApprovalRequisites,None,STEPConstruct_AP203Context)
STEPConstruct_AP203Context_swigregister = _STEPConstruct.STEPConstruct_AP203Context_swigregister
STEPConstruct_AP203Context_swigregister(STEPConstruct_AP203Context)
class STEPConstruct_Assembly(object):
    """SWIG proxy of the OCCT C++ class STEPConstruct_Assembly.

    Helper for defining assembly links (MappedItem or
    ShapeRepresentationRelationship) when writing a STEP model.
    """
    # Membership flag: True when this Python proxy owns the C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :rtype: None
        """
        _STEPConstruct.STEPConstruct_Assembly_swiginit(self,_STEPConstruct.new_STEPConstruct_Assembly(*args))
    def Init(self, *args):
        """
        * Initialises with starting values Ax0 : origin axis (typically, standard XYZ) Loc : location to which place the item Makes a MappedItem Resulting Value is returned by ItemValue
        :param aSR:
        :type aSR: Handle_StepShape_ShapeDefinitionRepresentation &
        :param SDR0:
        :type SDR0: Handle_StepShape_ShapeDefinitionRepresentation &
        :param Ax0:
        :type Ax0: Handle_StepGeom_Axis2Placement3d &
        :param Loc:
        :type Loc: Handle_StepGeom_Axis2Placement3d &
        :rtype: None
        """
        return _STEPConstruct.STEPConstruct_Assembly_Init(self, *args)
    def MakeRelationship(self, *args):
        """
        * Make a (ShapeRepresentationRelationship,...WithTransformation) Resulting Value is returned by ItemValue
        :rtype: None
        """
        return _STEPConstruct.STEPConstruct_Assembly_MakeRelationship(self, *args)
    def ItemValue(self, *args):
        """
        * Returns the Value If no Make... has been called, returns the starting SR
        :rtype: Handle_Standard_Transient
        """
        return _STEPConstruct.STEPConstruct_Assembly_ItemValue(self, *args)
    def ItemLocation(self, *args):
        """
        * Returns the location of the item, computed from starting aLoc
        :rtype: Handle_StepGeom_Axis2Placement3d
        """
        return _STEPConstruct.STEPConstruct_Assembly_ItemLocation(self, *args)
    def GetNAUO(self, *args):
        """
        * Returns NAUO object describing the assembly link
        :rtype: Handle_StepRepr_NextAssemblyUsageOccurrence
        """
        return _STEPConstruct.STEPConstruct_Assembly_GetNAUO(self, *args)
    def CheckSRRReversesNAUO(*args):
        """
        * Checks whether SRR's definition of assembly and component contradicts with NAUO definition or not, according to model schema (AP214 or AP203)
        :param Model:
        :type Model: Handle_Interface_InterfaceModel &
        :param CDSR:
        :type CDSR: Handle_StepShape_ContextDependentShapeRepresentation &
        :rtype: bool
        """
        return _STEPConstruct.STEPConstruct_Assembly_CheckSRRReversesNAUO(*args)
    # SWIG idiom: rebind the (no-self) function above as a static method.
    CheckSRRReversesNAUO = staticmethod(CheckSRRReversesNAUO)
    # Destructor hook: frees the underlying C++ object when owned.
    __swig_destroy__ = _STEPConstruct.delete_STEPConstruct_Assembly
# SWIG-generated boilerplate: bind the flat C wrapper functions as methods of
# STEPConstruct_Assembly and register the proxy with the extension runtime.
STEPConstruct_Assembly.Init = new_instancemethod(_STEPConstruct.STEPConstruct_Assembly_Init,None,STEPConstruct_Assembly)
STEPConstruct_Assembly.MakeRelationship = new_instancemethod(_STEPConstruct.STEPConstruct_Assembly_MakeRelationship,None,STEPConstruct_Assembly)
STEPConstruct_Assembly.ItemValue = new_instancemethod(_STEPConstruct.STEPConstruct_Assembly_ItemValue,None,STEPConstruct_Assembly)
STEPConstruct_Assembly.ItemLocation = new_instancemethod(_STEPConstruct.STEPConstruct_Assembly_ItemLocation,None,STEPConstruct_Assembly)
STEPConstruct_Assembly.GetNAUO = new_instancemethod(_STEPConstruct.STEPConstruct_Assembly_GetNAUO,None,STEPConstruct_Assembly)
STEPConstruct_Assembly_swigregister = _STEPConstruct.STEPConstruct_Assembly_swigregister
STEPConstruct_Assembly_swigregister(STEPConstruct_Assembly)
# Module-level alias for the static method of the same name on
# STEPConstruct_Assembly (standard SWIG pattern for C++ static members).
def STEPConstruct_Assembly_CheckSRRReversesNAUO(*args):
    """
    * Checks whether SRR's definition of assembly and component contradicts with NAUO definition or not, according to model schema (AP214 or AP203)
    :param Model:
    :type Model: Handle_Interface_InterfaceModel &
    :param CDSR:
    :type CDSR: Handle_StepShape_ContextDependentShapeRepresentation &
    :rtype: bool
    """
    return _STEPConstruct.STEPConstruct_Assembly_CheckSRRReversesNAUO(*args)
class STEPConstruct_ContextTool(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
:param aStepModel:
:type aStepModel: Handle_StepData_StepModel &
:rtype: None
"""
_STEPConstruct.STEPConstruct_ContextTool_swiginit(self,_STEPConstruct.new_STEPConstruct_ContextTool(*args))
def SetModel(self, *args):
"""
* Initialize ApplicationProtocolDefinition by the first entity of that type found in the model
:param aStepModel:
:type aStepModel: Handle_StepData_StepModel &
:rtype: None
"""
return _STEPConstruct.STEPConstruct_ContextTool_SetModel(self, *args)
def GetAPD(self, *args):
"""
:rtype: Handle_StepBasic_ApplicationProtocolDefinition
"""
return _STEPConstruct.STEPConstruct_ContextTool_GetAPD(self, *args)
def AddAPD(self, *args):
"""
:param enforce: default value is Standard_False
:type enforce: bool
:rtype: None
"""
return _STEPConstruct.STEPConstruct_ContextTool_AddAPD(self, *args)
def IsAP203(self, *args):
"""
* Returns True if APD.schema_name is config_control_design
:rtype: bool
"""
return _STEPConstruct.STEPConstruct_ContextTool_IsAP203(self, *args)
def IsAP214(self, *args):
"""
* Returns True if APD.schema_name is automotive_design
:rtype: bool
"""
return _STEPConstruct.STEPConstruct_ContextTool_IsAP214(self, *args)
def GetACstatus(self, *args):
"""
:rtype: Handle_TCollection_HAsciiString
"""
return _STEPConstruct.STEPConstruct_ContextTool_GetACstatus(self, *args)
def GetACschemaName(self, *args):
"""
:rtype: Handle_TCollection_HAsciiString
"""
return _STEPConstruct.STEPConstruct_ContextTool_GetACschemaName(self, *args)
def GetACyear(self, *args):
"""
:rtype: int
"""
return _STEPConstruct.STEPConstruct_ContextTool_GetACyear(self, *args)
def GetACname(self, *args):
"""
:rtype: Handle_TCollection_HAsciiString
"""
return _STEPConstruct.STEPConstruct_ContextTool_GetACname(self, *args)
def SetACstatus(self, *args):
"""
:param status:
:type status: Handle_TCollection_HAsciiString &
:rtype: None
"""
return _STEPConstruct.STEPConstruct_ContextTool_SetACstatus(self, *args)
def SetACschemaName(self, *args):
"""
:param schemaName:
:type schemaName: Handle_TCollection_HAsciiString &
:rtype: None
"""
return _STEPConstruct.STEPConstruct_ContextTool_SetACschemaName(self, *args)
def SetACyear(self, *args):
"""
:param year:
:type year: int
:rtype: None
"""
return _STEPConstruct.STEPConstruct_ContextTool_SetACyear(self, *args)
def SetACname(self, *args):
"""
:param name:
:type name: Handle_TCollection_HAsciiString &
:rtype: None
"""
return _STEPConstruct.STEPConstruct_ContextTool_SetACname(self, *args)
def GetDefaultAxis(self, *args):
"""
* Returns a default axis placement
:rtype: Handle_StepGeom_Axis2Placement3d
"""
return _STEPConstruct.STEPConstruct_ContextTool_GetDefaultAxis(self, *args)
def AP203Context(self, *args):
"""
* Returns tool which maintains context specific for AP203
:rtype: STEPConstruct_AP203Context
"""
return _STEPConstruct.STEPConstruct_ContextTool_AP203Context(self, *args)
def Level(self, *args):
"""
* Returns current assembly level
:rtype: int
"""
return _STEPConstruct.STEPConstruct_ContextTool_Level(self, *args)
def NextLevel(self, *args):
"""
:rtype: None
"""
return _STEPConstruct.STEPConstruct_ContextTool_NextLevel(self, *args)
def PrevLevel(self, *args):
"""
:rtype: None
"""
return _STEPConstruct.STEPConstruct_ContextTool_PrevLevel(self, *args)
def SetLevel(self, *args):
"""
* Changes current assembly level
:param lev:
:type lev: int
:rtype: None
"""
return _STEPConstruct.STEPConstruct_ContextTool_SetLevel(self, *args)
def Index(self, *args):
"""
* Returns current index of assembly component on current level
:rtype: int
"""
return _STEPConstruct.STEPConstruct_ContextTool_Index(self, *args)
def NextIndex(self, *args):
"""
:rtype: None
"""
return _STEPConstruct.STEPConstruct_ContextTool_NextIndex(self, *args)
def PrevIndex(self, *args):
"""
:rtype: None
"""
return _STEPConstruct.STEPConstruct_ContextTool_PrevIndex(self, *args)
def SetIndex(self, *args):
"""
* Changes current index of assembly component on current level
:param ind:
:type ind: int
:rtype: None
"""
return _STEPConstruct.STEPConstruct_ContextTool_SetIndex(self, *args)
    def GetProductName(self, *args):
        """
        * Generates a product name basing on write.step.product.name parameter and current position in the assembly structure

        :rtype: Handle_TCollection_HAsciiString
        """
        # SWIG-generated proxy: delegates to the native C++ implementation.
        return _STEPConstruct.STEPConstruct_ContextTool_GetProductName(self, *args)
    def GetRootsForPart(self, *args):
        """
        * Produces and returns a full list of root entities required for part identified by SDRTool (including SDR itself)

        :param SDRTool:
        :type SDRTool: STEPConstruct_Part &
        :rtype: Handle_TColStd_HSequenceOfTransient
        """
        # SWIG-generated proxy: delegates to the native C++ implementation.
        return _STEPConstruct.STEPConstruct_ContextTool_GetRootsForPart(self, *args)
    def GetRootsForAssemblyLink(self, *args):
        """
        * Produces and returns a full list of root entities required for assembly link identified by assembly (including NAUO and CDSR)

        :param assembly:
        :type assembly: STEPConstruct_Assembly &
        :rtype: Handle_TColStd_HSequenceOfTransient
        """
        # SWIG-generated proxy: delegates to the native C++ implementation.
        return _STEPConstruct.STEPConstruct_ContextTool_GetRootsForAssemblyLink(self, *args)
    # Destructor hook invoked by the SWIG runtime when the proxy is collected.
    __swig_destroy__ = _STEPConstruct.delete_STEPConstruct_ContextTool
# SWIG-generated boilerplate: bind each wrapped native function as an instance
# method on the proxy class, then register the class with the SWIG runtime.
STEPConstruct_ContextTool.SetModel = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_SetModel,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.GetAPD = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_GetAPD,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.AddAPD = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_AddAPD,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.IsAP203 = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_IsAP203,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.IsAP214 = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_IsAP214,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.GetACstatus = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_GetACstatus,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.GetACschemaName = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_GetACschemaName,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.GetACyear = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_GetACyear,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.GetACname = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_GetACname,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.SetACstatus = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_SetACstatus,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.SetACschemaName = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_SetACschemaName,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.SetACyear = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_SetACyear,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.SetACname = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_SetACname,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.GetDefaultAxis = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_GetDefaultAxis,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.AP203Context = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_AP203Context,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.Level = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_Level,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.NextLevel = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_NextLevel,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.PrevLevel = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_PrevLevel,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.SetLevel = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_SetLevel,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.Index = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_Index,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.NextIndex = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_NextIndex,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.PrevIndex = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_PrevIndex,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.SetIndex = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_SetIndex,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.GetProductName = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_GetProductName,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.GetRootsForPart = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_GetRootsForPart,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool.GetRootsForAssemblyLink = new_instancemethod(_STEPConstruct.STEPConstruct_ContextTool_GetRootsForAssemblyLink,None,STEPConstruct_ContextTool)
STEPConstruct_ContextTool_swigregister = _STEPConstruct.STEPConstruct_ContextTool_swigregister
STEPConstruct_ContextTool_swigregister(STEPConstruct_ContextTool)
class STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient(OCC.TCollection.TCollection_BasicMapIterator):
    """SWIG proxy: iterator over a STEPConstruct_DataMapOfAsciiStringTransient
    (TCollection_AsciiString keys mapped to Handle_Standard_Transient values)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :param aMap:
        :type aMap: STEPConstruct_DataMapOfAsciiStringTransient &
        :rtype: None
        """
        _STEPConstruct.STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient_swiginit(self,_STEPConstruct.new_STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient(*args))
    def Initialize(self, *args):
        """
        :param aMap:
        :type aMap: STEPConstruct_DataMapOfAsciiStringTransient &
        :rtype: None
        """
        return _STEPConstruct.STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient_Initialize(self, *args)
    def Key(self, *args):
        """
        :rtype: TCollection_AsciiString
        """
        return _STEPConstruct.STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient_Key(self, *args)
    def Value(self, *args):
        """
        :rtype: Handle_Standard_Transient
        """
        return _STEPConstruct.STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient_Value(self, *args)
    # Destructor hook invoked by the SWIG runtime when the proxy is collected.
    __swig_destroy__ = _STEPConstruct.delete_STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient
# SWIG-generated boilerplate: method binding + runtime registration.
STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient.Initialize = new_instancemethod(_STEPConstruct.STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient_Initialize,None,STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient)
STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient.Key = new_instancemethod(_STEPConstruct.STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient_Key,None,STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient)
STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient.Value = new_instancemethod(_STEPConstruct.STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient_Value,None,STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient)
STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient_swigregister = _STEPConstruct.STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient_swigregister
STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient_swigregister(STEPConstruct_DataMapIteratorOfDataMapOfAsciiStringTransient)
class STEPConstruct_DataMapIteratorOfDataMapOfPointTransient(OCC.TCollection.TCollection_BasicMapIterator):
    """SWIG proxy: iterator over a STEPConstruct_DataMapOfPointTransient
    (gp_Pnt keys mapped to Handle_Standard_Transient values)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :param aMap:
        :type aMap: STEPConstruct_DataMapOfPointTransient &
        :rtype: None
        """
        _STEPConstruct.STEPConstruct_DataMapIteratorOfDataMapOfPointTransient_swiginit(self,_STEPConstruct.new_STEPConstruct_DataMapIteratorOfDataMapOfPointTransient(*args))
    def Initialize(self, *args):
        """
        :param aMap:
        :type aMap: STEPConstruct_DataMapOfPointTransient &
        :rtype: None
        """
        return _STEPConstruct.STEPConstruct_DataMapIteratorOfDataMapOfPointTransient_Initialize(self, *args)
    def Key(self, *args):
        """
        :rtype: gp_Pnt
        """
        return _STEPConstruct.STEPConstruct_DataMapIteratorOfDataMapOfPointTransient_Key(self, *args)
    def Value(self, *args):
        """
        :rtype: Handle_Standard_Transient
        """
        return _STEPConstruct.STEPConstruct_DataMapIteratorOfDataMapOfPointTransient_Value(self, *args)
    # Destructor hook invoked by the SWIG runtime when the proxy is collected.
    __swig_destroy__ = _STEPConstruct.delete_STEPConstruct_DataMapIteratorOfDataMapOfPointTransient
# SWIG-generated boilerplate: method binding + runtime registration.
STEPConstruct_DataMapIteratorOfDataMapOfPointTransient.Initialize = new_instancemethod(_STEPConstruct.STEPConstruct_DataMapIteratorOfDataMapOfPointTransient_Initialize,None,STEPConstruct_DataMapIteratorOfDataMapOfPointTransient)
STEPConstruct_DataMapIteratorOfDataMapOfPointTransient.Key = new_instancemethod(_STEPConstruct.STEPConstruct_DataMapIteratorOfDataMapOfPointTransient_Key,None,STEPConstruct_DataMapIteratorOfDataMapOfPointTransient)
STEPConstruct_DataMapIteratorOfDataMapOfPointTransient.Value = new_instancemethod(_STEPConstruct.STEPConstruct_DataMapIteratorOfDataMapOfPointTransient_Value,None,STEPConstruct_DataMapIteratorOfDataMapOfPointTransient)
STEPConstruct_DataMapIteratorOfDataMapOfPointTransient_swigregister = _STEPConstruct.STEPConstruct_DataMapIteratorOfDataMapOfPointTransient_swigregister
STEPConstruct_DataMapIteratorOfDataMapOfPointTransient_swigregister(STEPConstruct_DataMapIteratorOfDataMapOfPointTransient)
class STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient(OCC.TCollection.TCollection_MapNode):
    """SWIG proxy for a map node pairing a TCollection_AsciiString key with a
    Handle_Standard_Transient value."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :param K:
        :type K: TCollection_AsciiString &
        :param I:
        :type I: Handle_Standard_Transient &
        :param n:
        :type n: TCollection_MapNodePtr &
        :rtype: None
        """
        _STEPConstruct.STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient_swiginit(self,_STEPConstruct.new_STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient(*args))
    def Key(self, *args):
        """
        :rtype: TCollection_AsciiString
        """
        return _STEPConstruct.STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient_Key(self, *args)
    def Value(self, *args):
        """
        :rtype: Handle_Standard_Transient
        """
        return _STEPConstruct.STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient_Value(self, *args)
    def GetHandle(self):
        """Return the OCC handle for this node, creating and caching it on first use."""
        try:
            return self.thisHandle
        except AttributeError:
            # First access: `thisHandle` is not set yet. Build the handle, cache
            # it, and let the handle own the underlying object. The previous
            # bare `except:` also swallowed unrelated errors (KeyboardInterrupt,
            # SystemExit); only a missing attribute is expected here.
            self.thisHandle = Handle_STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient(self)
            self.thisown = False
            return self.thisHandle
    # Destructor hook invoked by the SWIG runtime when the proxy is collected.
    __swig_destroy__ = _STEPConstruct.delete_STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient
# SWIG-generated boilerplate: method binding + runtime registration.
STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient.Key = new_instancemethod(_STEPConstruct.STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient_Key,None,STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient)
STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient.Value = new_instancemethod(_STEPConstruct.STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient_Value,None,STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient)
STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient_swigregister = _STEPConstruct.STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient_swigregister
STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient_swigregister(STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient)
class Handle_STEPConstruct_DataMapNodeOfDataMapOfAsciiStringTransient(OCC.TCollection.Handle_TCollection_MapNode):
thisown = _swig_property(lambda x: x.this.own(), lambda x, | |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUOSPFAreasFetcher
from bambou import NURESTObject
class NUOSPFInstance(NURESTObject):
""" Represents a OSPFInstance in the VSD
Notes:
The OSPF instance is the highest hierarchical OSPF configuration object in a domain. The OSPF instance allows you to assign global import and export routing policies for OSPF traffic in the domain.
"""
__rest_name__ = "ospfinstance"
__resource_name__ = "ospfinstances"
## Constants
CONST_IP_TYPE_IPV4 = "IPV4"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_OSPF_TYPE_OSPFV2 = "OSPFv2"
CONST_OSPF_TYPE_OSPFV3 = "OSPFv3"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
    def __init__(self, **kwargs):
        """ Initializes a OSPFInstance instance

            Notes:
                You can specify all parameters while calling this method.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> ospfinstance = NUOSPFInstance(id=u'xxxx-xxx-xxx-xxx', name=u'OSPFInstance')
                >>> ospfinstance = NUOSPFInstance(data=my_dict)
        """
        super(NUOSPFInstance, self).__init__()
        # Read/Write Attributes
        self._ip_type = None
        self._ospf_type = None
        self._name = None
        self._last_updated_by = None
        self._last_updated_date = None
        self._description = None
        self._embedded_metadata = None
        self._entity_scope = None
        self._creation_date = None
        self._preference = None
        self._associated_export_routing_policy_id = None
        self._associated_import_routing_policy_id = None
        self._super_backbone_enabled = None
        self._owner = None
        self._export_limit = None
        self._export_to_overlay = None
        self._external_id = None
        self._external_preference = None
        # Declare the REST attribute mapping: each call links a local snake_case
        # attribute to its remote camelCase name, type, and constraints.
        self.expose_attribute(local_name="ip_type", remote_name="IPType", attribute_type=str, is_required=False, is_unique=False, choices=[u'IPV4'])
        self.expose_attribute(local_name="ospf_type", remote_name="OSPFType", attribute_type=str, is_required=False, is_unique=True, choices=[u'OSPFv2', u'OSPFv3'])
        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="preference", remote_name="preference", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_export_routing_policy_id", remote_name="associatedExportRoutingPolicyID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_import_routing_policy_id", remote_name="associatedImportRoutingPolicyID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="super_backbone_enabled", remote_name="superBackboneEnabled", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="export_limit", remote_name="exportLimit", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="export_to_overlay", remote_name="exportToOverlay", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
        self.expose_attribute(local_name="external_preference", remote_name="externalPreference", attribute_type=int, is_required=False, is_unique=False)
        # Fetchers: lazy accessors for child collections of this instance.
        self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.ospf_areas = NUOSPFAreasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        # Apply the keyword arguments (including the special `data` dict).
        self._compute_args(**kwargs)
# Properties
    @property
    def ip_type(self):
        """ Get ip_type value.
            Notes:
                IP type of the OSPF instance; currently only IPv4 is supported.
                Named `IPType` in the VSD API.
        """
        return self._ip_type
    @ip_type.setter
    def ip_type(self, value):
        """ Set ip_type value.
            Notes:
                IP type of the OSPF instance; currently only IPv4 is supported.
                Named `IPType` in the VSD API.
        """
        self._ip_type = value
    @property
    def ospf_type(self):
        """ Get ospf_type value.
            Notes:
                OSPF protocol version; possible values are OSPFv2 and OSPFv3.
                Named `OSPFType` in the VSD API.
        """
        return self._ospf_type
    @ospf_type.setter
    def ospf_type(self, value):
        """ Set ospf_type value.
            Notes:
                OSPF protocol version; possible values are OSPFv2 and OSPFv3.
                Named `OSPFType` in the VSD API.
        """
        self._ospf_type = value
    @property
    def name(self):
        """ Get name value.
            Notes:
                Name of the OSPF instance (required attribute).
        """
        return self._name
    @name.setter
    def name(self, value):
        """ Set name value.
            Notes:
                Name of the OSPF instance (required attribute).
        """
        self._name = value
    @property
    def last_updated_by(self):
        """ Get last_updated_by value.
            Notes:
                ID of the user who last updated the object.
                Named `lastUpdatedBy` in the VSD API.
        """
        return self._last_updated_by
    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value.
            Notes:
                ID of the user who last updated the object.
                Named `lastUpdatedBy` in the VSD API.
        """
        self._last_updated_by = value
    @property
    def last_updated_date(self):
        """ Get last_updated_date value.
            Notes:
                Timestamp of the last update of this object.
                Named `lastUpdatedDate` in the VSD API.
        """
        return self._last_updated_date
    @last_updated_date.setter
    def last_updated_date(self, value):
        """ Set last_updated_date value.
            Notes:
                Timestamp of the last update of this object.
                Named `lastUpdatedDate` in the VSD API.
        """
        self._last_updated_date = value
    @property
    def description(self):
        """ Get description value.
            Notes:
                Free-form description of the OSPF instance.
        """
        return self._description
    @description.setter
    def description(self, value):
        """ Set description value.
            Notes:
                Free-form description of the OSPF instance.
        """
        self._description = value
    @property
    def embedded_metadata(self):
        """ Get embedded_metadata value.
            Notes:
                Metadata objects associated with this entity. Populated only when
                the API request enables the embedded Metadata feature; capped at
                a system-configured maximum number of entries.
                Named `embeddedMetadata` in the VSD API.
        """
        return self._embedded_metadata
    @embedded_metadata.setter
    def embedded_metadata(self, value):
        """ Set embedded_metadata value.
            Notes:
                Metadata objects associated with this entity. Populated only when
                the API request enables the embedded Metadata feature; capped at
                a system-configured maximum number of entries.
                Named `embeddedMetadata` in the VSD API.
        """
        self._embedded_metadata = value
    @property
    def entity_scope(self):
        """ Get entity_scope value.
            Notes:
                Whether the entity is scoped at Data center (GLOBAL) or
                Enterprise level.
                Named `entityScope` in the VSD API.
        """
        return self._entity_scope
    @entity_scope.setter
    def entity_scope(self, value):
        """ Set entity_scope value.
            Notes:
                Whether the entity is scoped at Data center (GLOBAL) or
                Enterprise level.
                Named `entityScope` in the VSD API.
        """
        self._entity_scope = value
    @property
    def creation_date(self):
        """ Get creation_date value.
            Notes:
                Timestamp of this object's creation.
                Named `creationDate` in the VSD API.
        """
        return self._creation_date
    @creation_date.setter
    def creation_date(self, value):
        """ Set creation_date value.
            Notes:
                Timestamp of this object's creation.
                Named `creationDate` in the VSD API.
        """
        self._creation_date = value
    @property
    def preference(self):
        """ Get preference value.
            Notes:
                Preference for OSPF internal routes.
        """
        return self._preference
    @preference.setter
    def preference(self, value):
        """ Set preference value.
            Notes:
                Preference for OSPF internal routes.
        """
        self._preference = value
    @property
    def associated_export_routing_policy_id(self):
        """ Get associated_export_routing_policy_id value.
            Notes:
                ID of the export OSPF routing policy.
                Named `associatedExportRoutingPolicyID` in the VSD API.
        """
        return self._associated_export_routing_policy_id
    @associated_export_routing_policy_id.setter
    def associated_export_routing_policy_id(self, value):
        """ Set associated_export_routing_policy_id value.
            Notes:
                ID of the export OSPF routing policy.
                Named `associatedExportRoutingPolicyID` in the VSD API.
        """
        self._associated_export_routing_policy_id = value
    @property
    def associated_import_routing_policy_id(self):
        """ Get associated_import_routing_policy_id value.
            Notes:
                ID of the import OSPF routing policy.
                Named `associatedImportRoutingPolicyID` in the VSD API.
        """
        return self._associated_import_routing_policy_id
@associated_import_routing_policy_id.setter
def associated_import_routing_policy_id(self, value):
""" Set associated_import_routing_policy_id value.
Notes:
Import OSPF Routing | |
# -*- coding: utf-8 -*-
import os
import unittest.mock as mock
import pytest
from unidiff import PatchSet
from badwolf.spec import Specification
from badwolf.context import Context
from badwolf.lint.processor import LintProcessor
from badwolf.utils import ObjectDict
# Directory of this test module and the fixtures/ directory shipped next to it.
CURR_PATH = os.path.abspath(os.path.dirname(__file__))
FIXTURES_PATH = os.path.join(CURR_PATH, 'fixtures')
@pytest.fixture(scope='function')
def pr_context():
    """Build a pull-request Context for the deepanalyzer/badwolf repository."""
    source = {
        'repository': {'full_name': 'deepanalyzer/badwolf'},
        'branch': {'name': 'master'},
        'commit': {'hash': '000000'},
    }
    target = {'commit': {'hash': '111111'}}
    return Context(
        'deepanalyzer/badwolf',
        None,
        'pullrequest',
        'message',
        source,
        target,
        pr_id=1,
    )
def test_no_linters_ignore(app, pr_context):
    """With no linters configured, process() must bail out before loading changes."""
    processor = LintProcessor(pr_context, Specification(), '/tmp')
    with mock.patch.object(processor, 'load_changes') as load_changes:
        processor.process()

    load_changes.assert_not_called()
def test_load_changes_failed_ignore(app, pr_context, caplog):
    """When load_changes yields nothing, processing stops and logs the failure."""
    spec = Specification()
    spec.linters.append('flake8')
    processor = LintProcessor(pr_context, spec, '/tmp')
    with mock.patch.object(processor, 'load_changes') as load_changes:
        load_changes.return_value = None
        processor.process()

    assert load_changes.called
    assert 'Load changes failed' in caplog.text
def test_no_changed_files_ignore(app, pr_context, caplog):
    """A diff that only deletes a file leaves nothing to lint; a log entry results."""
    diff = """diff --git a/removed_file b/removed_file
deleted file mode 100644
index 1f38447..0000000
--- a/removed_file
+++ /dev/null
@@ -1,3 +0,0 @@
-This content shouldn't be here.
-
-This file will be removed.
"""
    spec = Specification()
    spec.linters.append(ObjectDict(name='flake8', pattern=None))
    lint = LintProcessor(pr_context, spec, '/tmp')
    patch = PatchSet(diff.split('\n'))
    with mock.patch.object(lint, 'load_changes') as load_changes:
        load_changes.return_value = patch
        lint.process()

    assert load_changes.called
    assert 'No changed files found' in caplog.text
def test_flake8_lint_a_py(app, pr_context):
    """flake8 reports exactly one problem, on line 6 of the newly added a.py."""
    diff = """diff --git a/a.py b/a.py
new file mode 100644
index 0000000..fdeea15
--- /dev/null
+++ b/a.py
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import, unicode_literals
+
+
+def add(a, b):
+    return a+ b
"""
    spec = Specification()
    spec.linters.append(ObjectDict(name='flake8', pattern=None))
    lint = LintProcessor(pr_context, spec, os.path.join(FIXTURES_PATH, 'flake8'))
    patch = PatchSet(diff.split('\n'))
    with mock.patch.object(lint, 'load_changes') as load_changes,\
            mock.patch.object(lint, 'update_build_status') as build_status,\
            mock.patch.object(lint, '_report') as report:
        load_changes.return_value = patch
        build_status.return_value = None
        report.return_value = (1, 2)
        lint.problems.set_changes(patch)
        lint.process()
        assert load_changes.called

    assert len(lint.problems) == 1
    problem = lint.problems[0]
    assert problem.filename == 'a.py'
    assert problem.line == 6
@pytest.mark.xfail(reason='eslint may changes')
def test_eslint_lint_a_js(app, pr_context):
    """eslint (quotes rule set to single) flags the double-quoted string in a.js."""
    diff = """diff --git a/.eslintrc b/.eslintrc
new file mode 100644
index 0000000..45e5d69
--- /dev/null
+++ b/.eslintrc
@@ -0,0 +1,5 @@
+{
+  "rules": {
+    "quotes": [2, "single"]
+  }
+}
diff --git a/a.js b/a.js
new file mode 100644
index 0000000..f119a7f
--- /dev/null
+++ b/a.js
@@ -0,0 +1 @@
+console.log("bar")
"""
    spec = Specification()
    spec.linters.append(ObjectDict(name='eslint', pattern=None))
    lint = LintProcessor(pr_context, spec, os.path.join(FIXTURES_PATH, 'eslint'))
    patch = PatchSet(diff.split('\n'))
    with mock.patch.object(lint, 'load_changes') as load_changes,\
            mock.patch.object(lint, 'update_build_status') as build_status,\
            mock.patch.object(lint, '_report') as report:
        load_changes.return_value = patch
        build_status.return_value = None
        report.return_value = (1, 2)
        lint.problems.set_changes(patch)
        lint.process()
        assert load_changes.called

    assert len(lint.problems) == 1
    problem = lint.problems[0]
    assert problem.filename == 'a.js'
    assert problem.line == 1
def test_pycodestyle_lint_a_py(app, pr_context):
    """pycodestyle reports exactly one problem, on line 6 of the added a.py."""
    diff = """diff --git a/a.py b/a.py
new file mode 100644
index 0000000..fdeea15
--- /dev/null
+++ b/a.py
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import, unicode_literals
+
+
+def add(a, b):
+    return a+ b
"""
    spec = Specification()
    spec.linters.append(ObjectDict(name='pycodestyle', pattern=None))
    lint = LintProcessor(pr_context, spec, os.path.join(FIXTURES_PATH, 'pycodestyle'))
    patch = PatchSet(diff.split('\n'))
    with mock.patch.object(lint, 'load_changes') as load_changes,\
            mock.patch.object(lint, 'update_build_status') as build_status,\
            mock.patch.object(lint, '_report') as report:
        load_changes.return_value = patch
        build_status.return_value = None
        report.return_value = (1, 2)
        lint.problems.set_changes(patch)
        lint.process()
        assert load_changes.called

    assert len(lint.problems) == 1
    problem = lint.problems[0]
    assert problem.filename == 'a.py'
    assert problem.line == 6
def test_jsonlint_a_json(app, pr_context):
    """jsonlint flags the invalid JSON (trailing comma) on line 1 of a.json."""
    diff = """diff --git a/a.json b/a.json
new file mode 100644
index 0000000..266e19f
--- /dev/null
+++ b/a.json
@@ -0,0 +1 @@
+{"a": 1,}
"""
    spec = Specification()
    spec.linters.append(ObjectDict(name='jsonlint', pattern=None))
    lint = LintProcessor(pr_context, spec, os.path.join(FIXTURES_PATH, 'jsonlint'))
    patch = PatchSet(diff.split('\n'))
    with mock.patch.object(lint, 'load_changes') as load_changes,\
            mock.patch.object(lint, 'update_build_status') as build_status,\
            mock.patch.object(lint, '_report') as report:
        load_changes.return_value = patch
        build_status.return_value = None
        report.return_value = (1, 2)
        lint.problems.set_changes(patch)
        lint.process()
        assert load_changes.called

    assert len(lint.problems) == 1
    problem = lint.problems[0]
    assert problem.filename == 'a.json'
    assert problem.line == 1
def test_jsonlint_a_json_changes_in_range(app, pr_context):
    """A jsonlint problem on the added line (file line 2) lies inside the changed
    range of the diff, so it is kept."""
    diff = """diff --git a/b.json b/b.json
index 6ebebfe..6be8d74 100644
--- a/b.json
+++ b/b.json
@@ -1,3 +1,4 @@
 {
   "a": 1
+  "b": 2
 }
"""
    spec = Specification()
    spec.linters.append(ObjectDict(name='jsonlint', pattern=None))
    lint = LintProcessor(pr_context, spec, os.path.join(FIXTURES_PATH, 'jsonlint'))
    patch = PatchSet(diff.split('\n'))
    with mock.patch.object(lint, 'load_changes') as load_changes,\
            mock.patch.object(lint, 'update_build_status') as build_status,\
            mock.patch.object(lint, '_report') as report:
        load_changes.return_value = patch
        build_status.return_value = None
        report.return_value = (1, 2)
        lint.problems.set_changes(patch)
        lint.process()
        assert load_changes.called

    assert len(lint.problems) == 1
    problem = lint.problems[0]
    assert problem.filename == 'b.json'
    assert problem.line == 2
def test_jsonlint_a_json_changes_out_of_range(app, pr_context):
    """Problems reported outside the diff's changed line range are filtered out,
    leaving no problems at all."""
    diff = """diff --git a/c.json b/c.json
index 9b90002..c36a2a4 100644
--- a/c.json
+++ b/c.json
@@ -3,4 +3,5 @@
   "b": 2,
   c: 3,
   d: 4
+  e: 5
 }
"""
    spec = Specification()
    spec.linters.append(ObjectDict(name='jsonlint', pattern=None))
    lint = LintProcessor(pr_context, spec, os.path.join(FIXTURES_PATH, 'jsonlint'))
    patch = PatchSet(diff.split('\n'))
    with mock.patch.object(lint, 'load_changes') as load_changes,\
            mock.patch.object(lint, 'update_build_status') as build_status,\
            mock.patch.object(lint, '_report') as report:
        load_changes.return_value = patch
        build_status.return_value = None
        report.return_value = (1, 2)
        lint.problems.set_changes(patch)
        lint.process()
        assert load_changes.called

    assert len(lint.problems) == 0
def test_shellcheck_a_sh(app, pr_context):
    """shellcheck flags the `$foo=42` line (line 2) of the newly added script."""
    # NOTE(review): the hunk header `@@ -0,0 +2 @@` looks malformed for a 2-line
    # addition (expected `@@ -0,0 +1,2 @@`); unidiff appears to tolerate it, but
    # confirm before relying on this fixture elsewhere.
    diff = """diff --git a/a.sh b/a.sh
new file mode 100644
index 0000000..9fb9840
--- /dev/null
+++ b/a.sh
@@ -0,0 +2 @@
+#!/bin/sh
+$foo=42
"""
    spec = Specification()
    spec.linters.append(ObjectDict(name='shellcheck', pattern=None))
    lint = LintProcessor(pr_context, spec, os.path.join(FIXTURES_PATH, 'shellcheck'))
    patch = PatchSet(diff.split('\n'))
    with mock.patch.object(lint, 'load_changes') as load_changes,\
            mock.patch.object(lint, 'update_build_status') as build_status,\
            mock.patch.object(lint, '_report') as report:
        load_changes.return_value = patch
        build_status.return_value = None
        report.return_value = (1, 2)
        lint.problems.set_changes(patch)
        lint.process()
        assert load_changes.called

    assert len(lint.problems) > 0
    problem = lint.problems[0]
    assert problem.filename == 'a.sh'
    assert problem.line == 2
def test_csslint_a_css(app, pr_context):
    """csslint reports exactly one problem, on line 1 of the added a.css."""
    diff = """diff --git a/a.css b/a.css
new file mode 100644
index 0000000..5512dae
--- /dev/null
+++ b/a.css
@@ -0,0 +1 @@
+.a {}
"""
    spec = Specification()
    spec.linters.append(ObjectDict(name='csslint', pattern=None))
    lint = LintProcessor(pr_context, spec, os.path.join(FIXTURES_PATH, 'csslint'))
    patch = PatchSet(diff.split('\n'))
    with mock.patch.object(lint, 'load_changes') as load_changes,\
            mock.patch.object(lint, 'update_build_status') as build_status,\
            mock.patch.object(lint, '_report') as report:
        load_changes.return_value = patch
        build_status.return_value = None
        report.return_value = (1, 2)
        lint.problems.set_changes(patch)
        lint.process()
        assert load_changes.called

    assert len(lint.problems) == 1
    problem = lint.problems[0]
    assert problem.filename == 'a.css'
    assert problem.line == 1
def test_flake8_lint_a_py_with_custom_glob_pattern(app, pr_context):
    """A glob linter pattern ('*.pyx') routes b.pyx to flake8 even though the
    default pattern would not cover it."""
    diff = """diff --git a/b.pyx b/b.pyx
new file mode 100644
index 0000000..fdeea15
--- /dev/null
+++ b/b.pyx
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import, unicode_literals
+
+
+def add(a, b):
+    return a+ b
"""
    spec = Specification()
    spec.linters.append(ObjectDict(name='flake8', pattern='*.pyx'))
    lint = LintProcessor(pr_context, spec, os.path.join(FIXTURES_PATH, 'flake8'))
    patch = PatchSet(diff.split('\n'))
    with mock.patch.object(lint, 'load_changes') as load_changes,\
            mock.patch.object(lint, 'update_build_status') as build_status,\
            mock.patch.object(lint, '_report') as report:
        load_changes.return_value = patch
        build_status.return_value = None
        report.return_value = (1, 2)
        lint.problems.set_changes(patch)
        lint.process()
        assert load_changes.called

    assert len(lint.problems) == 1
    problem = lint.problems[0]
    assert problem.filename == 'b.pyx'
    assert problem.line == 6
def test_flake8_lint_a_py_with_custom_regex_pattern(app, pr_context):
    """flake8 lints a .pyx file when the linter carries a regex pattern."""
    diff = """diff --git a/b.pyx b/b.pyx
new file mode 100644
index 0000000..fdeea15
--- /dev/null
+++ b/b.pyx
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import, unicode_literals
+
+
+def add(a, b):
+    return a+ b
"""
    spec = Specification()
    # Raw string: '\.' in a plain literal is an invalid escape sequence
    # (DeprecationWarning; SyntaxWarning on newer Pythons). Value unchanged.
    spec.linters.append(ObjectDict(name='flake8', pattern=r'^.*\.pyx$'))
    lint = LintProcessor(pr_context, spec, os.path.join(FIXTURES_PATH, 'flake8'))
    patch = PatchSet(diff.split('\n'))
    with mock.patch.object(lint, 'load_changes') as load_changes, \
            mock.patch.object(lint, 'update_build_status') as build_status, \
            mock.patch.object(lint, '_report') as report:
        load_changes.return_value = patch
        build_status.return_value = None
        report.return_value = (1, 2)
        lint.problems.set_changes(patch)
        lint.process()

        assert load_changes.called
        assert len(lint.problems) == 1
        problem = lint.problems[0]
        assert problem.filename == 'b.pyx'
        assert problem.line == 6
def test_yamllint_a_yml(app, pr_context):
    """yamllint reports the duplicated key on line 3 of a.yml."""
    diff = """diff --git a/a.yml b/a.yml
new file mode 100644
index 0000000..1eccee8
--- /dev/null
+++ b/a.yml
@@ -0,0 +1,3 @@
+---
+a: 1
+a: 2
"""
    changes = PatchSet(diff.split('\n'))
    spec = Specification()
    spec.linters.append(ObjectDict(name='yamllint', pattern=None))
    processor = LintProcessor(pr_context, spec, os.path.join(FIXTURES_PATH, 'yamllint'))
    with mock.patch.object(processor, 'load_changes') as fake_load, \
            mock.patch.object(processor, 'update_build_status') as fake_status, \
            mock.patch.object(processor, '_report') as fake_report:
        fake_load.return_value = changes
        fake_status.return_value = None
        fake_report.return_value = (1, 2)
        processor.problems.set_changes(changes)
        processor.process()

        assert fake_load.called
        assert len(processor.problems) == 1
        first = processor.problems[0]
        assert first.filename == 'a.yml'
        assert first.line == 3
def test_flake8_lint_a_py_with_multi_custom_glob_patterns(app, pr_context):
    """flake8 lints a .pyx file when the pattern lists several globs."""
    diff = """diff --git a/b.pyx b/b.pyx
new file mode 100644
index 0000000..fdeea15
--- /dev/null
+++ b/b.pyx
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import, unicode_literals
+
+
+def add(a, b):
+    return a+ b
"""
    changes = PatchSet(diff.split('\n'))
    spec = Specification()
    spec.linters.append(ObjectDict(name='flake8', pattern='*.py *.pyx'))
    processor = LintProcessor(pr_context, spec, os.path.join(FIXTURES_PATH, 'flake8'))
    with mock.patch.object(processor, 'load_changes') as fake_load, \
            mock.patch.object(processor, 'update_build_status') as fake_status, \
            mock.patch.object(processor, '_report') as fake_report:
        fake_load.return_value = changes
        fake_status.return_value = None
        fake_report.return_value = (1, 2)
        processor.problems.set_changes(changes)
        processor.process()

        assert fake_load.called
        assert len(processor.problems) == 1
        first = processor.problems[0]
        assert first.filename == 'b.pyx'
        assert first.line == 6
def test_bandit_lint_a_py(app, pr_context):
    """bandit reports the try/except/pass on line 3 as a non-error finding."""
    diff = """diff --git a/a.py b/a.py
new file mode 100644
index 0000000..719cd56
--- /dev/null
+++ b/a.py
@@ -0,0 +1,4 @@
+try:
+    a = 1
+except Exception:
+    pass
"""
    changes = PatchSet(diff.split('\n'))
    spec = Specification()
    spec.linters.append(ObjectDict(name='bandit'))
    processor = LintProcessor(pr_context, spec, os.path.join(FIXTURES_PATH, 'bandit'))
    with mock.patch.object(processor, 'load_changes') as fake_load, \
            mock.patch.object(processor, 'update_build_status') as fake_status, \
            mock.patch.object(processor, '_report') as fake_report:
        fake_load.return_value = changes
        fake_status.return_value = None
        fake_report.return_value = (1, 2)
        processor.problems.set_changes(changes)
        processor.process()

        assert fake_load.called
        assert len(processor.problems) == 1
        first = processor.problems[0]
        assert first.filename == 'a.py'
        assert first.line == 3
        assert not first.is_error
def test_rstlint_a_rst(app, pr_context):
    """rstlint reports the too-short title underline on line 2 of a.rst."""
    diff = """diff --git a/a.rst b/a.rst
new file mode 100644
index 0000000..4e46cf9
--- /dev/null
+++ b/a.rst
@@ -0,0 +1,2 @@
+Hello World
+====
"""
    changes = PatchSet(diff.split('\n'))
    spec = Specification()
    spec.linters.append(ObjectDict(name='rstlint'))
    processor = LintProcessor(pr_context, spec, os.path.join(FIXTURES_PATH, 'rstlint'))
    with mock.patch.object(processor, 'load_changes') as fake_load, \
            mock.patch.object(processor, 'update_build_status') as fake_status, \
            mock.patch.object(processor, '_report') as fake_report:
        fake_load.return_value = changes
        fake_status.return_value = None
        fake_report.return_value = (1, 2)
        processor.problems.set_changes(changes)
        processor.process()

        assert fake_load.called
        assert len(processor.problems) == 1
        first = processor.problems[0]
        assert first.filename == 'a.rst'
        assert first.line == 2
def test_pylint_lint_a_py(app, pr_context):
diff = """diff | |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import logging
import os
import time
import warnings
from collections import defaultdict
from multiprocessing.pool import ThreadPool
import boto3
import numpy as np
from lookoutvision.manifest import Manifest
from lookoutvision.metrics import Metrics
class LookoutForVision:
    """LookoutForVision class to build, train and deploy.

    This class helps to build, train and deploy an Amazon Lookout for Vision
    project. It implements the three most common methods for model deployment:
    - .fit()
    - .deploy()
    - .predict()

    Attributes:
        project_name    The name of the Amazon Lookout for Vision project.
        lv              The Amazon Lookout for Vision boto3 client.
        s3              The S3 boto3 client (set in __init__).
        model_version   The (initial) model version.
    """
def __init__(self, project_name, model_version="1"):
"""Build, train and deploy Amazon Lookout for Vision models.
Technical documentation on how Amazon Lookout for Vision works can be
found at: https://aws.amazon.com/lookout-for-vision/
Args:
project_name (str): Name of the Amazon Lookout for Vision project to interact with.
model_version (str): The (initial) model version.
"""
# super(LookoutForVision, self).__init__()
self.project_name = project_name
self.lv = boto3.client("lookoutvision")
self.s3 = boto3.client("s3")
self.model_version = model_version
self.describe_project()
@classmethod
def _get_param_names(self):
"""Internal get parameter names helper.
It will retrieve all the parameters used within your class.
Args:
None
Returns:
list: all class parameters
"""
init = getattr(self.__init__, "deprecated_original", self.__init__)
init_signature = inspect.signature(init)
parameters = [p for p in init_signature.parameters.values() if p.name != "self" and p.kind != p.VAR_KEYWORD]
return sorted([p.name for p in parameters])
def describe_project(self):
"""Describe a project.
Args:
None
Returns:
json: The project details
"""
project = {}
# First try to describe the project given by the name:
try:
project = self.lv.describe_project(ProjectName=self.project_name)["ProjectDescription"]["ProjectArn"]
print("Project already exists with arn: " + project)
except Exception as e:
if "ResourceNotFoundException" in str(e):
print(
f"Project {self.project_name} does not exist yet...use the create_project() method to set up your first project"
)
else:
raise Exception
return project
def create_project(self):
"""Create a project.
Args:
None
Returns:
json: The project details
"""
project = {}
# First try to create a new project:
try:
project = self.lv.create_project(ProjectName=self.project_name)["ProjectMetadata"]["ProjectArn"]
print(f"Creating the project: {self.project_name}")
except Exception as e:
if "ConflictException" in str(e):
project = self.lv.describe_project(ProjectName=self.project_name)["ProjectDescription"]["ProjectArn"]
else:
raise Exception
return project
def get_params(self, deep=True):
"""Get class parameters.
Args:
deep (bool): Make a deep copy of parameters for output.
Returns:
json: an object with the internal parameters and their values.
"""
output = dict()
for key in self._get_param_names():
value = getattr(self, key)
if deep and hasattr(value, "get_params"):
deep_items = value.get_params().items()
output.update((key + "__" + i, val) for i, val in deep_items)
output[key] = value
return output
def set_params(self, **params):
"""Set class parameters.
Args:
**params (dict): New parameters in key=value format.
Returns:
self: the class itself
"""
if not params:
return self
valid = self.get_params(deep=True)
nested = defaultdict(dict)
for key, value in params.items():
key, delim, sub = key.partition("__")
if key not in valid:
raise ValueError(
"Invalid parameter %s for class %s. "
"Check the list of available parameters "
"with `cls.get_params().keys()`." % (key, self)
)
if delim:
nested[key][sub] = value
else:
setattr(self, key, value)
valid[key] = value
for key, sub_params in nested.items():
valid[key].set_params(**sub_params)
return self
    def update_datasets(self, dataset_type, wait=True):
        """Recreate the project's dataset(s).

        Deletes every dataset currently attached to the project, then
        delegates to create_datasets() to build fresh ones from the supplied
        manifest locations.

        Args:
            dataset_type (dict): A setting from where to get input data from
                Format of this input is of type:
                "train": {
                    "bucket": "my_s3_bucket",
                    "key": "training.manifest",
                    "version": "1"
                },
                "test": {
                    "bucket": "my_s3_bucket",
                    "key": "validation.manifest",
                    "version": "1"
                }
            wait (bool): Either to wait in the console uppon succes or escape function
        Returns:
            json: an object with metadata on success
        """
        # Delete every dataset the project currently has...
        p = self.lv.describe_project(ProjectName=self.project_name)
        for item in p["ProjectDescription"]["Datasets"]:
            dtype = item["DatasetType"]
            try:
                self.lv.delete_dataset(ProjectName=self.project_name, DatasetType=dtype)
            except Exception as e:
                # Deletion is best effort: report and continue with the rest.
                print("Error in dataset deletion with exception: {}".format(e))
                print("Please check CloudWatch logs for troubleshooting!")
        # ...then create the new dataset(s) in their place.
        return self.create_datasets(dataset_type=dataset_type, wait=wait)
    def create_datasets(self, dataset_type, wait=True):
        """Create a dataset.

        Args:
            dataset_type (dict): A setting from where to get input data from
                Format of this input is of type:
                "train": {
                    "bucket": "my_s3_bucket",
                    "key": "training.manifest",
                    "version": "1"
                },
                "test": {
                    "bucket": "my_s3_bucket",
                    "key": "validation.manifest",
                    "version": "1"
                }
            wait (bool): Either to wait in the console uppon succes or escape function
        Returns:
            json: an object with metadata on success
        """
        datasets = {}
        # For each dataset used...
        for key in dataset_type:
            # ...create a dataset
            d_type = "train" if (key == "training" or key == "train") else "test"
            try:
                dataset = self.lv.create_dataset(
                    ProjectName=self.project_name,
                    DatasetType=d_type,
                    DatasetSource={
                        "GroundTruthManifest": {
                            "S3Object": {"Bucket": dataset_type[key]["bucket"], "Key": dataset_type[key]["key"]}
                        }
                    },
                )["DatasetMetadata"]
                # Log output
                datasets[key] = dataset
            except Exception as e:
                if "ConflictException" in str(e):
                    print("Dataset already existed in the project")
                    print("If the dataset already exists try updating it with: update_datasets")
                else:
                    print("Error in create_datasets with exception: {}".format(e))
                    raise Exception
                # NOTE(review): on a ConflictException this returns early and
                # skips the status-polling loop below -- confirm intentional.
                return datasets
        # Notify user when creation is done:
        print("Creating dataset(s):", end=" ")
        if wait:
            # Poll both datasets until neither is still CREATE_IN_PROGRESS.
            stati = ["CREATE_IN_PROGRESS"]
            while (np.array(stati) == "CREATE_IN_PROGRESS").any():
                stati = []
                for key in dataset_type:
                    d_type = "train" if (key == "training" or key == "train") else "test"
                    stat = self.lv.describe_dataset(ProjectName=self.project_name, DatasetType=d_type)[
                        "DatasetDescription"
                    ]
                    stati.append(stat["Status"])
                    datasets[key]["Status"] = stat["Status"]
                    datasets[key]["StatusMessage"] = stat["StatusMessage"]
                    if stat["Status"] == "CREATE_FAILED":
                        warnings.warn(
                            "Failed to create dataset {} with status message: {}.".format(key, stat["StatusMessage"])
                        )
                print("-", end="")
                time.sleep(5)
            print("!")
        return datasets
    def fit(self, output_bucket, model_prefix=None, train_and_test=True, wait=True):
        """Train the model.

        Create a model from the datasets.
        At first check whether the minimum no of images are available to train the model.
        There should be min 20 normal and 10 anomalous images in training/train dataset.

        Args:
            output_bucket (str): The output S3 bucket to be used for model logging.
            model_prefix (str): Optional to add a model prefix name for logging.
            train_and_test (bool): Whether to us train or train and test set
            wait (bool): Either to wait in the console uppon succes or escape function
        Returns:
            json: an object with metadata on success
        """
        ready_to_go = True
        test_dataset = {"Status": "No test dataset used!"}
        train_dataset = self.lv.describe_dataset(ProjectName=self.project_name, DatasetType="train")[
            "DatasetDescription"
        ]
        normal_no_images_train = train_dataset["ImageStats"]["Normal"]
        anomaly_no_images_train = train_dataset["ImageStats"]["Anomaly"]
        try:
            # Check the minimum-image requirements for the chosen setup.
            if train_and_test:
                test_dataset = self.lv.describe_dataset(ProjectName=self.project_name, DatasetType="test")[
                    "DatasetDescription"
                ]
                normal_no_images_test = test_dataset["ImageStats"]["Normal"]
                anomaly_no_images_test = test_dataset["ImageStats"]["Anomaly"]
                if normal_no_images_train < 10 and normal_no_images_test < 10 and anomaly_no_images_test < 10:
                    ready_to_go = False
            else:
                if normal_no_images_train < 20 and anomaly_no_images_train < 10:
                    ready_to_go = False
        except Exception as e:
            if "ResourceNotFoundException" in str(e):
                print(
                    "There is no Test Dataset, hence model will be trained with Training Dataset only and can not be validated with Test Dataset"
                )
        if ready_to_go:
            try:
                model = self.lv.create_model(
                    ProjectName=self.project_name,
                    OutputConfig={
                        "S3Location": {
                            "Bucket": output_bucket,
                            "Prefix": model_prefix if model_prefix is not None else "",
                        }
                    },
                )["ModelMetadata"]
                if wait:
                    # Poll once a minute until training finishes or fails.
                    print("Model training started:", end=" ")
                    version = model["ModelVersion"]
                    status = model["Status"]
                    while status not in ["TRAINED", "TRAINING_FAILED"]:
                        update = self.lv.describe_model(ProjectName=self.project_name, ModelVersion=version)[
                            "ModelDescription"
                        ]
                        status = update["Status"]
                        print("-", end="")
                        time.sleep(60)
                    print("!")
                else:
                    print(
                        """Model is being created. Training will take a while.\n
                        Please check your Management Console on progress.\n
                        You can continue with deployment once the model is trained.\n
                        """
                    )
                # Return success
                return {
                    "status": "Success!",
                    "project": self.project_name,
                    "train_datasets": train_dataset,
                    "test_datasets": test_dataset,
                    "model": model,
                }
            except Exception as e:
                if "ServiceQuotaExceededException" in str(e):
                    print(
                        "You've reached the limit (2) for concurrent model trainings. Try again after at least one model has finished training. You can also request a limit increase. "
                    )
        else:
            print(
                """Number of images is not sufficient, at least 20 normal and 10 anomaly\n
                imgages are required for training images
                """
            )
        # Reached on quota errors or insufficient images.
        return {
            "status": "Failure!",
            "project": self.project_name,
            "train_datasets": train_dataset,
            "test_datasets": test_dataset,
            "model": None,
        }
def deploy(self, min_inf_units=1, model_version=None, wait=True):
"""Deploy your model.
Args:
min_inf_units (int): Minimal number | |
verbose=False, i_up=None, i_G1=None, UW=None, UUW=None, h2=0.5, **kwargs):
'''
For a given h2, finds the optimal kernel mixture weight a2 and returns the negative log-likelihood
Find the optimal a2 given h2, such that K=(1.0-a2)*K0+a2*K1. Performs a double loop optimization (could be expensive for large grid-sizes)
(default maxA2 value is set to 1 as loss of positive definiteness of the final model covariance only depends on h2, not a2)
Allows to provide a second "low-rank" kernel matrix in form of a rotated design matrix W
second kernel K2 = W.dot(W.T))
W may hold a design matrix G1 of a second kernel and some columns that are identical to columns of the design matrix of the first kernel to enable subtracting out sub kernels (as for correcting for proximal contamination)
Args:
h2 : "heritability" of the kernel matrix
nGridA2 : number of a2-grid points to evaluate the negative log-likelihood at. Number of grid points for Brent search intervals (default: 10)
minA2 : minimum value for a2 optimization
maxA2 : maximum value for a2 optimization
verbose : verbose output? (default: False)
i_up : indices of columns in W corresponding to columns from first kernel that are subtracted of
i_G1 : indeces of columns in W corresponding to columns of the design matrix for second kernel G1
UW : U.T.dot(W), where W is [N x S2] np.array holding the design matrix of the second kernel
UUW : W - U.dot(U.T.dot(W)) (provide None if U is full rank)
Returns:
dictionary containing the model parameters at the optimal a2
'''
if self.Y.shape[1] > 1:
print "not implemented"
raise NotImplementedError("only single pheno case implemented")
self.numcalls = 0
resmin = [None]
def f(x,resmin=resmin, **kwargs):
self.numcalls+=1
t0 = time.time()
h2_1 = (1.0 - h2) * x
res = self.nLLeval_2K(h2_1=h2_1, i_up=i_up, i_G1=i_G1, UW=UW, UUW=UUW, h2=h2, **kwargs)
if (resmin[0] is None) or (res['nLL'] < resmin[0]['nLL']):
resmin[0] = res
t1 = time.time()
#print "one objective function call took %.2f seconds elapsed" % (t1-t0)
#import pdb; pdb.set_trace()
return res['nLL']
if verbose: print "finda2"
min = minimize1D(f=f, nGrid=nGridA2, minval=minA2, maxval=maxA2,verbose=False)
#print "numcalls to innerLoopTwoKernel= " + str(self.numcalls)
return resmin[0]
def findH2_2K(self, nGridH2=10, minH2=0.0, maxH2=0.99999, nGridA2=10, minA2=0.0, maxA2=1.0, i_up=None, i_G1=None, UW=None, UUW=None, **kwargs):
'''
Find the optimal h2 and a2 for a given K (and G1 - if provided in W).
(default maxH2 value is set to a value smaller than 1 to avoid loss of positive definiteness of the final model covariance)
Allows to provide a second "low-rank" kernel matrix in form of a rotated design matrix W
second kernel K2 = W.dot(W.T))
W may hold a design matrix G1 of a second kernel and some columns that are identical to columns of the design matrix of the first kernel to enable subtracting out sub kernels (as for correcting for proximal contamination)
Args:
nGridH2 : number of h2-grid points to evaluate the negative log-likelihood at. Number of grid points for Brent search intervals (default: 10)
minH2 : minimum value for h2 optimization
maxH2 : maximum value for h2 optimization
nGridA2 : number of a2-grid points to evaluate the negative log-likelihood at. Number of grid points for Brent search intervals (default: 10)
minA2 : minimum value for a2 optimization
maxA2 : maximum value for a2 optimization
i_up : indices of columns in W corresponding to columns from first kernel that are subtracted of
i_G1 : indeces of columns in W corresponding to columns of the design matrix for second kernel G1
UW : U.T.dot(W), where W is [N x S2] np.array holding the design matrix of the second kernel
UUW : W - U.dot(U.T.dot(W)) (provide None if U is full rank)
Returns:
dictionary containing the model parameters at the optimal h2 (and a2 if a G1 is provided in W)
'''
#f = lambda x : (self.nLLeval(h2=x,**kwargs)['nLL'])
if self.Y.shape[1] > 1:
print "not implemented"
raise NotImplementedError("only single pheno case implemented")
resmin = [None]
noG1 = True
if i_G1.any():
noG1 = False
def f(x,resmin=resmin,**kwargs):
if noG1:
res = self.nLLeval_2K(h2_1=0.0, i_up=i_up, i_G1=i_G1, UW=UW, UUW=UUW, h2=x, **kwargs)
else:
res = self.innerLoop_2K(h2=x, i_up=i_up, i_G1=i_G1, UW=UW, UUW=UUW, nGridA2=nGridA2, minA2=minA2, maxA2=maxA2, **kwargs)
if (resmin[0] is None) or (res['nLL'] < resmin[0]['nLL']):
resmin[0] = res
return res['nLL']
min = minimize1D(f=f, nGrid=nGridH2, minval=minH2, maxval=maxH2)
return resmin[0]
def find_log_delta(self, sid_count=1, min_log_delta=-5, max_log_delta=10, nGrid=10, **kwargs):
'''
perform search for optimal log delta (single kernel case)
Args:
sid_count: number of log delta grid points to evaluate the negative log-likelihood at. Number of columns in design matrix for kernel for normalization (default: 10)
min_log_delta: minimum value for log delta search (default: -5)
max_log_delta: maximum value for log delta search (default: 5)
nGrid: number of grid points for Brent search intervals (default: 10)
Returns:
dictionary containing the model parameters at the optimal log delta
'''
#f = lambda x : (self.nLLeval(h2=x,**kwargs)['nLL'])
resmin = [None]
#logging.info("starting log_delta search")
def f(x,resmin=resmin,**kwargs):
h2 = 1.0 / (np.exp(x) * sid_count + 1) #We convert from external log_delta to h2 and then back again so that this
#code is most similar to findH2
res = self.nLLeval(h2=h2,**kwargs)
if (resmin[0] is None) or (res['nLL'] < resmin[0]['nLL']):
resmin[0] = res
#logging.info("search\t{0}\t{1}".format(x,res['nLL']))
return res['nLL']
min = minimize1D(f=f, nGrid=nGrid, minval=min_log_delta, maxval=max_log_delta)
res = resmin[0]
internal_delta = 1.0 / res['h2'] - 1.0
ln_external_delta = np.log(internal_delta / sid_count)
res['log_delta'] = ln_external_delta
return res
    def findH2(self, nGridH2=10, minH2=0.0, maxH2=0.99999, estimate_Bayes=False, **kwargs):
        '''
        Find the optimal h2 for a given K. Note that this is the single kernel case. So there is no a2.
        (default maxH2 value is set to a value smaller than 1 to avoid loss of positive definiteness of the final model covariance)
        Args:
            nGridH2 : number of h2-grid points to evaluate the negative log-likelihood at (default: 10)
            minH2   : minimum value for h2 optimization (default: 0.0)
            maxH2   : maximum value for h2 optimization (default: 0.99999)
            estimate_Bayes: implement me! (default: False)
        Returns:
            dictionary containing the model parameters at the optimal h2
            (one dictionary per phenotype column for the multi-pheno case)
        '''
        #f = lambda x : (self.nLLeval(h2=x,**kwargs)['nLL'])
        # One running-minimum slot per phenotype column.
        resmin = [None for i in xrange(self.Y.shape[1])]
        #logging.info("starting H2 search")
        assert estimate_Bayes == False, "not implemented"
        if self.Y.shape[1] > 1:
            # Multi-phenotype case: grid-evaluate and track a per-column
            # minimum, since each column may prefer a different h2.
            def f(x):
                res = self.nLLeval(h2=x,**kwargs)
                #check all results for local minimum:
                for i in xrange(self.Y.shape[1]):
                    if (resmin[i] is None) or (res['nLL'][i] < resmin[i]['nLL']):
                        resmin[i] = res.copy()
                        resmin[i]['nLL'] = res['nLL'][i]
                #logging.info("search\t{0}\t{1}".format(x,res['nLL']))
                return res['nLL']
            (evalgrid,resultgrid) = evalgrid1D(f, evalgrid = None, nGrid=nGridH2, minval=minH2, maxval = maxH2, dimF=self.Y.shape[1])
            #import ipdb;ipdb.set_trace()
            return resmin
        elif estimate_Bayes:
            # NOTE(review): unreachable -- the assert above guarantees
            # estimate_Bayes is False. Kept for when it gets implemented.
            def f(x):
                res = self.nLLeval(h2=x,**kwargs)
                #logging.info("search\t{0}\t{1}".format(x,res['nLL']))
                return res['nLL']
            (evalgrid,resultgrid) = evalgrid1D(f, evalgrid = None, nGrid=nGridH2, minval=minH2, maxval = maxH2, dimF=self.Y.shape[1])
            # Posterior mean of h2 under the (unnormalized) likelihood weights.
            lik = np.exp(-resultgrid)
            evalgrid = lik * evalgrid[:,np.newaxis]
            posterior_mean = evalgrid.sum(0) / lik.sum(0)
            return posterior_mean
        else:
            # Single-phenotype case: Brent-style 1D search for the minimum.
            def f(x,resmin=resmin):
                res = self.nLLeval(h2=x,**kwargs)
                if (resmin[0] is None) or (res['nLL'] < resmin[0]['nLL']):
                    resmin[0] = res
                logging.debug("search\t{0}\t{1}".format(x,res['nLL']))
                return res['nLL'][0]
            min = minimize1D(f=f, nGrid=nGridH2, minval=minH2, maxval=maxH2)
            #logging.info("search\t{0}\t{1}".format("?",resmin[0]))
            return resmin[0]
def posterior_h2(self, nGridH2=1000, minH2=0.0, maxH2=0.99999, **kwargs):
'''
Find the optimal h2 for a given K. Note that this is the single kernel case. So there is no a2.
(default maxH2 value is set to a value smaller than 1 to avoid loss of positive definiteness of the final model covariance)
Args:
nGridH2 : number of h2-grid points to evaluate the negative log-likelihood at. Number of columns in design matrix for kernel for normalization (default: 10)
minH2 : minimum value for h2 optimization (default: 0.0)
maxH2 : maximum value for h2 optimization (default: 0.99999)
estimate_Bayes: implement me! (default: False)
Returns:
dictionary containing the model parameters at the optimal h2
'''
#f = lambda x : (self.nLLeval(h2=x,**kwargs)['nLL'])
resmin = [None]
#logging.info("starting H2 search")
assert self.Y.shape[1] == 1, "only works for single phenotype"
def f(x):
res = self.nLLeval(h2=x,**kwargs)
#check all results for local minimum:
if (resmin[0] is None):
resmin[0] = {'nLL':res['nLL'],'h2':np.zeros_like(res['nLL'])+res['h2']}
else:
for i in xrange(self.Y.shape[1]):
if (res['nLL'][i] < resmin[0]['nLL'][i]):
resmin[0]['nLL'][i] = res['nLL'][i]
resmin[0]['h2'][i] | |
that contains classes sub-packages from the
For a split package this contains a list of classes in that package that are
provided by the bootclasspath_fragment and a list of classes
"""
# The list of classes/sub-packages that is provided by the
# bootclasspath_fragment.
bcpf: typing.List[str]
# The list of classes/sub-packages that is provided by other modules on the
# bootclasspath.
other: typing.List[str]
@dataclasses.dataclass()
class Result:
    """Encapsulates the result of the analysis.

    Collects everything the analyzer discovered: flag diffs, the package
    classification (split/single/prefix) and the property/file changes that
    should be applied to the bootclasspath_fragment.
    """

    # The diffs in the flags.
    diffs: typing.Optional[FlagDiffs] = None

    # A map from package name to the reason why it belongs in the
    # split_packages property.
    split_packages: typing.Dict[str, PackagePropertyReason] = dataclasses.field(
        default_factory=dict)

    # A map from package name to the reason why it belongs in the
    # single_packages property.
    single_packages: typing.Dict[str,
                                 PackagePropertyReason] = dataclasses.field(
                                     default_factory=dict)

    # The list of packages to add to the package_prefixes property.
    package_prefixes: typing.List[str] = dataclasses.field(default_factory=list)

    # The bootclasspath_fragment hidden API properties changes.
    property_changes: typing.List[HiddenApiPropertyChange] = dataclasses.field(
        default_factory=list)

    # The list of file changes.
    file_changes: typing.List[FileChange] = dataclasses.field(
        default_factory=list)
class ClassProvider(enum.Enum):
    """The source of a class found during the hidden API processing"""

    # The class is provided by the bootclasspath_fragment being analyzed.
    BCPF = "bcpf"
    # The class is provided by some other module on the bootclasspath.
    OTHER = "other"
# A fake member to use when using the signature trie to compute the package
# properties from hidden API flags. This is needed because while that
# computation only cares about classes the trie expects a class to be an
# interior node but without a member it makes the class a leaf node. That causes
# problems when analyzing inner classes as the outer class is a leaf node for
# its own entry but is used as an interior node for inner classes.
_FAKE_MEMBER: typing.Final = ";->fake()V"
@dataclasses.dataclass()
class BcpfAnalyzer:
    """Analyzes the hidden API artifacts of a bootclasspath_fragment module."""

    # Path to this tool.
    tool_path: str

    # Directory pointed to by ANDROID_BUILD_OUT
    top_dir: str

    # Directory pointed to by OUT_DIR of {top_dir}/out if that is not set.
    out_dir: str

    # Directory pointed to by ANDROID_PRODUCT_OUT.
    product_out_dir: str

    # The name of the bootclasspath_fragment module.
    bcpf: str

    # The name of the apex module containing {bcpf}, only used for
    # informational purposes.
    apex: str

    # The name of the sdk module containing {bcpf}, only used for
    # informational purposes.
    sdk: str

    # If true then this will attempt to automatically fix any issues that are
    # found.
    fix: bool = False

    # All the signatures, loaded from all-flags.csv, initialized by
    # load_all_flags().
    _signatures: typing.Set[str] = dataclasses.field(default_factory=set)

    # All the classes, loaded from all-flags.csv, initialized by
    # load_all_flags().
    _classes: typing.Set[str] = dataclasses.field(default_factory=set)

    # Information loaded from module-info.json, initialized by
    # load_module_info().
    module_info: ModuleInfo = None
@staticmethod
def reformat_report_test(text):
return re.sub(r"(.)\n([^\s])", r"\1 \2", text)
def report(self, text="", **kwargs):
# Concatenate lines that are not separated by a blank line together to
# eliminate formatting applied to the supplied text to adhere to python
# line length limitations.
text = self.reformat_report_test(text)
logging.info("%s", text, **kwargs)
def report_dedent(self, text, **kwargs):
text = textwrap.dedent(text)
self.report(text, **kwargs)
    def run_command(self, cmd, *args, **kwargs):
        """Run *cmd* (an argv list) from the build top directory.

        Stdout and stderr are merged and forwarded to the subprocess log
        stream; a non-zero exit status raises CalledProcessError (check=True).
        """
        cmd_line = " ".join(cmd)
        logging.debug("Running %s", cmd_line)
        subprocess.run(
            cmd,
            *args,
            check=True,
            cwd=self.top_dir,
            stderr=subprocess.STDOUT,
            stdout=log_stream_for_subprocess(),
            text=True,
            **kwargs)
@property
def signatures(self):
if not self._signatures:
raise Exception("signatures has not been initialized")
return self._signatures
@property
def classes(self):
if not self._classes:
raise Exception("classes has not been initialized")
return self._classes
def load_all_flags(self):
all_flags = self.find_bootclasspath_fragment_output_file(
"all-flags.csv")
# Extract the set of signatures and a separate set of classes produced
# by the bootclasspath_fragment.
with open(all_flags, "r", encoding="utf8") as f:
for line in newline_stripping_iter(f.readline):
signature = self.line_to_signature(line)
self._signatures.add(signature)
class_name = self.signature_to_class(signature)
self._classes.add(class_name)
    def load_module_info(self):
        """Build (if stale) and load module-info.json into self.module_info."""
        module_info_file = os.path.join(self.product_out_dir,
                                        "module-info.json")
        self.report(f"\nMaking sure that {module_info_file} is up to date.\n")
        # Kick off the build and stream its output to the debug log.
        output = self.build_file_read_output(module_info_file)
        lines = output.lines()
        for line in lines:
            logging.debug("%s", line)
        output.wait(timeout=10)
        if output.returncode:
            raise Exception(f"Error building {module_info_file}")
        abs_module_info_file = os.path.join(self.top_dir, module_info_file)
        self.module_info = ModuleInfo.load(abs_module_info_file)
@staticmethod
def line_to_signature(line):
return line.split(",")[0]
@staticmethod
def signature_to_class(signature):
return signature.split(";->")[0]
@staticmethod
def to_parent_package(pkg_or_class):
return pkg_or_class.rsplit("/", 1)[0]
    def module_path(self, module_name):
        """Return the source path of *module_name* from module-info.json."""
        return self.module_info.module_path(module_name)
def module_out_dir(self, module_name):
module_path = self.module_path(module_name)
return os.path.join(self.out_dir, "soong/.intermediates", module_path,
module_name)
def find_bootclasspath_fragment_output_file(self, basename, required=True):
# Find the output file of the bootclasspath_fragment with the specified
# base name.
found_file = ""
bcpf_out_dir = self.module_out_dir(self.bcpf)
for (dirpath, _, filenames) in os.walk(bcpf_out_dir):
for f in filenames:
if f == basename:
found_file = os.path.join(dirpath, f)
break
if not found_file and required:
raise Exception(f"Could not find {basename} in {bcpf_out_dir}")
return found_file
def analyze(self):
    """Analyze a bootclasspath_fragment module.

    Provides help in resolving any existing issues and provides
    optimizations that can be applied.
    """
    self.report(f"Analyzing bootclasspath_fragment module {self.bcpf}")
    # Print a checklist of manual prerequisites before doing any work.
    # NOTE(review): the interior indentation of these dedent strings was
    # lost upstream — confirm against the original before relying on the
    # rendered layout.
    self.report_dedent(f"""
Run this tool to help initialize a bootclasspath_fragment module.
Before you start make sure that:
1. The current checkout is up to date.
2. The environment has been initialized using lunch, e.g.
lunch aosp_arm64-userdebug
3. You have added a bootclasspath_fragment module to the appropriate
Android.bp file. Something like this:
bootclasspath_fragment {{
name: "{self.bcpf}",
contents: [
"...",
],
// The bootclasspath_fragments that provide APIs on which this
// depends.
fragments: [
{{
apex: "com.android.art",
module: "art-bootclasspath-fragment",
}},
],
}}
4. You have added it to the platform_bootclasspath module in
frameworks/base/boot/Android.bp. Something like this:
platform_bootclasspath {{
name: "platform-bootclasspath",
fragments: [
...
{{
apex: "{self.apex}",
module: "{self.bcpf}",
}},
],
}}
5. You have added an sdk module. Something like this:
sdk {{
name: "{self.sdk}",
bootclasspath_fragments: ["{self.bcpf}"],
}}
""")
    # Make sure that the module-info.json file is up to date.
    self.load_module_info()
    self.report_dedent("""
Cleaning potentially stale files.
""")
    # Remove the out/soong/hiddenapi files.
    shutil.rmtree(f"{self.out_dir}/soong/hiddenapi", ignore_errors=True)
    # Remove any bootclasspath_fragment output files.
    shutil.rmtree(self.module_out_dir(self.bcpf), ignore_errors=True)
    # Rebuild the monolithic hidden API data, then compare against it.
    self.build_monolithic_stubs_flags()
    result = Result()
    self.build_monolithic_flags(result)
    self.analyze_hiddenapi_package_properties(result)
    self.explain_how_to_check_signature_patterns()
    # If there were any changes that need to be made to the Android.bp
    # file then either apply or report them.
    if result.property_changes:
        bcpf_dir = self.module_info.module_path(self.bcpf)
        bcpf_bp_file = os.path.join(self.top_dir, bcpf_dir, "Android.bp")
        if self.fix:
            # bpmodify lives in the same directory as this tool.
            tool_dir = os.path.dirname(self.tool_path)
            bpmodify_path = os.path.join(tool_dir, "bpmodify")
            bpmodify_runner = BpModifyRunner(bpmodify_path)
            for property_change in result.property_changes:
                property_change.fix_bp_file(bcpf_bp_file, self.bcpf,
                                            bpmodify_runner)
            result.file_changes.append(
                self.new_file_change(
                    bcpf_bp_file,
                    f"Updated hidden_api properties of '{self.bcpf}'"))
        else:
            # Not fixing: build a ready-to-paste snippet instead.
            hiddenapi_snippet = ""
            for property_change in result.property_changes:
                hiddenapi_snippet += property_change.snippet(" ")
            # Remove leading and trailing blank lines.
            hiddenapi_snippet = hiddenapi_snippet.strip("\n")
            result.file_changes.append(
                self.new_file_change(
                    bcpf_bp_file, f"""
Add the following snippet into the {self.bcpf} bootclasspath_fragment module
in the {bcpf_dir}/Android.bp file. If the hidden_api block already exists then
merge these properties into it.
hidden_api: {{
{hiddenapi_snippet}
}},
"""))
    # Finally, summarize every file that was (or needs to be) changed.
    if result.file_changes:
        if self.fix:
            file_change_message = textwrap.dedent("""
The following files were modified by this script:
""")
        else:
            file_change_message = textwrap.dedent("""
The following modifications need to be made:
""")
        self.report(file_change_message)
        result.file_changes.sort()
        for file_change in result.file_changes:
            self.report(f" {file_change.path}")
            self.report(f" {file_change.description}")
            self.report()
        if not self.fix:
            self.report_dedent("""
Run the command again with the --fix option to automatically
make the above changes.
""".lstrip("\n"))
def new_file_change(self, file, description):
    """Build a FileChange whose path is relative to the source tree root."""
    relative_path = os.path.relpath(file, self.top_dir)
    return FileChange(path=relative_path, description=description)
def check_inconsistent_flag_lines(self, significant, module_line,
                                  monolithic_line, separator_line):
    """Validate one (module, monolithic, separator) triple from the report.

    Exits the process when the triple is malformed; logs the triple when it
    is significant for the module being analyzed.
    """
    well_formed = (module_line.startswith("< ")
                   and monolithic_line.startswith("> ")
                   and not separator_line)
    if not well_formed:
        # Something went wrong.
        self.report("Invalid build output detected:")
        self.report(f" module_line: '{module_line}'")
        self.report(f" monolithic_line: '{monolithic_line}'")
        self.report(f" separator_line: '{separator_line}'")
        sys.exit(1)
    if significant:
        for line in (module_line, monolithic_line, separator_line):
            logging.debug("%s", line)
def scan_inconsistent_flags_report(self, lines):
"""Scans a hidden API flags report
The hidden API inconsistent flags report which looks something like
this.
< out/soong/.intermediates/.../filtered-stub-flags.csv
> out/soong/hiddenapi/hiddenapi-stub-flags.txt
< Landroid/compat/Compatibility;->clearOverrides()V
> Landroid/compat/Compatibility;->clearOverrides()V,core-platform-api
"""
# The basic format of an entry in the inconsistent flags report is:
# <module specific flag>
# <monolithic flag>
# <separator>
#
# Wrap the lines iterator in an iterator which returns a tuple
# consisting of the three separate lines.
triples = zip(lines, lines, lines)
module_line, monolithic_line, separator_line = next(triples)
significant = False
bcpf_dir = self.module_info.module_path(self.bcpf)
if os.path.join(bcpf_dir, self.bcpf) in module_line:
# These errors are related to the bcpf being analyzed so
# keep them.
significant = True
else:
self.report(f"Filtering out errors related to {module_line}")
self.check_inconsistent_flag_lines(significant, module_line,
monolithic_line, separator_line)
diffs = {}
for module_line, monolithic_line, separator_line in triples:
self.check_inconsistent_flag_lines(significant, module_line,
monolithic_line, "")
module_parts = module_line.removeprefix("< ").split(",")
module_signature = module_parts[0]
module_flags = module_parts[1:]
monolithic_parts = monolithic_line.removeprefix("> ").split(",")
monolithic_signature = monolithic_parts[0]
monolithic_flags = monolithic_parts[1:]
if module_signature != monolithic_signature:
# Something went wrong.
self.report("Inconsistent signatures detected:")
self.report(f" module_signature: '{module_signature}'")
self.report(f" monolithic_signature: '{monolithic_signature}'")
sys.exit(1)
diffs[module_signature] = (module_flags, monolithic_flags)
if separator_line:
# If the separator line is not blank | |
# <gh_stars>0  (dataset artifact, commented out so the module parses)
from __future__ import absolute_import
from __future__ import division
from builtins import map
from builtins import range
from past.utils import old_div
from builtins import object
from .consts import *
from .utils import *
from six.moves import map
from six.moves import range
class _dumb_repr(object):
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.__dict__)
class SWFRawTag(_dumb_repr):
    """A raw SWF tag: its header plus the stream position of its content."""
    def __init__(self, s=None):
        # `s` is a stream object exposing tell()/readtag_header(); parsing
        # is optional so empty tags can be constructed and filled later.
        if not s is None:
            self.parse(s)
    def parse(self, s):
        """Read the tag header from `s`, remember where the content begins,
        then rewind the stream to the start of the tag."""
        pos = s.tell()
        self.header = s.readtag_header()
        self.pos_content = s.tell()
        # Rewind so callers can re-read the whole tag from its start.
        s.f.seek(pos)
        #self.bytes = s.f.read(self.header.tag_length())
        #s.f.seek(self.pos_content)
class SWFStraightEdge(_dumb_repr):
    """A straight shape edge from `start` to `to`, tagged with style indices."""

    def __init__(self, start, to, line_style_idx, fill_style_idx):
        self.start = start
        self.to = to
        self.line_style_idx = line_style_idx
        self.fill_style_idx = fill_style_idx

    def reverse_with_new_fillstyle(self, new_fill_idx):
        """Return a copy travelling in the opposite direction with the fill
        style replaced by `new_fill_idx`."""
        return SWFStraightEdge(
            self.to, self.start, self.line_style_idx, new_fill_idx)
class SWFCurvedEdge(SWFStraightEdge):
    """A quadratic curve edge: like a straight edge plus a control point."""

    def __init__(self, start, control, to, line_style_idx, fill_style_idx):
        super(SWFCurvedEdge, self).__init__(
            start, to, line_style_idx, fill_style_idx)
        self.control = control

    def reverse_with_new_fillstyle(self, new_fill_idx):
        """Return the reversed curve (same control point) with a new fill."""
        return SWFCurvedEdge(
            self.to, self.control, self.start, self.line_style_idx, new_fill_idx)
class SWFShape(_dumb_repr):
def __init__(self, data=None, level=1, unit_divisor=20.0):
    """Shape container.

    :param data: optional SWF stream; when given, the shape is parsed eagerly.
    :param level: shape record version passed through to the record readers.
    :param unit_divisor: coordinate units per pixel (default 20.0 —
        presumably SWF's twips-per-pixel; confirm against the spec).
    """
    self._records = []             # parsed shape records, in file order
    self._fillStyles = []          # accumulated fill styles (grows on new-style records)
    self._lineStyles = []          # accumulated line styles
    self._postLineStyles = {}
    self._edgeMapsCreated = False  # edge maps are built lazily by _create_edge_maps()
    self.unit_divisor = unit_divisor
    self.fill_edge_maps = []       # one fill edge map per group
    self.line_edge_maps = []       # one line edge map per group
    self.current_fill_edge_map = {}
    self.current_line_edge_map = {}
    self.num_groups = 0
    self.coord_map = {}            # start-coordinate index used while cleaning paths
    if not data is None:
        self.parse(data, level)
def get_dependencies(self):
    """Return the union of the dependencies of all fill and line styles."""
    deps = set()
    for style in self._fillStyles + self._lineStyles:
        deps.update(style.get_dependencies())
    return deps
def parse(self, data, level=1):
    """Read the style-index bit widths, then every shape record from `data`."""
    data.reset_bits_pending()
    # Leading nibbles give the bit width of fill/line style indices.
    fillbits = data.readUB(4)
    linebits = data.readUB(4)
    self.read_shape_records(data, fillbits, linebits, level)
def export(self, handler=None):
    """Export the shape through `handler` and return it.

    When no handler is given, an SVGShapeExporter is created (imported
    locally — presumably to avoid an import cycle at module load).
    """
    self._create_edge_maps()
    if handler is None:
        from .export import SVGShapeExporter
        handler = SVGShapeExporter()
    handler.begin_shape()
    # Fill paths first, then line paths, group by group.
    for i in range(0, self.num_groups):
        self._export_fill_path(handler, i)
        self._export_line_path(handler, i)
    handler.end_shape()
    return handler
@property
def records(self):
    """The parsed shape records, in file order."""
    return self._records
def read_shape_records(self, data, fill_bits, line_bits, level=1):
    """Read shape records from `data` until an end record is seen.

    `fill_bits`/`line_bits` are the index widths for fill/line styles; a
    style-change record carrying new styles redefines them mid-stream.
    """
    shape_record = None
    record_id = 0
    while type(shape_record) != SWFShapeRecordEnd:
        # The SWF10 spec says that shape records are byte aligned.
        # In reality they seem not to be?
        # bitsPending = 0;
        edge_record = (data.readUB(1) == 1)
        if edge_record:
            # Edge record: one more flag bit selects straight vs curved.
            straight_flag = (data.readUB(1) == 1)
            # Bits per coordinate delta.
            num_bits = data.readUB(4) + 2
            if straight_flag:
                shape_record = data.readSTRAIGHTEDGERECORD(num_bits)
            else:
                shape_record = data.readCURVEDEDGERECORD(num_bits)
        else:
            # Non-edge record: five state flags; all zero means end-of-shape.
            states = data.readUB(5)
            if states == 0:
                shape_record = SWFShapeRecordEnd()
            else:
                style_change_record = data.readSTYLECHANGERECORD(states, fill_bits, line_bits, level)
                if style_change_record.state_new_styles:
                    # New style lists also redefine the index bit widths.
                    fill_bits = style_change_record.num_fillbits
                    line_bits = style_change_record.num_linebits
                shape_record = style_change_record
        shape_record.record_id = record_id
        self._records.append(shape_record)
        record_id += 1
        #print shape_record.tostring()
def _create_edge_maps(self):
    """Walk the parsed records once and build per-group fill/line edge maps.

    Each map is keyed by (offset-adjusted) style index and holds the edges
    drawn with that style. A style-change record that zeroes all three
    styles at once is treated as the start of a new group. Idempotent:
    guarded by self._edgeMapsCreated.
    """
    if self._edgeMapsCreated:
        return
    # Current pen position, in shape units.
    xPos = 0
    yPos = 0
    sub_path = []       # edges accumulated since the last style change
    fs_offset = 0       # index offset applied after new fill styles appear
    ls_offset = 0       # index offset applied after new line styles appear
    curr_fs_idx0 = 0    # current "left" fill style index (0 = none)
    curr_fs_idx1 = 0    # current "right" fill style index (0 = none)
    curr_ls_idx = 0     # current line style index (0 = none)
    self.fill_edge_maps = []
    self.line_edge_maps = []
    self.current_fill_edge_map = {}
    self.current_line_edge_map = {}
    self.num_groups = 0
    for i in range(0, len(self._records)):
        rec = self._records[i]
        if rec.type == SWFShapeRecord.TYPE_STYLECHANGE:
            # Any style switch terminates the sub path drawn so far.
            if rec.state_line_style or rec.state_fill_style0 or rec.state_fill_style1:
                if len(sub_path):
                    self._process_sub_path(sub_path, curr_ls_idx, curr_fs_idx0, curr_fs_idx1, rec.record_id)
                sub_path = []
            if rec.state_new_styles:
                # New style lists: later indices address the extended lists,
                # so remember the current lengths as offsets.
                fs_offset = len(self._fillStyles)
                ls_offset = len(self._lineStyles)
                self._append_to(self._fillStyles, rec.fill_styles)
                self._append_to(self._lineStyles, rec.line_styles)
            if rec.state_line_style and rec.state_fill_style0 and rec.state_fill_style1 and \
                    rec.line_style == 0 and rec.fill_style0 == 0 and rec.fill_style1 == 0:
                # new group (probably): flush current maps and reset state
                self._clean_edge_map(self.current_fill_edge_map)
                self._clean_edge_map(self.current_line_edge_map)
                self.fill_edge_maps.append(self.current_fill_edge_map)
                self.line_edge_maps.append(self.current_line_edge_map)
                self.current_fill_edge_map = {}
                self.current_line_edge_map = {}
                self.num_groups += 1
                curr_fs_idx0 = 0
                curr_fs_idx1 = 0
                curr_ls_idx = 0
            else:
                # Apply individual style switches, offset-adjusted.
                if rec.state_line_style:
                    curr_ls_idx = rec.line_style
                    if curr_ls_idx > 0:
                        curr_ls_idx += ls_offset
                if rec.state_fill_style0:
                    curr_fs_idx0 = rec.fill_style0
                    if curr_fs_idx0 > 0:
                        curr_fs_idx0 += fs_offset
                if rec.state_fill_style1:
                    curr_fs_idx1 = rec.fill_style1
                    if curr_fs_idx1 > 0:
                        curr_fs_idx1 += fs_offset
            if rec.state_moveto:
                # Move-to is absolute despite the "delta" field names.
                xPos = rec.move_deltaX
                yPos = rec.move_deltaY
        elif rec.type == SWFShapeRecord.TYPE_STRAIGHTEDGE:
            start = [NumberUtils.round_pixels_400(xPos), NumberUtils.round_pixels_400(yPos)]
            if rec.general_line_flag:
                xPos += rec.deltaX
                yPos += rec.deltaY
            else:
                # Axis-aligned line: only one delta is present.
                if rec.vert_line_flag:
                    yPos += rec.deltaY
                else:
                    xPos += rec.deltaX
            to = [NumberUtils.round_pixels_400(xPos), NumberUtils.round_pixels_400(yPos)]
            sub_path.append(SWFStraightEdge(start, to, curr_ls_idx, curr_fs_idx1))
        elif rec.type == SWFShapeRecord.TYPE_CURVEDEDGE:
            start = [NumberUtils.round_pixels_400(xPos), NumberUtils.round_pixels_400(yPos)]
            # Control point is relative to the start, anchor to the control.
            xPosControl = xPos + rec.control_deltaX
            yPosControl = yPos + rec.control_deltaY
            xPos = xPosControl + rec.anchor_deltaX
            yPos = yPosControl + rec.anchor_deltaY
            control = [xPosControl, yPosControl]
            to = [NumberUtils.round_pixels_400(xPos), NumberUtils.round_pixels_400(yPos)]
            sub_path.append(SWFCurvedEdge(start, control, to, curr_ls_idx, curr_fs_idx1))
        elif rec.type == SWFShapeRecord.TYPE_END:
            # We're done. Process the last subpath, if any
            if len(sub_path) > 0:
                self._process_sub_path(sub_path, curr_ls_idx, curr_fs_idx0, curr_fs_idx1, rec.record_id)
            self._clean_edge_map(self.current_fill_edge_map)
            self._clean_edge_map(self.current_line_edge_map)
            self.fill_edge_maps.append(self.current_fill_edge_map)
            self.line_edge_maps.append(self.current_line_edge_map)
            self.current_fill_edge_map = {}
            self.current_line_edge_map = {}
            self.num_groups += 1
            curr_fs_idx0 = 0
            curr_fs_idx1 = 0
            curr_ls_idx = 0
    self._edgeMapsCreated = True
def _process_sub_path(self, sub_path, linestyle_idx, fillstyle_idx0, fillstyle_idx1, record_id=-1):
path = None
if fillstyle_idx0 != 0:
if not fillstyle_idx0 in self.current_fill_edge_map:
path = self.current_fill_edge_map[fillstyle_idx0] = []
else:
path = self.current_fill_edge_map[fillstyle_idx0]
for j in range(len(sub_path) - 1, -1, -1):
path.append(sub_path[j].reverse_with_new_fillstyle(fillstyle_idx0))
if fillstyle_idx1 != 0:
if not fillstyle_idx1 in self.current_fill_edge_map:
path = self.current_fill_edge_map[fillstyle_idx1] = []
else:
path = self.current_fill_edge_map[fillstyle_idx1]
self._append_to(path, sub_path)
if linestyle_idx != 0:
if not linestyle_idx in self.current_line_edge_map:
path = self.current_line_edge_map[linestyle_idx] = []
else:
path = self.current_line_edge_map[linestyle_idx]
self._append_to(path, sub_path)
def _clean_edge_map(self, edge_map):
    """Reorder each style's edge list so consecutive edges connect end-to-start.

    Greedy chaining: repeatedly consume an edge whose start matches the
    previous edge's end (using the start-coordinate index for fast lookup);
    when no continuation exists, a new chain is started.
    """
    for style_idx in edge_map:
        # NOTE(review): the membership test is redundant — we are iterating
        # the map's own keys, so `style_idx in edge_map` is always True.
        sub_path = edge_map[style_idx] if style_idx in edge_map else None
        if sub_path is not None and len(sub_path) > 0:
            tmp_path = []
            prev_edge = None
            self._create_coord_map(sub_path)
            while len(sub_path) > 0:
                idx = 0
                while idx < len(sub_path):
                    if prev_edge is None or self._equal_point(prev_edge.to, sub_path[idx].start):
                        # Edge continues the current chain: consume it.
                        edge = sub_path[idx]
                        del sub_path[idx]
                        tmp_path.append(edge)
                        self._remove_edge_from_coord_map(edge)
                        prev_edge = edge
                    else:
                        # Jump to an edge that starts where the chain ends.
                        edge = self._find_next_edge_in_coord_map(prev_edge)
                        if not edge is None:
                            idx = sub_path.index(edge)
                        else:
                            # Dead end: restart and begin a new chain.
                            idx = 0
                            prev_edge = None
            edge_map[style_idx] = tmp_path
def _equal_point(self, a, b, tol=0.001):
return (a[0] > b[0]-tol and a[0] < b[0]+tol and a[1] > b[1]-tol and a[1] < b[1]+tol)
def _find_next_edge_in_coord_map(self, edge):
key = "%0.4f_%0.4f" % (edge.to[0], edge.to[1])
if key in self.coord_map and len(self.coord_map[key]) > 0:
return self.coord_map[key][0]
else:
return None
def _create_coord_map(self, path):
self.coord_map = {}
for i in range(0, len(path)):
start = path[i].start
key = "%0.4f_%0.4f" % (start[0], start[1])
coord_map_array = self.coord_map[key] if key in self.coord_map else None
if coord_map_array is None:
self.coord_map[key] = [path[i]]
else:
self.coord_map[key].append(path[i])
def _remove_edge_from_coord_map(self, edge):
key = "%0.4f_%0.4f" % (edge.start[0], edge.start[1])
if key in self.coord_map:
coord_map_array = self.coord_map[key]
if len(coord_map_array) == 1:
del self.coord_map[key]
else:
try:
idx = coord_map_array.index(edge)
del coord_map_array[idx]
except:
pass
def _create_path_from_edge_map(self, edge_map):
new_path = []
style_ids = []
for style_id in edge_map:
style_ids.append(int(style_id))
style_ids = sorted(style_ids)
for i in range(0, len(style_ids)):
self._append_to(new_path, edge_map[style_ids[i]])
return new_path
def _export_fill_path(self, handler, group_index):
    """Emit the fill path of one edge group to `handler`.

    Edges are grouped by fill style; whenever the style changes, the
    matching begin_fill/begin_gradient_fill/begin_bitmap_fill call is made
    before the move/line/curve commands.
    """
    path = self._create_path_from_edge_map(self.fill_edge_maps[group_index])
    pos = [100000000, 100000000]   # sentinel "far away" point so the first edge moves
    u = 1.0 / self.unit_divisor    # shape units -> pixels
    fill_style_idx = 10000000      # sentinel so the first edge triggers a style switch
    if len(path) < 1:
        return
    handler.begin_fills()
    for i in range(0, len(path)):
        e = path[i]
        if fill_style_idx != e.fill_style_idx:
            fill_style_idx = e.fill_style_idx
            pos = [100000000, 100000000]
            # NOTE(review): this broad try/except intentionally covers the
            # whole style dispatch — see the comment in the except clause.
            try:
                fill_style = self._fillStyles[fill_style_idx - 1] if fill_style_idx > 0 else None
                if fill_style.type == 0x0:
                    # solid fill
                    handler.begin_fill(
                        ColorUtils.rgb(fill_style.rgb),
                        ColorUtils.alpha(fill_style.rgb))
                elif fill_style.type in [0x10, 0x12, 0x13]:
                    # gradient fill
                    colors = []
                    ratios = []
                    alphas = []
                    for j in range(0, len(fill_style.gradient.records)):
                        gr = fill_style.gradient.records[j]
                        colors.append(ColorUtils.rgb(gr.color))
                        ratios.append(gr.ratio)
                        alphas.append(ColorUtils.alpha(gr.color))
                    handler.begin_gradient_fill(
                        GradientType.LINEAR if fill_style.type == 0x10 else GradientType.RADIAL,
                        colors, alphas, ratios,
                        fill_style.gradient_matrix,
                        fill_style.gradient.spreadmethod,
                        fill_style.gradient.interpolation_mode,
                        fill_style.gradient.focal_point
                    )
                elif fill_style.type in [0x40, 0x41, 0x42, 0x43]:
                    # bitmap fill (flags encode repeat/smoothing variants)
                    handler.begin_bitmap_fill(
                        fill_style.bitmap_id,
                        fill_style.bitmap_matrix,
                        (fill_style.type == 0x40 or fill_style.type == 0x42),
                        (fill_style.type == 0x40 or fill_style.type == 0x41)
                    )
                    pass
            except:
                # Font shapes define no fillstyles per se, but do reference fillstyle index 1,
                # which represents the font color. We just report solid black in this case.
                handler.begin_fill(0)
        # Only emit a move when this edge does not continue from `pos`.
        if not self._equal_point(pos, e.start):
            handler.move_to(e.start[0] * u, e.start[1] * u)
        if type(e) is SWFCurvedEdge:
            handler.curve_to(e.control[0] * u, e.control[1] * u, e.to[0] * u, e.to[1] * u)
        else:
            handler.line_to(e.to[0] * u, e.to[1] * u)
        pos = e.to
    handler.end_fill()
    handler.end_fills()
def _export_line_path(self, handler, group_index):
path = self._create_path_from_edge_map(self.line_edge_maps[group_index])
pos = [100000000, 100000000]
u = 1.0 / self.unit_divisor
line_style_idx = 10000000
line_style = None
if len(path) < 1:
return
handler.begin_lines()
for i in range(0, len(path)):
e = path[i]
if line_style_idx != e.line_style_idx:
line_style_idx = e.line_style_idx
pos = [100000000, 100000000]
try:
line_style = self._lineStyles[line_style_idx - 1]
except:
line_style = None
if line_style is not None:
scale_mode = LineScaleMode.NORMAL
if line_style.no_hscale_flag and line_style.no_vscale_flag:
scale_mode = LineScaleMode.NONE
elif line_style.no_hscale_flag:
scale_mode = LineScaleMode.HORIZONTAL
elif line_style.no_hscale_flag:
scale_mode = LineScaleMode.VERTICAL
if not line_style.has_fill_flag:
handler.line_style(
line_style.width / 20.0,
ColorUtils.rgb(line_style.color),
ColorUtils.alpha(line_style.color),
line_style.pixelhinting_flag,
scale_mode,
line_style.start_caps_style,
line_style.end_caps_style,
line_style.joint_style,
line_style.miter_limit_factor)
else:
fill_style = line_style.fill_type
if fill_style.type in [0x10, 0x12, 0x13]:
# gradient fill
colors = []
ratios = []
alphas = []
for j in range(0, len(fill_style.gradient.records)):
gr = fill_style.gradient.records[j]
colors.append(ColorUtils.rgb(gr.color))
ratios.append(gr.ratio)
alphas.append(ColorUtils.alpha(gr.color))
handler.line_gradient_style(
line_style.width / 20.0,
line_style.pixelhinting_flag,
scale_mode,
line_style.start_caps_style,
line_style.end_caps_style,
line_style.joint_style,
line_style.miter_limit_factor,
GradientType.LINEAR if fill_style.type == | |
from collections import OrderedDict
import os
import json
import re
import sys
import pytest
import yaml
from conda_build import api, exceptions, variants
from conda_build.utils import package_has_file, FileNotFoundError
thisdir = os.path.dirname(__file__)
recipe_dir = os.path.join(thisdir, 'test-recipes', 'variants')
def test_later_spec_priority(single_version, no_numpy_version):
    """Later specs override earlier ones; non-conflicting keys survive."""
    # single_version last: its single python pin wins.
    specs = OrderedDict()
    specs['no_numpy'] = no_numpy_version
    specs['single_ver'] = single_version
    combined = variants.combine_specs(specs)
    assert len(combined) == 2
    assert combined["python"] == ["2.7.*"]

    # Reversed order: no_numpy's two-valued python list wins instead.
    specs = OrderedDict()
    specs['single_ver'] = single_version
    specs['no_numpy'] = no_numpy_version
    combined = variants.combine_specs(specs)
    assert len(combined) == 2
    assert len(combined["python"]) == 2
def test_get_package_variants_from_file(testing_workdir, testing_config, no_numpy_version):
    """Variants read from a YAML config file should drive rendering."""
    with open('variant_example.yaml', 'w') as f:
        yaml.dump(no_numpy_version, f, default_flow_style=False)
    testing_config.variant_config_files = [
        os.path.join(testing_workdir, 'variant_example.yaml')]
    testing_config.ignore_system_config = True
    rendered = api.render(os.path.join(thisdir, "variant_recipe"),
                          no_download_source=False, config=testing_config)
    # one for each Python version. Numpy is not strictly pinned and should
    # present only 1 dimension.
    assert len(rendered) == 2
    run_reqs = [req for (m, _, _) in rendered
                for req in m.meta['requirements']['run']]
    assert sum('python >=2.7,<2.8' in req for req in run_reqs) == 1
    assert sum('python >=3.5,<3.6' in req for req in run_reqs) == 1
def test_use_selectors_in_variants(testing_workdir, testing_config):
    """Selectors inside a variant config file must not break parsing."""
    config_path = os.path.join(recipe_dir, 'selector_conda_build_config.yaml')
    testing_config.variant_config_files = [config_path]
    variants.get_package_variants(testing_workdir, testing_config)
def test_get_package_variants_from_dictionary_of_lists(testing_config, no_numpy_version):
    """Variants passed directly as a dict of lists should drive rendering."""
    testing_config.ignore_system_config = True
    rendered = api.render(os.path.join(thisdir, "variant_recipe"),
                          no_download_source=False, config=testing_config,
                          variants=no_numpy_version)
    # one per Python version; numpy is not strictly pinned so its dimension
    # collapses.
    assert len(rendered) == 2, rendered
    run_reqs = [req for (m, _, _) in rendered
                for req in m.meta['requirements']['run']]
    assert sum('python >=2.7,<2.8' in req for req in run_reqs) == 1
    assert sum('python >=3.5,<3.6' in req for req in run_reqs) == 1
@pytest.mark.xfail(reason="Strange failure 7/19/2017. Can't reproduce locally. Test runs fine "
                   "with parallelism and everything. Test fails reproducibly on CI, but logging "
                   "into appveyor after failed run, test passes. =(")
def test_variant_with_ignore_numpy_version_reduces_matrix(numpy_version_ignored):
    """Unpinned numpy dimensions collapse even when numpy versions vary."""
    # Two python versions x two numpy versions are defined in the yaml in
    # this folder, but numpy is not pinned, so only the python axis survives.
    rendered = api.render(os.path.join(recipe_dir, '03_numpy_matrix'),
                          variants=numpy_version_ignored, finalize=False)
    assert len(rendered) == 2, rendered
def test_variant_with_numpy_pinned_has_matrix():
    """Pinned numpy keeps the full 2x2 python/numpy build matrix."""
    rendered = api.render(os.path.join(recipe_dir, '04_numpy_matrix_pinned'),
                          finalize=False)
    assert len(rendered) == 4
def test_pinning_in_build_requirements():
    """Build requirements must render as exact 'name version build' pins."""
    meta = api.render(os.path.join(recipe_dir, '05_compatible'))[0][0]
    # every build dep is "name version build_string" -> exactly three tokens
    assert all(len(req.split(' ')) == 3
               for req in meta.meta['requirements']['build'])
@pytest.mark.sanity
def test_no_satisfiable_variants_raises_error():
    """Unsatisfiable deps raise unless permit_unsatisfiable_variants is set."""
    recipe = os.path.join(recipe_dir, '01_basic_templating')
    with pytest.raises(exceptions.DependencyNeedsBuildingError):
        api.render(recipe, permit_unsatisfiable_variants=False)
    # the packages are not installable anyway, so this should only show a
    # warning that the recipe can't be finalized
    api.render(recipe, permit_unsatisfiable_variants=True)
def test_zip_fields():
    """Zipping keys together allows people to tie different versions as sets of combinations."""
    spec = {'python': ['2.7', '3.5'], 'vc': ['9', '14'], 'zip_keys': [('python', 'vc')]}
    combos = variants.dict_of_lists_to_list_of_dicts(spec)
    assert len(combos) == 2
    assert combos[0]['python'] == '2.7' and combos[0]['vc'] == '9'
    assert combos[1]['python'] == '3.5' and combos[1]['vc'] == '14'

    # duplicated values are allowed as long as the list lengths match
    spec = {'python': ['2.7', '2.7'], 'vc': ['9', '14'], 'zip_keys': [('python', 'vc')]}
    combos = variants.dict_of_lists_to_list_of_dicts(spec)
    assert len(combos) == 2
    assert combos[0]['python'] == '2.7' and combos[0]['vc'] == '9'
    assert combos[1]['python'] == '2.7' and combos[1]['vc'] == '14'

    # mismatched lengths should raise an error
    spec = {'python': ['2.7', '3.5', '3.4'], 'vc': ['9', '14'], 'zip_keys': [('python', 'vc')]}
    with pytest.raises(ValueError):
        variants.dict_of_lists_to_list_of_dicts(spec)

    # when one zipped key is completely missing, the zip_keys set is ignored
    spec = {'python': ['2.7', '3.5'], 'zip_keys': [('python', 'vc')]}
    combos = variants.dict_of_lists_to_list_of_dicts(spec)
    assert len(combos) == 2
    assert all('vc' not in combo for combo in combos)
def test_cross_compilers():
    """Cross-compilation recipe renders one variant per target."""
    rendered = api.render(os.path.join(recipe_dir, '09_cross'),
                          permit_unsatisfiable_variants=True,
                          finalize=False, bypass_env_check=True)
    assert len(rendered) == 3
def test_variants_in_output_names():
    """Each variant of each output gets its own distinct output path."""
    paths = api.get_output_file_paths(
        os.path.join(recipe_dir, '11_variant_output_names'))
    assert len(paths) == 4
def test_variants_in_versions_with_setup_py_data(testing_workdir):
    """Version variants sourced from setup.py data produce one output each."""
    recipe = os.path.join(recipe_dir, '12_variant_versions')
    try:
        outputs = api.get_output_file_paths(recipe)
        assert len(outputs) == 2
        basenames = [os.path.basename(pkg) for pkg in outputs]
        assert any(name.startswith('my_package-470.470') for name in basenames)
        assert any(name.startswith('my_package-480.480') for name in basenames)
    except FileNotFoundError:
        # problem with python 3.x with Travis CI somehow. Just ignore it.
        print("Ignoring test on setup.py data - problem with download")
def test_git_variables_with_variants(testing_workdir, testing_config):
    """Git-derived jinja variables should render alongside variants."""
    rendered = api.render(os.path.join(recipe_dir, '13_git_vars'),
                          config=testing_config, finalize=False,
                          bypass_env_check=True)
    meta = rendered[0][0]
    assert meta.version() == "1.20.2"
    assert meta.build_number() == 0
def test_variant_input_with_zip_keys_keeps_zip_keys_list():
    """zip_keys must survive expansion into the per-variant dicts."""
    spec = {'scipy': ['0.17', '0.19'], 'sqlite': ['3'], 'zlib': ['1.2'], 'xz': ['5'],
            'zip_keys': ['macos_min_version', 'macos_machine', 'MACOSX_DEPLOYMENT_TARGET',
                         'CONDA_BUILD_SYSROOT'],
            'pin_run_as_build': {'python': {'min_pin': 'x.x', 'max_pin': 'x.x'}}}
    expanded = variants.dict_of_lists_to_list_of_dicts(
        spec, extend_keys=variants.DEFAULT_VARIANTS['extend_keys'])
    assert len(expanded) == 2
    assert expanded[0].get('zip_keys')
@pytest.mark.serial
@pytest.mark.xfail(sys.platform=='win32', reason="console readout issues on appveyor")
def test_ensure_valid_spec_on_run_and_test(testing_workdir, testing_config, caplog):
    """'.*' is appended only to loosely-pinned run/test specs."""
    testing_config.debug = True
    testing_config.verbose = True
    api.render(os.path.join(recipe_dir, '14_variant_in_run_and_test'),
               config=testing_config)
    log_text = caplog.text
    assert "Adding .* to spec 'pytest 3.2'" in log_text
    assert "Adding .* to spec 'click 6'" in log_text
    # already-exact specs must be left untouched
    assert "Adding .* to spec 'pytest-cov 2.3'" not in log_text
    assert "Adding .* to spec 'pytest-mock 1.6'" not in log_text
def test_serial_builds_have_independent_configs(testing_config):
    """Recipes built in one api.build call must not leak variant config."""
    recipe = os.path.join(recipe_dir, '17_multiple_recipes_independent_config')
    outputs = api.build([os.path.join(recipe, sub) for sub in ('a', 'b')],
                        config=testing_config)
    depends_a = json.loads(package_has_file(outputs[0], 'info/index.json'))['depends']
    assert 'bzip2 >=1,<1.0.7.0a0' in depends_a
    depends_b = json.loads(package_has_file(outputs[1], 'info/index.json'))['depends']
    assert 'bzip2 >=1.0.6,<2.0a0' in depends_b
def test_subspace_selection(testing_config):
    """config.variant selects the matching subspace of zipped variants."""
    recipe = os.path.join(recipe_dir, '18_subspace_selection')

    def count(renders, key, value):
        # number of rendered variants where `key` has `value`
        return sum(meta.config.variant[key] == value for meta, _, _ in renders)

    # there are two entries with a==coffee, so we should end up with 2 variants
    testing_config.variant = {'a': 'coffee'}
    renders = api.render(recipe, config=testing_config, finalize=False,
                         bypass_env_check=True)
    assert len(renders) == 2
    # ensure that the zipped keys still agree
    assert count(renders, 'b', '123') == 1
    assert count(renders, 'b', 'abc') == 1
    assert count(renders, 'b', 'concrete') == 0
    assert count(renders, 'c', 'mooo') == 1
    assert count(renders, 'c', 'baaa') == 1
    assert count(renders, 'c', 'woof') == 0

    # compound selection: only one entry has both a==coffee and b==123
    testing_config.variant = {'a': 'coffee', 'b': '123'}
    renders = api.render(recipe, config=testing_config, finalize=False,
                         bypass_env_check=True)
    assert len(renders) == 1
    assert count(renders, 'b', '123') == 1
    assert count(renders, 'b', 'abc') == 0
    assert count(renders, 'b', 'concrete') == 0
    assert count(renders, 'c', 'mooo') == 1
    assert count(renders, 'c', 'baaa') == 0
    assert count(renders, 'c', 'woof') == 0

    # only c provided and its value matches no existing c: ambiguous which
    # zipped values to choose, so this must raise
    testing_config.variant = {'c': 'not an animal'}
    with pytest.raises(ValueError):
        api.render(recipe, config=testing_config, finalize=False,
                   bypass_env_check=True)

    # all zipped keys provided by the new variant. It should clobber the old one.
    testing_config.variant = {'a': 'some', 'b': 'new', 'c': 'animal'}
    renders = api.render(recipe, config=testing_config, finalize=False,
                         bypass_env_check=True)
    assert len(renders) == 1
    selected = renders[0][0].config.variant
    assert selected['a'] == 'some'
    assert selected['b'] == 'new'
    assert selected['c'] == 'animal'
def test_get_used_loop_vars(testing_config):
    """Only variables actually referenced by the recipe count as used."""
    meta = api.render(os.path.join(recipe_dir, '19_used_variables'),
                      finalize=False, bypass_env_check=True)[0][0]
    # conda_build_config.yaml has 4 loop variables defined, but only 3 are used.
    # python and zlib are both implicitly used (depend on name matching), while
    # some_package is explicitly used as a jinja2 variable
    assert meta.get_used_loop_vars() == {'python', 'some_package'}
    # all used vars, including single-valued ones (which are not loop vars)
    assert meta.get_used_vars() == {'python', 'some_package', 'zlib', 'pthread_stubs'}
def test_reprovisioning_source(testing_config):
    """Rendering a recipe that re-provisions its source must not raise.

    The render call itself is the assertion; a regression raises. The
    previous version bound the result to an unused local (`ms`), which is
    dropped here.
    """
    api.render(os.path.join(recipe_dir, '20_reprovision_source'))
def test_reduced_hashing_behavior(testing_config):
# recipes using any compiler jinja2 function need a hash
m = api.render(os.path.join(recipe_dir, '26_reduced_hashing', 'hash_yes_compiler'),
finalize=False, bypass_env_check=True)[0][0]
assert 'c_compiler' in m.get_hash_contents(), "hash contents should contain c_compiler"
assert re.search('h[0-9a-f]{%d}' % testing_config.hash_length, m.build_id()), \
"hash should be present when compiler jinja2 function is used"
# recipes that use some variable in conda_build_config.yaml to control what
# versions are present at build time also must have a hash (except
# python, r_base, and the | |
# <gh_stars>10-100  (dataset artifact, commented out so the module parses)
"""
Code generator for ECOS C folder.
Spits out
Makefile
socp2prob.(c/h)
prob2socp.(c/h)
Will copy files to a folder with name "name". Produce a Makefile that compiles
the object files. Only really need one copy of qcml_utils across all generated
code....
Need to compile qcml_utils.c to a qcml_utils.o, but only need to link it into
the matrix stuffing object.
Links with ECOS library.
An easy way to stuff the matrices (without calling "compress" on the
triplet form) is to write the matrix column by column, but I haven't
done this yet. It would be a slight improvement in performance.
"""
import os, shutil, site, math
from collections import Counter
from .. base_codegen import Codegen, CodegenVariable
from ... mixins import RestrictedMultiplyMixin
from ... ast.expressions import expression
from ... properties import shape
from ... properties.curvature import isconstant
from ... codes import OnesCoeff, ConstantCoeff
from ... codes.function import CFunction
from ... codes.encoders import toC
from ... properties.abstract_dim import AbstractDim
def write_template(template_file, new_file, code):
    """Render `template_file` into `new_file`, interpolating `code` into each
    line with old-style '%' formatting."""
    with open(template_file, 'r') as template, open(new_file, 'w') as output:
        rendered_lines = (line % code for line in template)
        output.writelines(rendered_lines)
def shape_to_c_type(x):
    """Map a codegen variable or abstract shape to the C type used in the
    generated code."""
    if isinstance(x, CodegenVariable):
        # scalar variables are plain doubles; longer ones become arrays
        return "double" if x.length == 1 else "double *"
    if shape.isscalar(x):
        return "double"
    if shape.isvector(x):
        return "double *"
    if shape.ismatrix(x):
        return "qc_matrix *"
    raise Exception("Unknown shape...")
class C_Codegen(RestrictedMultiplyMixin, Codegen):
""" This produces two functions and a header file.
"""
    def __init__(self):
        """Set up the two generated CFunctions (prob2socp / socp2prob) and the
        bookkeeping (nonzero counts, problem sizes) used while emitting C."""
        super(C_Codegen, self).__init__()
        # TODO: allow optimizations with given sparsity pattern
        # functions we are going to generate; "{name}" is interpolated with the
        # problem name at save() time
        self._code = {}
        self._code['prob2socp'] = CFunction("qc_{name}2socp",
            arguments = ["const {name}_params * params",
                         "const {name}_dims * dims"],
            ret_type="qc_socp *")
        self._code['socp2prob'] = CFunction("qc_socp2{name}",
            arguments = ["const double * x", "const double * y", "const double * z", "{name}_vars * vars",
                         "const {name}_dims * dims"])
        self._codekeyorder = ['prob2socp', 'socp2prob']
        # parameters and variables in the optimization problem
        self.params = ""
        self.abstract_dims = ""
        self.variables = ""
        self.indent = self.prob2socp.indent # set our indent spacing
        # keep track of the total nonzeros in each matrix
        self.nnz = {'G': [], 'A': []}
        # keep track of the problem dimension (filled in by c_dimensions)
        self.size_lookup = {'m': 0, 'n': 0, 'p': 0}
    # Convenience accessors for the two generated CFunction objects and the
    # extension of the emitted source file.
    @property
    def prob2socp(self): return self.code['prob2socp']
    @property
    def socp2prob(self): return self.code['socp2prob']
    @property
    def extension(self):
        # generated sources are C files
        return ".c"
    def save(self, name):
        """Write the generated solver into ./<name>/: a Makefile, <name>.c and
        <name>.h rendered from templates, plus copies of the static
        qcml_utils sources."""
        # get the paths to the template files
        data_dir = os.path.dirname(__file__)
        path = os.getcwd()
        makefile_template = "{data_dir}/Makefile_template".format(**vars())
        source_file_template = "{data_dir}/stuff_template.c".format(**vars())
        header_file_template = "{data_dir}/stuff_template.h".format(**vars())
        new_dir = "{path}/{name}".format(**vars())
        makefile = "{new_dir}/Makefile".format(**vars())
        source_file = "{new_dir}/{name}.c".format(**vars())
        header_file = "{new_dir}/{name}.h".format(**vars())
        # create the dictionary for the generated code
        if not os.path.exists(new_dir):
            os.makedirs(new_dir)
        # populate the dict needed for codegen
        codegen_dict = {
            'name': name,
            'NAME': name.upper(),
            'params': self.params,
            'dims': self.abstract_dims,
            'variables': '\n'.join(self.variables),
            # the name of the source isn't known until this point, so we
            # interpolate the string and insert it here
            'prob2socp': self.prob2socp.source.format(name=name),
            'socp2prob': self.socp2prob.source.format(name=name),
            'prob2socp_prototype': self.prob2socp.prototype.format(name=name),
            'socp2prob_prototype': self.socp2prob.prototype.format(name=name)
        }
        # copy over the static utility files
        shutil.copy("{0}/qcml_utils.c".format(data_dir), new_dir)
        shutil.copy("{0}/qcml_utils.h".format(data_dir), new_dir)
        # write out the files
        write_template(makefile_template, makefile, codegen_dict)
        write_template(header_file_template, header_file, codegen_dict)
        write_template(source_file_template, source_file, codegen_dict)
    # generator to emit (and record) the problem dimensions p, m, n
def c_dimensions(self):
self.size_lookup['m'] = self.num_conic + self.num_lps
self.size_lookup['n'] = self.num_vars
self.size_lookup['p'] = self.num_lineqs
yield "data->p = %s;" % self.num_lineqs
yield "data->m = %s;" % (self.num_conic + self.num_lps)
yield "data->n = %s;" % self.num_vars
# generator to get cone dimensions
def c_cone_sizes(self):
if self.cone_list:
num_cone, cone_size = zip(*self.cone_list)
else:
num_cone, cone_size = [0], 0
yield "data->l = %s;" % self.num_lps
yield "data->nsoc = %s;" % sum(num_cone)
if sum(num_cone) == 0:
yield "data->q = NULL;"
else:
yield "data->q = (long *) malloc(data->nsoc * sizeof(long));"
yield "if(!data->q) return qc_socp_free(data);"
yield ""
yield "/* initialize the cone */"
yield "q_ptr = data->q;"
for num, sz in self.cone_list:
if num == 1: yield "*q_ptr++ = %s;" % sz
else: yield "for(i = 0; i < %s; ++i) *q_ptr++ = %s;" % (num, sz)
# function to get parameters
def c_params(self):
return ["%s%s %s;" % (self.indent, shape_to_c_type(v),k) for (k,v) in self.program.parameters.iteritems()]
# function to get abstract dims
def c_dims(self):
if self.program.abstract_dims:
return ["%slong %s;" % (self.indent, k) for k in self.program.abstract_dims]
else:
return ["%sconst char SENTINEL; /* empty dims struct */" % self.indent]
# function to get variables
def c_variables(self):
for (k,v) in self.program.variables.iteritems():
shape_type = shape_to_c_type(v)
if shape_type == "double *":
shape_type = "const double *"
yield "%s%s %s;" % (self.indent, shape_type,k)
for (k,v) in self.dual_equality_vars.iteritems():
shape_type = shape_to_c_type(v)
if shape_type == "double *":
shape_type = "const double *"
yield "%s%s %s;" % (self.indent, shape_type,k)
for (k,v) in self.dual_conic_vars.iteritems():
shape_type = shape_to_c_type(v)
if shape_type == "double *":
shape_type = "const double *"
yield "%s%s %s;" % (self.indent, shape_type,k)
# generator to allocate socp data structures
def c_allocate_socp(self):
yield "qc_socp * data = (qc_socp *) calloc(1, sizeof(qc_socp));"
yield "if (!data) return qc_socp_free(data);"
# generator to allocate vectors
def c_allocate_vector(self, vector, size):
if self.size_lookup[size] == 0:
yield "data->%s = NULL;" % vector
else:
yield "data->%s = (double *) calloc(data->%s, sizeof(double));" % (vector, size)
yield "if (!data->%s) return qc_socp_free(data);" % (vector)
def c_allocate_matrix(self, matrix):
const = sum(int(x) for x in self.nnz[matrix] if x.isdigit())
expr_counts = Counter(x for x in self.nnz[matrix] if not x.isdigit())
size = ' + '.join('%d*%s' % (v,k) for k,v in expr_counts.iteritems())
if const > 0: size = "%s + %d" % (size, const)
if const > 0 or size:
yield "nnz%s = %s;" % (matrix, size)
yield "data->%(matrix)sx = (double *) malloc(nnz%(matrix)s * sizeof(double));" % {'matrix': matrix}
yield "data->%(matrix)sp = (long *) malloc(nnz%(matrix)s * sizeof(long));" % {'matrix': matrix}
yield "data->%(matrix)si = (long *) malloc(nnz%(matrix)s * sizeof(long));" % {'matrix': matrix}
yield "if ((!data->%(matrix)sx) || (!data->%(matrix)sp) || (!data->%(matrix)si)) return qc_socp_free(data);" % {'matrix': matrix}
else:
yield "nnz%s = 0;" % (matrix)
yield "data->%sx = NULL;" % (matrix)
yield "data->%sp = NULL;" % (matrix)
yield "data->%si = NULL;" % (matrix)
yield "%(matrix)s_data_ptr = data->%(matrix)sx;" % {'matrix': matrix}
yield "%(matrix)s_row_ptr = data->%(matrix)si;" % {'matrix': matrix}
yield "%(matrix)s_col_ptr = data->%(matrix)sp;" % {'matrix': matrix}
def c_setup_qc_matrix(self, matrix):
if self.nnz[matrix]:
rows = "data->m" if matrix == "G" else "data->p"
#yield "%s_coo = (qc_matrix *) malloc(sizeof(qc_matrix));" % matrix
#yield "if (!%s_coo) return qc_socp_free(data);" % matrix
yield "%s_coo.m = %s; %s_coo.n = data->n; %s_coo.nnz = nnz%s;" % (matrix, rows, matrix, matrix, matrix)
yield "%s_coo.i = data->%si;" % (matrix, matrix)
yield "%s_coo.j = data->%sp;" % (matrix, matrix)
yield "%s_coo.v = data->%sx;" % (matrix, matrix)
def c_compress(self, matrix):
if self.nnz[matrix]:
yield "%s_csc = qc_compress(&%s_coo);" % (matrix, matrix)
yield "if (!%s_csc) return qc_socp_free(data);" % matrix
yield "/* free memory used for COO matrix, so it can be reassigned later */"
yield "free(data->%si);" % matrix
yield "free(data->%sp);" % matrix
yield "free(data->%sx);" % matrix
yield "/* reassign into data, pointer now owned by data */"
yield "data->%si = %s_csc->i;" % (matrix, matrix)
yield "data->%sp = %s_csc->j;" % (matrix, matrix)
yield "data->%sx = %s_csc->v;" % (matrix, matrix)
yield "/* only free temp CSC pointer, but not its data */"
yield "free(%s_csc);" % (matrix)
yield ""
def c_recover(self):
for k in self.program.variables.keys():
start, length = self.primal_vars[k]
if length == 1:
yield "vars->%s = *(x + %s);" % (k, start)
else:
yield "vars->%s = x + %s; /* length %s */" % (k, start, length)
for k in self.dual_equality_vars.keys():
start, length = self.dual_equality_vars[k]
if length == 1:
yield "vars->%s = *(y + %s);" % (k, start)
else:
yield "vars->%s = y + %s; /* length %s */" % (k, start, length)
for k in self.dual_conic_vars.keys():
start, length = self.dual_conic_vars[k]
if length == 1:
yield "vars->%s = *(z + %s);" % (k, start)
else:
yield "vars->%s = z + %s; /* length %s */" % (k, start, length)
def functions_setup(self):
# add some documentation
self.prob2socp.document("maps 'params' into the C socp data type")
self.prob2socp.document("'params' ought to contain:")
self.prob2socp.document(self.printshapes(self.program))
self.prob2socp.newline()
self.params = '\n'.join(self.c_params())
self.abstract_dims = '\n'.join(self.c_dims())
# join later, because the dual variables aren't yet populated
self.variables = self.c_variables()
self.prob2socp.add_comment("all local variables")
self.prob2socp.add_lines("long i; /* loop index */")
self.prob2socp.add_lines("long *q_ptr;")
self.prob2socp.add_lines("long *A_row_ptr, *A_col_ptr;")
self.prob2socp.add_lines("long *G_row_ptr, *G_col_ptr;")
self.prob2socp.add_lines("double *A_data_ptr, *G_data_ptr;")
self.prob2socp.add_lines("long nnzA, nnzG;")
self.prob2socp.add_lines("qc_matrix *G_csc, *A_csc; /* possibly un-used */")
self.prob2socp.add_lines("qc_matrix G_coo, A_coo; /* possibly un-used */")
self.prob2socp.newline()
self.prob2socp.add_comment("allocate socp data structure")
self.prob2socp.add_lines(self.c_allocate_socp())
self.prob2socp.newline()
# set up the | |
#Standard python libraries
import os
import warnings
import copy
import time
import itertools
import functools
#Dependencies - numpy, scipy, matplotlib, pyfftw
import numpy as np
import matplotlib.pyplot as plt
import pyfftw
from pyfftw.interfaces.numpy_fft import fft, fftshift, ifft, ifftshift, fftfreq
from scipy.interpolate import interp1d as sinterp1d
import scipy
from ufss import DiagramGenerator
from ufss.UF2.heaviside_convolve import HeavisideConvolve
def set_identical_efields(obj):
    """Reset ``obj.efields`` to an empty list.

    Placeholder: the original author noted this should eventually do more.
    """
    obj.efields = []
class psi_container:
    """Hold one perturbative wavefunction segment psi(t) on a time grid.

    The stored array is padded on the left with ``interp_left_fill`` (psi is
    zero before the pulse arrives, for the default fill) and on the right with
    its final column (the "asymptote"), and an interpolant ``psi_fun`` is
    built so the instance can be evaluated at arbitrary times by calling it.

    Fix vs. original: the bare ``except:`` in impulsive_psifun is narrowed to
    ``except TypeError`` (the only expected failure is ``len()`` of a scalar).

    Args:
        t (np.ndarray): time grid; a size-1 grid triggers the impulsive limit
        psi (np.ndarray): complex coefficients of shape (n, t.size)
        bool_mask: mask of eigenstates represented in psi (stored, unused here)
        pulse_number (int): index of the pulse that generated this segment
        manifold_key (str): label of the manifold psi lives in
        interp_kind (str): interpolation order passed to scipy interp1d
        interp_left_fill (complex): value assumed before the time grid
    """
    def __init__(self,t,psi,bool_mask,pulse_number,manifold_key,*,interp_kind='linear',
                 interp_left_fill=0):
        self.bool_mask = bool_mask
        self.pulse_number = pulse_number
        self.manifold_key = manifold_key
        if t.size == 1:
            # Impulsive limit: fabricate a 3-point grid spaced by one ULP
            # around t[0] so the rest of the machinery still sees a grid
            n, M = psi.shape
            self.M = M+2
            self.n = n
            self.t = np.array([-1,0,1],dtype='float') * np.spacing(t[0]) + t[0]
            psi_new = np.zeros((n,3),dtype='complex')
            psi_new[:,0] = interp_left_fill
            # midpoint between the left fill and the impulsive value
            psi_new[:,1] = (1 + interp_left_fill)/2 * psi[:,0]
            psi_new[:,2] = psi[:,0]
            self.asymptote = psi[:,-1]
            self.psi = psi_new
            self._psi = psi_new
            self.psi_fun = self.impulsive_psifun(self.asymptote,left_fill = interp_left_fill)
        else:
            self.t = t
            self.psi = psi
            self._psi = self.extend(psi,left_fill = interp_left_fill)
            self.psi_fun = self.make_interpolant(kind=interp_kind,
                                                 left_fill=interp_left_fill)

    def extend(self,psi,*,left_fill = 0):
        """Return psi padded to shape (n, 3*M): left_fill | psi | asymptote."""
        n, M = psi.shape
        self.M = M
        self.n = n
        new_psi = np.zeros((n,3*M),dtype='complex')
        new_psi[:,0:M] = left_fill
        new_psi[:,M:2*M] = psi
        asymptote = psi[:,-1]
        self.asymptote = asymptote
        new_psi[:,2*M:] = asymptote[:,np.newaxis]
        return new_psi

    def make_interpolant(self,*, kind='cubic', left_fill=0):
        """Interpolate psi on its grid, padding with left_fill to the left
        and with the final column psi[:,-1] to the right."""
        left_fill = np.ones(self.n,dtype='complex')*left_fill
        right_fill = self.psi[:,-1]
        return sinterp1d(self.t,self.psi,fill_value = (left_fill,right_fill),
                         assume_sorted=True,bounds_error=False,kind=kind)

    def impulsive_psifun(self,asymptote,left_fill=0):
        """Return a step-function-like psi(t) for the impulsive limit."""
        if left_fill == 0:
            def f(t):
                # psi jumps from 0 to its asymptote at the pulse arrival time
                return asymptote[:,np.newaxis] * np.heaviside(t-self.t[1],0.5)[np.newaxis,:]
        else:
            def f(t):
                try:
                    return asymptote[:,np.newaxis] * np.ones(len(t))[np.newaxis,:]
                except TypeError:
                    # scalar t has no len(); was a bare "except:" before,
                    # which could have masked unrelated errors
                    return asymptote[:,np.newaxis]
        return f

    def __call__(self,t):
        """Evaluate psi at time(s) t, short-circuiting with cheap slices of
        the padded array when t lies entirely off (or exactly on) the grid."""
        if type(t) is np.ndarray:
            if t[0] > self.t[-1]:
                # entirely after the grid: asymptotic values
                if t.size <= self.M:
                    ans = self._psi[:,-t.size:].copy()
                else:
                    ans = np.ones(t.size,dtype='complex')[np.newaxis,:] * self.asymptote[:,np.newaxis]
            elif t[-1] < self.t[0]:
                # entirely before the grid: left-fill values
                if t.size <= self.M:
                    ans = self._psi[:,:t.size].copy()
                else:
                    ans = np.zeros((self.n,t.size),dtype='complex')
            elif t.size == self.M:
                if np.allclose(t,self.t):
                    # same grid: no interpolation needed
                    ans = self.psi.copy()
                else:
                    ans = self.psi_fun(t)
            else:
                ans = self.psi_fun(t)
        else:
            ans = self.psi_fun(t)
        return ans

    def __getitem__(self,inds):
        """Column access into the padded (n, 3*M) array."""
        return self._psi[:,inds].copy()
class Wavepackets(DiagramGenerator):
"""This class is designed to calculate perturbative wavepackets in the
light-matter interaction given the eigenvalues of the unperturbed
hamiltonian and the material dipole operator evaluated in the
eigenbasis of the unperturbed hamiltonian.
Args:
file_path (string): path to folder containing eigenvalues and the
dipole operator for the system Hamiltonian
num_conv_points (int): number of desired points for linear
convolution. Also number of points used to resolve all optical
pulse shapes
dt (float): time spacing used to resolve the shape of all optical
pulses
initial_state (int): index of initial state for psi^0
"""
def __init__(self,file_path,*, num_conv_points=41,
initial_state=0, total_num_time_points = None,
detection_type = 'polarization'):
self.slicing_time = 0
self.interpolation_time = 0
self.expectation_time = 0
self.next_order_expectation_time = 0
self.convolution_time = 0
self.extend_time = 0
self.mask_time = 0
self.dipole_time = 0
self.automation_time = 0
self.diagram_to_signal_time = 0
self.base_path = file_path
self.undersample_factor = 1
self.gamma_res = 6.91
self.initial_state = initial_state
self.load_eigensystem()
self.load_mu()
if detection_type == 'polarization':
self.psi_to_signal = self.polarization_detection_signal
self.return_complex_signal = False
elif detection_type == 'complex_polarization':
self.psi_to_signal = self.polarization_detection_signal
self.return_complex_signal = True
detection_type = 'polarization'
elif detection_type == 'integrated_polarization':
self.psi_to_signal = self.integrated_polarization_detection_signal
elif detection_type == 'fluorescence':
self.psi_to_signal = self.fluorescence_detection_signal
self.f_yield = f_yield #quantum yield of doubly excited manifold relative to singly excited manifold
DiagramGenerator.__init__(self,detection_type=detection_type)
self.K_dict = {'u':self.up,'d':self.down}
# Code will not actually function until the following empty lists are set by the user
self.efields = [] #initialize empty list of electric field shapes
self.efield_times = [] #initialize empty list of times assoicated with each electric field shape
self.dts = [] #initialize empty list of time spacings associated with each electric field shape
self.polarization_sequence = [] #initialize empty polarization sequence
self.pulse_times = [] #initialize empty list of pulse arrival times
self.centers = [] #initialize empty list of pulse center frequencies
self.efield_wavevectors = []
self.heaviside_convolve_list = []
self.psis = dict()
def set_pulse_delays(self,all_delays):
"""Must be a list of numpy arrays, where each array is a
list of delay times between pulses
"""
self.all_pulse_delays = all_delays
num_delays = len(self.all_pulse_delays)
num_pulses = len(self.efields)
if num_delays == num_pulses - 1:
pass
elif num_delays == num_pulses - 2 and self.detection_type == 'polarization':
# If there is a local oscillator, it arrives simultaneously with the last pulse
self.all_pulse_delays.append(np.array([0]))
elif num_delays <= num_pulses -2:
raise Exception('There are not enough delay times')
elif num_delays >= num_pulses:
raise Exception('There are too many delay times')
def calculate_diagrams_all_delays(self,diagrams):
t0 = time.time()
num_delays = len(self.all_pulse_delays)
num_pulses = len(self.efields)
all_delay_combinations = list(itertools.product(*self.all_pulse_delays))
signal_shape = [delays.size for delays in self.all_pulse_delays]
if self.detection_type == 'polarization':
signal = np.zeros((len(all_delay_combinations),self.w.size),dtype='complex')
if len(signal_shape) == self.pdc.shape[0]:
# get rid of the "delay" between the last pulse and the local oscillator
signal_shape[-1] = self.w.size
elif len(signal_shape) == self.pdc.shape[0] - 1:
# append the shape of the polariation-detection axis
signal_shape.append(self.w.size)
else:
raise Exception('Cannot automatically determine final signal shape')
else:
signal = np.zeros((len(all_delay_combinations)),dtype='complex')
counter = 0
for delays in all_delay_combinations:
arrival_times = [0]
for delay in delays:
arrival_times.append(arrival_times[-1]+delay)
if self.detection_type == 'polarization':
signal[counter,:] = self.calculate_diagrams(diagrams,arrival_times)
else:
signal[counter] = self.calculate_diagrams(diagrams,arrival_times)
counter += 1
self.signal = signal.reshape(signal_shape)
self.calculation_time = time.time() - t0
return self.signal
def calculate_signal_all_delays(self):
t0 = time.time()
num_delays = len(self.all_pulse_delays)
num_pulses = len(self.efields)
all_delay_combinations = list(itertools.product(*self.all_pulse_delays))
signal_shape = [delays.size for delays in self.all_pulse_delays]
if self.detection_type == 'polarization':
signal = np.zeros((len(all_delay_combinations),self.w.size),dtype='complex')
if len(signal_shape) == self.pdc.shape[0]:
# get rid of the "delay" between the last pulse and the local oscillator
signal_shape[-1] = self.w.size
elif len(signal_shape) == self.pdc.shape[0] - 1:
# append the shape of the polariation-detection axis
signal_shape.append(self.w.size)
else:
raise Exception('Cannot automatically determine final signal shape')
else:
signal = np.zeros((len(all_delay_combinations)),dtype='complex')
counter = 0
for delays in all_delay_combinations:
arrival_times = [0]
for delay in delays:
arrival_times.append(arrival_times[-1]+delay)
if self.detection_type == 'polarization':
signal[counter,:] = self.calculate_signal(arrival_times)
else:
signal[counter] = self.calculate_signal(arrival_times)
counter += 1
self.signal = signal.reshape(signal_shape)
self.calculation_time = time.time() - t0
return self.signal
def set_t(self,optical_dephasing_rate,*,dt='auto'):
"""Sets the time grid upon which all frequency-detected signals will
be calculated on
"""
max_pos_t = int(self.gamma_res/optical_dephasing_rate)
max_efield_t = max([np.max(u) for u in self.efield_times]) * 1.05
max_pos_t = max(max_pos_t,max_efield_t)
if dt == 'auto':
dt = self.dts[-1] # signal detection bandwidth determined by local oscillator
n = int(max_pos_t/dt)
self.t = np.arange(-n,n+1,1)*dt
self.w = fftshift(fftfreq(self.t.size,d=dt)*2*np.pi)
def execute_diagram(self,instructions):
num_instructions = len(instructions['ket']) + len(instructions['bra'])
ket = self.psi0
bra = self.psi0
ketname = ''
braname = ''
ket_instructions = instructions['ket']
bra_instructions = instructions['bra']
for i in range(len(ket_instructions)):
key, num = ket_instructions[i]
ketname += key+str(num)
# Try to re-use previous calculations, if they exist
try:
new_ket = self.psis[ketname]
except KeyError:
new_ket = self.K_dict[key](ket,pulse_number=num)
self.psis[ketname] = new_ket
ket = new_ket
for i in range(len(bra_instructions)):
key, num = bra_instructions[i]
braname += key+str(num)
# Try to re-use previous calculations, if they exist
try:
new_bra = self.psis[braname]
except KeyError:
new_bra = self.K_dict[key](bra,pulse_number=num)
self.psis[braname] = new_bra
bra = new_bra
sig = self.psi_to_signal(bra,ket)
return sig
def remove_psis_by_pulse_number(self,pulse_number):
num = str(pulse_number)
keys = self.psis.keys()
keys_to_remove = []
for key in keys:
flag = key.find(num)
if flag >= 0:
keys_to_remove.append(key)
for key in keys_to_remove:
self.psis.pop(key)
def set_identical_gaussians(self,sigma_t,c,phase_discrimination):
"""
"""
L = len(phase_discrimination) # number of pulses
# Delta = 10 and M = 41 hard-coded in
efield_t = np.linspace(-5,5,num=41)
times = [efield_t] * L
self.set_polarization_sequence(['x'] * L)
centers = [c] * L
ef = np.exp(-efield_t**2/(2*sigma_t**2))
efields = [ef for i in L]
self.set_efields(times,efields,centers,phase_discrimination,
reset_psis = True,plot_fields = False)
    def set_current_diagram_instructions(self,arrival_times):
        # Cache the diagram set relevant for these arrival times;
        # calculate_signal reads self.current_instructions right afterwards.
        self.current_instructions = self.get_wavefunction_diagrams(arrival_times)
def calculate_signal(self,arrival_times):
t0 = time.time()
try:
old_pulse_times = self.pulse_times
for i in range(len(old_pulse_times)):
if old_pulse_times[i] != arrival_times[i]:
self.remove_psis_by_pulse_number(i)
except AttributeError:
pass
self.pulse_times = arrival_times
self.set_current_diagram_instructions(arrival_times)
diagram_instructions = self.current_instructions
if len(diagram_instructions) == 0:
print(arrival_times)
t1 = time.time()
try:
instructions = diagram_instructions[0]
signal = self.execute_diagram(instructions)
for instructions in diagram_instructions[1:]:
signal += self.execute_diagram(instructions)
except IndexError:
signal = 0
t2 = time.time()
self.automation_time += t1-t0
self.diagram_to_signal_time += t2-t1
return signal
def calculate_diagrams(self,diagram_instructions,arrival_times):
try:
old_pulse_times = self.pulse_times
for i in range(len(old_pulse_times)):
if old_pulse_times[i] != arrival_times[i]:
self.remove_psis_by_pulse_number(i)
except AttributeError:
pass
self.pulse_times = arrival_times
self.current_instructions = diagram_instructions
instructions = diagram_instructions[0]
signal = self.execute_diagram(instructions)
for instructions in diagram_instructions[1:]:
signal += self.execute_diagram(instructions)
return signal
    def polarization_detection_signal(self,bra_dict,ket_dict):
        # Heterodyne-style detection: compute P(t) = <bra|mu|ket> and mix it
        # with the local oscillator (the last pulse) in the frequency domain.
        p_of_t = self.dipole_expectation(bra_dict,ket_dict,pulse_number = -1)
        return self.polarization_to_signal(p_of_t,local_oscillator_number=-1)
    def integrated_polarization_detection_signal(self,bra_dict,ket_dict):
        # Time-integrated polarization mixed with the local oscillator.
        p = self.integrated_dipole_expectation(bra_dict,ket_dict,pulse_number=-1)
        return self.integrated_polarization_to_signal(p,local_oscillator_number=-1)
def fluorescence_detection_signal(self,bra_dict,ket_dict,*,time_index = -1):
"""Calculate inner product given an input bra and ket dictionary
| |
pylint: disable-msg=too-many-arguments
    def show_slide(self, slide_name: str, transition: Optional[str] = None,
                   key: Optional[str] = None, force: bool = False, priority: int = 0,
                   show: Optional[bool] = True, expire: Optional[float] = None,
                   play_kwargs: Optional[dict] = None, **kwargs) -> bool:
        """
        Request to show the specified slide. Many of the slide parameters may be overridden
        using the arguments for this function.
        Args:
            slide_name: The name of the slide.
            transition: The slide transition (overrides any stored in the slide).
            key: The slide key.
            force: When true, the slide will be displayed regardless of the priority of the
                current slide.
            priority: The priority of the slide to show.
            show: Whether or not to actually show the slide.
            expire: Expiration time (in seconds) after which the slide will be automatically
                removed (overrides value stored in the slide).
            play_kwargs: Kwargs related to playing/displaying the slide.
            **kwargs: Additional kwargs (will override settings in the play_kwargs parameter).
        Returns:
            True if the slide will be shown, False otherwise.
        """
        # TODO: Is the show parameter really needed? Why call show_slide and not show the slide?
        # explicit **kwargs win over the play_kwargs dict
        if not play_kwargs:
            play_kwargs = kwargs
        else:
            play_kwargs.update(kwargs)
        # reuse an existing slide with this name, otherwise build it from the
        # machine-wide slide config
        if self.has_screen(slide_name):
            slide = self.get_screen(slide_name)
        else:
            slide = self.add_slide(name=slide_name,
                                   config=self.mc.slides[slide_name],
                                   priority=priority,
                                   key=key,
                                   play_kwargs=play_kwargs)
        # update the widgets with whatever kwargs came through here
        if play_kwargs:
            for widget in slide.walk():
                try:
                    widget.update_kwargs(**play_kwargs)
                except AttributeError:
                    # not every widget supports kwarg updates
                    pass
        if not transition:
            try: # anon slides are in the collection
                transition = self.mc.slides[slide_name]['transition']
            except KeyError:
                pass
        # If there's an expire kwarg, that takes priority over slide's expire
        if expire:
            slide.schedule_removal(expire)
        elif slide.expire:
            slide.schedule_removal(slide.expire)
        if (slide.priority >= self.current_slide.priority and show) or force:
            # We need to show this slide
            # Have to set a transition even if there's not one because we have
            # to remove whatever transition was last used
            self.transition.stop()
            self.transition = self.mc.transition_manager.get_transition(transition)
            self._set_current_slide(slide)
            return True
        else:
            # Not showing this slide
            return False
# pylint: disable-msg=too-many-arguments
    def add_and_show_slide(self, widgets: Optional[dict] = None,
                           slide_name: Optional[str] = None,
                           transition: Optional[str] = None, priority: int = 0,
                           key: Optional[str] = None, force: bool = False,
                           background_color=None,
                           expire: Optional[float] = None, play_kwargs=None,
                           **kwargs) -> bool:
        """Create and show the slide.
        If a slide with this name already exists, it will be replaced.
        Args:
            widgets: An optional dictionary of widgets to add to the slide.
            slide_name: The name of the slide.
            transition: The slide transition (overrides any stored in the slide).
            force: When true, the slide will be displayed regardless of the priority of the
                current slide.
            key: The slide key.
            priority: The priority of the slide to show.
            background_color: Optional background color for the new slide.
            expire: Expiration time (in seconds) after which the slide will be automatically
                removed (overrides value stored in the slide).
            play_kwargs: Kwargs related to playing/displaying the slide.
            **kwargs: Additional kwargs (will override settings in the play_kwargs parameter).
        Returns:
            True if the slide will be shown, False otherwise.
        """
        # explicit **kwargs win over the play_kwargs dict
        if not play_kwargs:
            play_kwargs = kwargs
        else:
            play_kwargs.update(kwargs)
        # build (or replace) the slide from an inline config, then defer to
        # show_slide for transition/expire/priority handling
        slide_obj = self.add_slide(name=slide_name,
                                   config=dict(widgets=widgets, background_color=background_color),
                                   priority=priority, key=key)
        return self.show_slide(slide_name=slide_obj.name, transition=transition,
                               priority=priority, force=force, key=key,
                               expire=expire, play_kwargs=play_kwargs)
    def remove_slide(self, slide: Union["Slide", str],
                     transition_config: Optional[dict] = None) -> bool:
        """Remove a slide from the display.
        Args:
            slide: The slide to remove (can be name string or Slide object)
            transition_config: Optional dictionary containing the transition configuration
                to use while removing the slide (overrides slide setting).
        Returns:
            True if the slide is scheduled to be removed, False otherwise
        Notes:
            You can't remove the automatically generated blank slide.
            (NOTE(review): the docstring used to say this raises an exception,
            but the code below returns False instead.)
        """
        # TODO:
        # Warning, if you just created a slide, you have to wait at least on
        # tick before removing it. Can we prevent that? What if someone tilts
        # at the exact perfect instant when a mode was starting or something?
        # maybe we make sure to run a Kivy tick between bcp reads or something?
        # resolve a name to a Slide object; unknown names are ignored
        try:
            slide = self.get_slide(slide)
        except ScreenManagerException:  # no slide by that name
            if not isinstance(slide, Slide):
                return False
        # Do not allow the blank slide to be removed
        if slide.name == self._blank_slide_name:
            return False
        slide.prepare_for_removal()
        self.mc.active_slides.pop(slide.name, None)
        # If the current slide is the active one, find the next highest
        # priority one to show instead.
        if self.current_slide == slide:
            new_slide = self._get_next_highest_priority_slide(slide)
            if self.transition:
                self.transition.stop()
            # an explicit transition_config wins over the slide's transition_out
            if transition_config:
                self.transition = self.mc.transition_manager.get_transition(
                    transition_config)
            elif self.current_slide.transition_out:
                self.transition = self.mc.transition_manager.get_transition(
                    self.current_slide.transition_out)
            else:
                self.transition = NoTransition()
            self.transition.bind(on_complete=self._remove_transition)
        else:
            new_slide = None
        # Set the new slide first, so we can transition out of the old before removing
        if new_slide:
            self._set_current_slide(new_slide)
        try:
            self.remove_widget(slide)
        except ScreenManagerException:
            # already detached from this screen manager
            return False
        return True
def _remove_transition(self, transition):
"""Remove transition if done."""
if self.transition == transition:
self.transition = NoTransition()
    def _set_current_slide(self, slide: "Slide"):
        """Make *slide* the currently shown slide (creating a blank slide when
        None is passed), working around a Kivy screen-manager quirk."""
        # slide frame requires at least one slide, so if you try to set current
        # to None, it will create a new slide called '<display name>_blank' at
        # priority 0 and show that one
        # I think there's a bug in Kivy 1.9.1. According to the docs, you
        # should be able to set self.current to a screen name. But if that
        # screen is already managed by this screen manager, it will raise
        # an exception, and the source is way deep in their code and not
        # easy to fix by subclassing. So this is sort of a hack that looks
        # for that exception, and if it sees it, it just removes and
        # re-adds the screen.
        if not slide:
            slide = self.create_blank_slide()
        if self.current == slide.name:
            # already showing; nothing to do
            return
        try:
            self.current = slide.name
        except KivyWidgetException:
            self.remove_widget(slide)
            self.add_widget(slide)
            self.current = slide.name
        # Post the event via callback at the end of the frame in case more than
        # one slide was set in this frame, so we only want to post the event
        # for the slide that actually became active. The Kivy clock event will
        # only call the associated callback once per frame when triggered no
        # matter how many times it is called.
        self._current_slide_changed()
    def _set_current_slide_name(self, slide_name):
        # Translate a slide name to its Slide object, raising a friendlier
        # error when no slide with that name lives in this frame.
        try:
            self._set_current_slide(self.get_screen(slide_name))
        except ScreenManagerException:
            raise ValueError('Cannot set current slide to "{}" as there is '
                             'no slide in this slide_frame with that '
                             'name'.format(slide_name))
def _get_next_highest_priority_slide(self, slide: "Slide") -> "Slide":
"""Return the slide with the next highest priority."""
new_slide = None
for s in self.slides:
if s == slide:
continue
elif not new_slide:
new_slide = s
elif s.priority > new_slide.priority:
new_slide = s
elif (s.priority == new_slide.priority and
s.creation_order > new_slide.creation_order):
new_slide = s
return new_slide
    def add_widget_to_current_slide(self, widget: "KivyWidget"):
        """Adds the widget to the current slide."""
        # delegates directly to the active Slide's own add_widget
        self.current_slide.add_widget(widget)
def add_widgets_to_current_slide(self, widgets: List["KivyWidget"]):
"""Adds a list of widgets to the current slide."""
for w in widgets:
self.add_widget_to_current_slide(w)
    def remove_widgets_by_key(self, key: str) -> None:
        """Removes all widgets with the specified key."""
        for widget in self.find_widgets_by_key(key):
            widget.prepare_for_removal()
            # Widget here is presumably MPF-MC's wrapper type whose visual
            # lives inside a container; remove the container from its parent.
            # Plain Kivy widgets are detached from their parent directly.
            if isinstance(widget, Widget) and widget.container and widget.container.parent:
                widget.container.parent.remove_widget(widget.container)
            elif widget.parent:
                widget.parent.remove_widget(widget)
def find_widgets_by_key(self, key: str) -> List["KivyWidget"]:
"""Retrieves a list of all widgets with the specified key value."""
widgets = []
# First find all matching widgets owned by the slide parent
for child in self.parent_widgets:
widgets.extend([x for x in child.walk(restrict=True, loopback=False)
if hasattr(x, "key") and x.key == key])
# Finally find all matching widgets owned by each slide
for slide in self.slides:
widgets.extend(slide.find_widgets_by_key(key))
return widgets
    def _post_active_slide_event(self, dt) -> None:
        """Posts an event that a new slide is now active."""
        del dt  # Kivy clock passes the elapsed time; unused here
        self.mc.post_mc_native_event('slide_{}_active'.format(self.current_slide_name))
        """event: slide_(name)_active
        desc: A slide called (name) has just become active, meaning that
        it's now showing as the current slide.
        This is useful for things like the widget_player where you want to
        target a widget for a specific slide, but you can only do so if
        that slide exists.
        Slide names do not take into account what display they're playing on,
        so be sure to create machine-wide unique names when you're naming
        your slides.
        """
class DisplayOutput(Scatter):
"""Show a display as a widget."""
def __init__(self, parent: "KivyWidget", display: "Display", **kwargs):
kwargs.setdefault('do_scale', False)
kwargs.setdefault('do_translation', False)
kwargs.setdefault('do_rotation', False)
super().__init__(**kwargs)
self.key = | |
LayerStack.
`layers` is a list of TransformerLayer objects representing the
building blocks of the transformer model, e.g.
transformer_layers.SelfAttention.
In addition, there are a bunch of other transformations which occur around
the layer body, and at the beginning and the end of the layer stack. We
call these "sublayers". They are configurable with the `sublayers_initial`,
`sublayers_per_layer`, and `sublayers_final` arguments, each of which takes
a list of sublayer functions.
Each of the sublayer functions has signature:
x, layer_stack, context -> y
where x is the input tensor and y is the output tensor.
The default sublayers specified in defaults.gin are:
transformer.LayerStack.sublayers_initial = [
@transformer.sublayer_dropout,
]
transformer.LayerStack.sublayers_per_layer = [
@transformer.sublayer_rms_norm,
@transformer.sublayer_call_layer,
@transformer.sublayer_dropout,
@transformer.sublayer_residual,
]
transformer.LayerStack.sublayers_final = [
@transformer.sublayer_rms_norm,
@transformer.sublayer_dropout,
]
Refer to these as examples of how to write and call your own sublayer
functions.
`dropout_rate` and `norm_epsilon` should only be specified in a legacy mode,
for compatibility with older checkpoints.
Args:
layers: a list of TransformerLayer
sublayers_initial: an optional list of sublayer functions
sublayers_per_layer: an optional list of sublayer functions
sublayers_final: an optional list of sublayer functions
dropout_rate: DEPRECATED - a floating-point number
norm_epsilon: DEPRECATED - a floating-point number
recompute_grads: a boolean
"""
self._layers = layers
self._recompute_grads = recompute_grads
self._sublayers_initial = sublayers_initial
self._sublayers_per_layer = sublayers_per_layer
self._sublayers_final = sublayers_final
if (dropout_rate is not None) != (norm_epsilon is not None):
raise ValueError(
"LayerStack.dropout_rate and LayerStack.norm_epsilon should either "
"be both not None (legacy mode) or both None (normal mode)")
if dropout_rate is not None:
self._legacy_init(dropout_rate, norm_epsilon)
def _legacy_init(self, dropout_rate, norm_epsilon):
"""Legacy initialization for use with old checkpoints.
dropout_rate and norm_epsilon are specified in LayerStack.
Custom sublayers are not specified.
Args:
dropout_rate: a float
norm_epsilon: a float
"""
self.dropout_rate = dropout_rate
self.norm_epsilon = norm_epsilon
if (self._sublayers_initial is not None or
self._sublayers_per_layer is not None or
self._sublayers_final is not None):
tf.logging.warning("legacy mode - ignoring custom sublayers")
self._sublayers_initial = [sublayer_legacy_dropout]
self._sublayers_per_layer = [sublayer_legacy_rms_norm,
sublayer_call_layer,
sublayer_legacy_dropout,
sublayer_residual]
self._sublayers_final = [sublayer_legacy_final_rms_norm,
sublayer_legacy_dropout]
  def call(self, context, x):
    """Call the layer stack.

    Runs the initial sublayers, then each layer (wrapped in its per-layer
    sublayers), then the final sublayers.  Intermediate activations are
    recorded in context.layer_outputs.
    """
    x = self._call_sublayers(self._sublayers_initial, x, context)
    context.layer_outputs.append(x)
    for lnum, layer in enumerate(self._layers):
      with tf.variable_scope(layer.name or ""):
        if self._recompute_grads:
          # Recompute activations on the backward pass instead of storing
          # them (memory/compute trade-off).  Default arguments bind the
          # current layer and context at function-definition time.
          def fn(x, l=layer, c=context):
            return self._layer_fn(x, l, c)
          x = mtf.recompute_grad(fn, [x])
        else:
          x = self._layer_fn(x, layer, context)
      if lnum != len(self._layers) - 1:
        # The final layer's output is appended below, after the final
        # sublayers and padding mask have been applied.
        context.layer_outputs.append(x)
      context.layer_index += 1
    x = self._call_sublayers(self._sublayers_final, x, context)
    x = sublayer_mask_padding(x, self, context)
    context.layer_outputs.append(x)
    return x
def _call_sublayers(self, sublayers, x, context):
for s in sublayers:
x = s(x, self, context)
return x
  def _layer_fn(self, x, layer, context):
    """Call the layer and its associated sublayers.

    Args:
      x: a Tensor
      layer: a Layer
      context: a Context
    Returns:
      a Tensor
    Raises:
      ValueError: if the per-layer sublayers change the shape of x.
    """
    # Stash the current layer and its input on the context so sublayers
    # (e.g. sublayer_call_layer, sublayer_residual) can reach them.
    context.current_layer = layer
    context.current_layer_input = x
    y = self._call_sublayers(self._sublayers_per_layer, x, context)
    # Residual wiring requires shape preservation; fail fast otherwise.
    if y.shape != x.shape:
      raise ValueError(
          "Layer %s returned misshaped output x=%s y=%s"
          % (layer.__class__.__name__, x, y))
    return y
  @property
  def num_layers(self):
    """Number of TransformerLayers in this stack."""
    return len(self.layers)

  @property
  def layers(self):
    """The list of TransformerLayer objects passed at construction."""
    return self._layers
@gin.configurable
def sublayer_call_layer(x, layer_stack, context):
  """Sublayer that masks padding, then invokes the current layer's body."""
  masked = sublayer_mask_padding(x, layer_stack, context)
  layer = context.current_layer
  # Scope variables by the layer's class name.
  with tf.variable_scope(layer.__class__.__name__):
    return layer.call(context, masked)
@gin.configurable
def sublayer_mask_padding(x, layer_stack, context):
  """Zero out activations in padding regions.

  This "fixes" a bug where extreme values leak from the padding into the
  non-padding regions.
  TODO(noam): understand this better and make a more principled fix.

  Args:
    x: a Tensor
    layer_stack: ignored
    context: a Tensor

  Returns:
    a Tensor
  """
  del layer_stack
  if not isinstance(context.sequence_id, mtf.Tensor):
    # No per-position sequence ids available, so there is nothing to mask.
    return x
  # Positions with sequence_id == 0 are padding; zero their activations.
  mask = mtf.cast(mtf.not_equal(context.sequence_id, 0),
                  context.activation_dtype)
  return x * mask
@gin.configurable
def sublayer_rms_norm(x, layer_stack, context, epsilon=1e-6, name="rms_norm"):
  """RMS normalization with a learned per-channel scale.

  Args:
    x: an input mtf.Tensor
    layer_stack: a LayerStack
    context: a Context
    epsilon: a float
    name: a string

  Returns:
    a mtf.Tensor
  """
  del layer_stack
  model_dim = context.model.model_dim
  with tf.variable_scope(name):
    # Learned gain: one value per model-dim channel (plus ensemble dims).
    gain = mtf.get_variable(
        context.mesh,
        "scale",
        mtf.Shape(context.model.ensemble_dims + [model_dim]),
        initializer=tf.ones_initializer(),
        dtype=context.variable_dtype)
    mean_square = mtf.reduce_mean(mtf.square(x), reduced_dim=model_dim)
    return x * mtf.rsqrt(mean_square + epsilon) * gain
@gin.configurable
def sublayer_legacy_rms_norm(x, layer_stack, context):
  """Deprecated - keep for checkpoint/operative_config.gin compatibility."""
  # Same as sublayer_rms_norm but under the old "layer_norm" variable scope,
  # so variables line up with checkpoints written by older code.
  return sublayer_rms_norm(x, layer_stack, context, name="layer_norm")
@gin.configurable
def sublayer_legacy_final_rms_norm(x, layer_stack, context):
  """Deprecated - keep for checkpoint/operative_config.gin compatibility."""
  # Same as sublayer_rms_norm but under the old "final_layer_norm" variable
  # scope, so variables line up with checkpoints written by older code.
  return sublayer_rms_norm(x, layer_stack, context, name="final_layer_norm")
@gin.configurable
def sublayer_rms_norm_subsampled(x, layer_stack, context, percentage=100.,
                                 epsilon=1e-6):
  """RMS normalization whose variance estimate uses a channel subset.

  Only the first *percentage* percent of model-dim channels contribute to
  the variance estimate; the resulting scaling is applied to all channels.
  """
  del layer_stack
  model_dim = context.model.model_dim
  with tf.variable_scope("layer_norm_subsampled"):
    # Learned per-channel gain (plus ensemble dims).
    gain = mtf.get_variable(
        context.mesh,
        "scale",
        mtf.Shape(context.model.ensemble_dims + [model_dim]),
        initializer=tf.ones_initializer(),
        dtype=context.variable_dtype)
    # Number of leading channels used for the variance estimate:
    sampled_size = int(math.ceil(model_dim.size * percentage/100))
    var_dim = mtf.Dimension(model_dim.name, sampled_size)
    sampled_activations = mtf.slice(x, 0, var_dim.size, var_dim.name)
    variance = mtf.reduce_mean(
        mtf.square(sampled_activations), reduced_dim=var_dim)
    return x * mtf.rsqrt(variance + epsilon) * gain
@gin.configurable
def sublayer_scale_norm(x,
                        layer_stack,
                        context,
                        epsilon=1e-6,
                        name="scale_norm"):
  """Scale normalization.

  Like RMS norm, but the learned scale is a single scalar (per ensemble
  member) instead of a per-channel vector.

  Args:
    x: an input mtf.Tensor
    layer_stack: a LayerStack
    context: a Context
    epsilon: a float
    name: a string

  Returns:
    a mtf.Tensor
  """
  del layer_stack
  model_dim = context.model.model_dim
  with tf.variable_scope(name):
    gain = mtf.get_variable(
        context.mesh,
        "scale",
        context.model.ensemble_dims,
        initializer=tf.ones_initializer(),
        dtype=context.variable_dtype)
    mean_square = mtf.reduce_mean(mtf.square(x), reduced_dim=model_dim)
    return x * mtf.rsqrt(mean_square + epsilon) * gain
@gin.configurable
def sublayer_residual(x, layer_stack, context):
  """Residual connection: add the current layer's input back to its output."""
  del layer_stack
  return context.current_layer_input + x
@gin.configurable
def sublayer_dropout(x, layer_stack, context, dropout_rate=0.0):
  """Dropout sublayer; a no-op at inference time or with zero rate."""
  del layer_stack
  if not context.train or dropout_rate <= 0:
    return x
  # Share the dropout mask across all but the batch and model dims.
  noise_shape = mtf.Shape(context.batch_dims + [context.model.model_dim])
  return mtf.dropout(
      x, context.train, rate=dropout_rate, noise_shape=noise_shape)
@gin.configurable
def sublayer_annealed_dropout(x,
                              layer_stack,
                              context,
                              init_dropout_rate=0.0,
                              start_step=None,
                              end_step=None):
  """Transformer sublayer which linearly anneals the dropout rate.

  The rate starts at init_dropout_rate and is annealed between
  start_step and end_step (both required).
  """
  # Both annealing endpoints must be configured explicitly.
  if start_step is None:
    raise ValueError("The start step for dropout annealing required.")
  if end_step is None:
    raise ValueError("The end step for dropout annealing required.")
  del layer_stack
  if not context.train or init_dropout_rate <= 0:
    return x
  # Share the dropout mask across all but the batch and model dims.
  noise_shape = mtf.Shape(context.batch_dims + [context.model.model_dim])
  return mtf.layers.annealed_dropout(
      x,
      context.train,
      start_step,
      end_step,
      init_rate=init_dropout_rate,
      noise_shape=noise_shape)
@gin.configurable
def sublayer_clip_activation_gradient(x, layer_stack, context, rms_norm=1.0):
  """Clip activation gradient by RMS-norm."""
  # NOTE(review): presumably forward-identity with gradient clipping done by
  # mtf.layers.clip_activation_gradient -- confirm against the mtf API.
  del layer_stack, context
  return mtf.layers.clip_activation_gradient(x, rms_norm)
@gin.configurable
def sublayer_legacy_dropout(x, layer_stack, context):
  """Deprecated - dropout using the rate stored on the LayerStack."""
  # Legacy mode reads dropout_rate from the LayerStack (set in _legacy_init)
  # instead of from gin configuration.
  return sublayer_dropout(x, layer_stack, context,
                          dropout_rate=layer_stack.dropout_rate)
@gin.configurable
def sublayer_rezero(x, layer_stack, context, initial_value=0.0):
  """Multiply by zero-initialized scalar (residual not included)."""
  del layer_stack
  # Learned scalar gate, zero-initialized by default so the layer initially
  # contributes nothing to the residual stream.
  gate = mtf.get_variable(
      x.mesh, "rezero_weight", shape=context.model.ensemble_dims,
      dtype=context.variable_dtype,
      initializer=tf.constant_initializer(initial_value))
  return x * gate
@gin.configurable
class ReversibleLayerStack(LayerStack):
  """A version of LayerStack that uses a revnet.

  This should be very memory-efficient if LayerStack.recompute_grads
  is set to True.

  Also, sublayers_per_layer should be overridden in gin, so as to remove the
  residual.

  "Reformer" https://arxiv.org/abs/2001.04451 uses something like this.
  """

  def call(self, context, x):
    """Call the layer stack."""
    x = self._call_sublayers(self._sublayers_initial, x, context)
    context.layer_outputs.append(x)
    # Reversible residual stream: the input is duplicated into two halves
    # (x1, x2); the *_backwards tensors start as None and are threaded
    # through each half-residual step.
    x1, x1_backwards, x2, x2_backwards = x, None, x, None
    for lnum, layer in enumerate(self._layers):
      with tf.variable_scope(layer.name or ""):
        # Default args bind the current layer/context at definition time.
        def fn(x, l=layer, c=context):
          return self._layer_fn(x, l, c)
        x1, x1_backwards, x2, x2_backwards = (
            mtf.layers.reversible_half_residual_and_swap(
                x1, x1_backwards, x2, x2_backwards, fn,
                recompute_grads=self._recompute_grads))
      if lnum != len(self._layers) - 1:
        # NOTE(review): this appends the *pre-loop* x, not the current
        # reversible-stream state -- unlike LayerStack.call, every entry is
        # identical here.  Confirm whether this is intended.
        context.layer_outputs.append(x)
      context.layer_index += 1
    # Recombine the two halves of the reversible stream.
    x = x1 + x2
    x = self._call_sublayers(self._sublayers_final, x, context)
    # NOTE(review): unlike LayerStack.call, no sublayer_mask_padding is
    # applied here before the final append -- confirm intended.
    context.layer_outputs.append(x)
    return x
@gin.configurable
def sublayer_true_layer_norm(x, layer_stack, context, epsilon=1e-6):
  """True (aka normal) Normalization."""
  del layer_stack
  # Delegate to the standard mean/variance layer norm over the model dim.
  with tf.variable_scope("true_layer_norm"):
    return mtf.layers.layer_norm(x, context.model.model_dim, epsilon)
@gin.configurable
class Unitransformer(object):
"""A Transformer model with only one layer stack, e.g. a language model.
This class is also used as part of Bitransformer, which contains two
Unitransformers.
"""
def __init__(self,
layer_stack,
d_model=1024,
input_vocab_size=gin.REQUIRED,
output_vocab_size=gin.REQUIRED,
autoregressive=gin.REQUIRED,
max_length=gin.REQUIRED,
shared_embedding_and_softmax_weights=False,
label_smoothing=0.0,
z_loss=1e-4,
name="transformer",
layout=None,
mesh_shape=None,
vocab_divisor=128,
ensemble=None,
loss_fn=None,
positional_embedding=True,
sinusoid_positional_embedding=False,
input_full_attention=False,
loss_on_targets_only=False,
loss_denominator=None,
token_dropout_rate=0.0):
"""Create a Unitransformer.
Args:
layer_stack: a LayerStack
d_model: an integer
input_vocab_size: an integer
output_vocab_size: an integer
autoregressive: a boolean
max_length: an integer
shared_embedding_and_softmax_weights: a boolean
label_smoothing: a float
z_loss: a float
name: a string
layout: optional - an input to mtf.convert_to_layout_rules
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
mesh_shape: optional - an input to mtf.convert_to_shape
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
vocab_divisor: an integer
ensemble: an optional integer (for creating an ensemble of models)
loss_fn: an optional function to override self._compute_loss
positional_embedding: a boolean
sinusoid_positional_embedding: a boolean, whether to use the sinusoid
positional embedding from the "Attention Is All You Need" paper. If
True, this will override the positional_embedding setting.
input_full_attention: a boolean
This is an option for seq-to-seq as a language model. Each example
consists of [<inputs>, EOS=1, <targets>, EOS=1]. In the self-attention
layers, positions in the inputs portion of the sequence can see the
entire inputs portion, while positions in the targets portion of the
sequence cannot see future positions.
loss_on_targets_only: a boolean
This is an option for seq-to-seq as a language model. Each example
consists of [<inputs>, EOS=1, <targets>, EOS=1]. We zero-out the
loss for the inputs portion of the example.
loss_denominator: an optional float. The default behavior is to
compute the mean loss across all tokens in the batch, making the
denominator the size
example, the
actual exponentiation is done by Python at compilation time, so while
the expression can take a noticeable amount of time to compute, that
time is purely due to the compilation:
In [5]: time 3**9999;
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00 s
In [6]: time 3**999999;
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00 s
Compiler : 0.78 s
"""
# fail immediately if the given expression can't be compiled
expr = self.shell.prefilter(parameter_s,False)
# Minimum time above which compilation time will be reported
tc_min = 0.1
try:
mode = 'eval'
t0 = clock()
code = compile(expr,'<timed eval>',mode)
tc = clock()-t0
except SyntaxError:
mode = 'exec'
t0 = clock()
code = compile(expr,'<timed exec>',mode)
tc = clock()-t0
# skew measurement as little as possible
glob = self.shell.user_ns
clk = clock2
wtime = time.time
# time execution
wall_st = wtime()
if mode=='eval':
st = clk()
out = eval(code,glob)
end = clk()
else:
st = clk()
exec code in glob
end = clk()
out = None
wall_end = wtime()
# Compute actual times and report
wall_time = wall_end-wall_st
cpu_user = end[0]-st[0]
cpu_sys = end[1]-st[1]
cpu_tot = cpu_user+cpu_sys
print "CPU times: user %.2f s, sys: %.2f s, total: %.2f s" % \
(cpu_user,cpu_sys,cpu_tot)
print "Wall time: %.2f s" % wall_time
if tc > tc_min:
print "Compiler : %.2f s" % tc
return out
@testdec.skip_doctest
def magic_macro(self,parameter_s = ''):
"""Define a set of input lines as a macro for future re-execution.
Usage:\\
%macro [options] name n1-n2 n3-n4 ... n5 .. n6 ...
Options:
-r: use 'raw' input. By default, the 'processed' history is used,
so that magics are loaded in their transformed version to valid
Python. If this option is given, the raw input as typed as the
command line is used instead.
This will define a global variable called `name` which is a string
made of joining the slices and lines you specify (n1,n2,... numbers
above) from your input history into a single string. This variable
acts like an automatic function which re-executes those lines as if
you had typed them. You just type 'name' at the prompt and the code
executes.
The notation for indicating number ranges is: n1-n2 means 'use line
numbers n1,...n2' (the endpoint is included). That is, '5-7' means
using the lines numbered 5,6 and 7.
Note: as a 'hidden' feature, you can also use traditional python slice
notation, where N:M means numbers N through M-1.
For example, if your history contains (%hist prints it):
44: x=1
45: y=3
46: z=x+y
47: print x
48: a=5
49: print 'x',x,'y',y
you can create a macro with lines 44 through 47 (included) and line 49
called my_macro with:
In [55]: %macro my_macro 44-47 49
Now, typing `my_macro` (without quotes) will re-execute all this code
in one pass.
You don't need to give the line-numbers in order, and any given line
number can appear multiple times. You can assemble macros with any
lines from your input history in any order.
The macro is a simple object which holds its value in an attribute,
but IPython's display system checks for macros and executes them as
code instead of printing them when you type their name.
You can view a macro's contents by explicitly printing it with:
'print macro_name'.
For one-off cases which DON'T contain magic function calls in them you
can obtain similar results by explicitly executing slices from your
input history with:
In [60]: exec In[44:48]+In[49]"""
opts,args = self.parse_options(parameter_s,'r',mode='list')
if not args:
macs = [k for k,v in self.shell.user_ns.items() if isinstance(v, Macro)]
macs.sort()
return macs
if len(args) == 1:
raise UsageError(
"%macro insufficient args; usage '%macro name n1-n2 n3-4...")
name,ranges = args[0], args[1:]
#print 'rng',ranges # dbg
lines = self.extract_input_slices(ranges,opts.has_key('r'))
macro = Macro(lines)
self.shell.define_macro(name, macro)
print 'Macro `%s` created. To execute, type its name (without quotes).' % name
print 'Macro contents:'
print macro,
def magic_save(self,parameter_s = ''):
"""Save a set of lines to a given filename.
Usage:\\
%save [options] filename n1-n2 n3-n4 ... n5 .. n6 ...
Options:
-r: use 'raw' input. By default, the 'processed' history is used,
so that magics are loaded in their transformed version to valid
Python. If this option is given, the raw input as typed as the
command line is used instead.
This function uses the same syntax as %macro for line extraction, but
instead of creating a macro it saves the resulting string to the
filename you specify.
It adds a '.py' extension to the file if you don't do so yourself, and
it asks for confirmation before overwriting existing files."""
opts,args = self.parse_options(parameter_s,'r',mode='list')
fname,ranges = args[0], args[1:]
if not fname.endswith('.py'):
fname += '.py'
if os.path.isfile(fname):
ans = raw_input('File `%s` exists. Overwrite (y/[N])? ' % fname)
if ans.lower() not in ['y','yes']:
print 'Operation cancelled.'
return
cmds = ''.join(self.extract_input_slices(ranges,opts.has_key('r')))
f = file(fname,'w')
f.write(cmds)
f.close()
print 'The following commands were written to file `%s`:' % fname
print cmds
    def _edit_macro(self,mname,macro):
        """open an editor with the macro data in a file"""
        # Dump the macro source into a temp file and hand it to the user's
        # configured editor hook (blocks until the editor returns).
        filename = self.shell.mktempfile(macro.value)
        self.shell.hooks.editor(filename)
        # and make a new macro object, to replace the old one
        mfile = open(filename)
        mvalue = mfile.read()
        mfile.close()
        self.shell.user_ns[mname] = Macro(mvalue)
    def magic_ed(self,parameter_s=''):
        """Alias to %edit."""
        # Thin convenience wrapper; all logic lives in magic_edit().
        return self.magic_edit(parameter_s)
@testdec.skip_doctest
def magic_edit(self,parameter_s='',last_call=['','']):
"""Bring up an editor and execute the resulting code.
Usage:
%edit [options] [args]
%edit runs IPython's editor hook. The default version of this hook is
set to call the __IPYTHON__.rc.editor command. This is read from your
environment variable $EDITOR. If this isn't found, it will default to
vi under Linux/Unix and to notepad under Windows. See the end of this
docstring for how to change the editor hook.
You can also set the value of this editor via the command line option
'-editor' or in your ipythonrc file. This is useful if you wish to use
specifically for IPython an editor different from your typical default
(and for Windows users who typically don't set environment variables).
This command allows you to conveniently edit multi-line code right in
your IPython session.
If called without arguments, %edit opens up an empty editor with a
temporary file and will execute the contents of this file when you
close it (don't forget to save it!).
Options:
-n <number>: open the editor at a specified line number. By default,
the IPython editor hook uses the unix syntax 'editor +N filename', but
you can configure this by providing your own modified hook if your
favorite editor supports line-number specifications with a different
syntax.
-p: this will call the editor with the same data as the previous time
it was used, regardless of how long ago (in your current session) it
was.
-r: use 'raw' input. This option only applies to input taken from the
user's history. By default, the 'processed' history is used, so that
magics are loaded in their transformed version to valid Python. If
this option is given, the raw input as typed as the command line is
used instead. When you exit the editor, it will be executed by
IPython's own processor.
-x: do not execute the edited code immediately upon exit. This is
mainly useful if you are editing programs which need to be called with
command line arguments, which you can then do using %run.
Arguments:
If arguments are given, the following possibilities exist:
- The arguments are numbers or pairs of colon-separated numbers (like
1 4:8 9). These are interpreted as lines of previous input to be
loaded into the editor. The syntax is the same of the %macro command.
- If the argument doesn't start with a number, it is evaluated as a
variable and its contents loaded into the editor. You can thus edit
any | |
min(minimum_x, x)
maximum_x = max(maximum_x, x)
minimum_y = min(minimum_y, y)
maximum_y = max(maximum_y, y)
return P2D(minimum_x, minimum_y), P2D(maximum_x, maximum_y)
    # SimplePolygon.corner_arc_append():
    def corner_arc_append(self, corner: P2D, corner_radius: float,
                          flags: str, tracing: bool = False) -> None:
        """Append a rounded corner to the polygon.

        Args:
            * *corner* (*P2D*): The point where the two edges would meet
              without corner rounding.
            * *corner_radius* (*float*): The radius of the corner arc.
            * *flags* (*str*): The arc entry direction followed by the
              arc exit direction, where the direction is one of "NEWS"
              for North, East, West, and South.

        This code only works for corners that are right angles and
        where the edges are aligned with the X and Y axes.

        """
        # Pretty much everything in this routine is counter-intuitive.
        # Build *flags_table*:
        # The angle stored is the *opposite* of the letter flag because it
        # needs to be considered from the reference of the *corner_center*:
        degrees90: float = pi / 2.0
        degrees180: float = degrees90 + degrees90
        degrees270: float = degrees180 + degrees90
        degrees360: float = degrees180 + degrees180
        # Maps each compass direction to (arc angle, offset toward the
        # corner-arc center):
        flags_table: Dict[str, Tuple[float, P2D]] = {
            "N": (degrees270, P2D(0.0, corner_radius)),
            "E": (degrees180, P2D(corner_radius, 0.0)),
            "W": (0.0, P2D(-corner_radius, 0.0)),
            "S": (degrees90, P2D(0.0, -corner_radius)),
        }

        # Do any requested *tracing*:
        # tracing = True
        if tracing:  # pragma: no cover
            print("===>corner_arc_append()")
            print(f"corner:{corner}")
            print(f"flags:'{flags}'")

        # Verify that *flags* is valid:
        # NOTE(review): asserts vanish under `python -O`; consider raising
        # ValueError if this is a public entry point.
        assert len(flags) == 2
        start_flag: str = flags[0]
        end_flag: str = flags[1]
        assert start_flag in "NEWS"
        assert end_flag in "NEWS"
        if tracing:  # pragma: no cover
            print(f"start_flag:'{start_flag}'")
            print(f"end_flag:'{end_flag}'")

        # Extract the angles and offsets associated with each flag:
        start_angle: float
        end_angle: float
        start_offset: P2D
        end_offset: P2D
        start_angle, start_offset = flags_table[start_flag]
        end_angle, end_offset = flags_table[end_flag]
        if tracing:  # pragma: no cover
            print(f"start_angle:{degrees(start_angle):.5f} deg")
            print(f"end_angle:{degrees(end_angle):.5f} deg")
            print(f"start_offset:{start_offset}")
            print(f"end_offset:{end_offset}")

        # These are fix-ups are a kludge to deal with some corner cases:
        # keep the swept angle within a half turn by unwrapping by 360 deg.
        if start_angle - end_angle > degrees180:
            start_angle -= degrees360
        if end_angle - start_angle > degrees180:
            end_angle -= degrees360

        # Append the corner arc to *external_polygon*:
        corner_center: P2D = corner + start_offset + end_offset
        if tracing:  # pragma: no cover
            print(f"corner_center:{corner_center}")

        # This is really counter intuitive, *start_angle* and *end_angle* are swapped.
        # If you draw it on a piece of paper, you will realize that the arc angle is
        # always perpendicular to the entry and exit edges.  It turns out that swapping
        # the two angles performs the correct 90 degree rotation.
        if tracing:  # pragma: no cover
            print(f"end_angle={degrees(end_angle):.5f} deg")
            print(f"start_angle={degrees(start_angle):.5f} deg")

        # Finally perform the *arc_append*:
        external_polygon: SimplePolygon = self
        external_polygon.arc_append(corner_center, corner_radius,
                                    end_angle, start_angle, 0.0, tracing)
        if tracing:  # pragma: no cover
            print("<===corner_arc_append()")
# SimplePolygon.is_locked():
def is_locked(self) -> bool:
"""Return whether SimplePolygon is locked or not."""
# Grab *locked* flag from *simple_polygon* (i.e. *self*) and return it:
simple_polygon: SimplePolygon = self
locked: bool = simple_polygon.locked
return locked
# SimplePolygon.key():
def key(self) -> Tuple[Any, ...]:
"""Return a key for *simple_polygon*."""
simple_polygon: SimplePolygon = self
name: str = simple_polygon.name
points: List[P2D] = simple_polygon.points
assert len(points) >= 1
point0: P2D = points[0]
x: float = point0.x
y: float = point0.y
x_maximum: float = x
x_minimum: float = x
y_maximum: float = y
y_minimum: float = y
point: P2D
for point in points:
x = point.x
y = point.y
x_maximum = max(x_maximum, x)
x_minimum = min(x_minimum, x)
y_maximum = max(y_maximum, y)
y_minimum = min(y_minimum, y)
dx: float = x_maximum - x_minimum
dy: float = y_maximum - y_minimum
x_center: float = (x_maximum + x_minimum) / 2.0
y_center: float = (y_maximum + y_minimum) / 2.0
key: Tuple[Any, ...] = ("SimplePolygon", name, x_center, y_center, dx, dy, 0.0)
return key
# SimplePolygon.lock():
def lock(self) -> None:
"""Force SimplePolygon to be locked."""
simple_polygon: SimplePolygon = self
simple_polygon.locked = True
# SimplePolygon.point_append():
def point_append(self, point: P2D) -> None:
"""Append a point to a SimplePolygon.
Args:
*point* (*P2D*): The 2-dimensional point to the
to *simple_polygon* (i.e. *self*.)
Raises:
*ValueError*(*str*): if *simple_polygon* (i.e. *self*.)
is locked.
"""
# Grab *points* from *simple_polygon* (i.e. *self*) and tack *point* onto the end:
simple_polygon: SimplePolygon = self
points: List[P2D] = simple_polygon.points
points.append(point)
# SimplePolygon.points_extend():
def points_extend(self, new_points: List[P2D]) -> None:
"""Append a list of points to a SimplePolygon.
Args:
*new_points* (*List*[*P2D*]): A list of points to append.
Raises:
*ValueError*(*str*): if *simple_polygon* (i.e. *self*.)
is locked.
"""
# Grab *points* from *simple_polygon* (i.e. *self*) and tack *new_points* onto the end:
simple_polygon: SimplePolygon = self
points: List[P2D] = simple_polygon.points
new_point: P2D
for new_point in new_points:
points.append(new_point)
# Scad.points_get():
def points_get(self) -> List[P2D]:
"""Return the points associated with SimplePolygon."""
simple_polygon: SimplePolygon = self
points: List[P2D] = simple_polygon.points
# Make a copy:
points = list(points[:])
return points
# SimplePolygon.points_scad_lines_append():
def points_scad_lines_append(self, scad_lines: List[str], indent: str, start_index: int) -> int:
"""Append the Polygon points to a list of lines.
Args:
*scad_lines* (*List*[*str*]): The list of OpenSCAD lines to
append to.
*indent (*str): The indentation text to prefix to each line.
*start_index* (*int*): The starting index for points.
Returns:
(*int*) Returns the *end_index* after the points have been
output.
"""
# Grab some values from *simple_polygon* (i.e. *self*):
simple_polygon: SimplePolygon = self
name: str = simple_polygon.name
points: List[P2D] = simple_polygon.points
# Compute *end_index* from *start_index* and *points_size*:
points_size: int = len(points)
end_index: int = start_index + points_size
# Figure out the number of *slice_points* to output:
slice_size: int = 4
slices_count: int = int(ceil(float(points_size) / float(slice_size)))
# Append a debugging line:
scad_lines.append(f"{indent} // Polygon '{name}' {start_index}:{end_index-1}")
# Sweep through *points* and output chunks of *slice_points*:
slice_index: int
for slice_index in range(slices_count):
# Extract the next chunk of *slice_points*:
slice_start: int = slice_index * slice_size
slice_end: int = min((slice_index + 1) * slice_size, points_size)
slice_points: List[P2D] = points[slice_start:slice_end]
# Just to be paranoid, make sure we actually have at least one point:
if slice_points:
# Splice *splice_point* together as a comma separated list:
point_texts: List[str] = []
slice_point: P2D
for slice_point in slice_points:
x_text: str = "{0:.5f}".format(slice_point.x)
y_text: str = "{0:.5f}".format(slice_point.y)
x_text = "0.000" if x_text == "-0.000" else x_text
y_text = "0.000" if y_text == "-0.000" else y_text
point_texts.append(f"[{x_text}, {y_text}]")
slice_text: str = ', '.join(point_texts)
scad_lines.append(f"{indent} {slice_text}, "
f"// {start_index + slice_start}:"
f"{start_index + slice_end - 1}")
return end_index
# SimplePolygon.polygon_append():
def polygon_append(self, tail_polygon: "SimplePolygon") -> None:
"""Treat a polygon as a list of points and append them."""
simple_polygon: SimplePolygon = self
polygon_points: List[P2D] = simple_polygon.points
for point in tail_polygon.points:
if len(polygon_points) == 0:
polygon_points.append(point)
else:
# Do not append two points that are too close to one another:
last_point: P2D = polygon_points[-1]
if last_point.distance(point) > 0.00001:
if simple_polygon.locked:
raise ValueError(f"'{simple_polygon.name}' is locked.")
polygon_points.append(point)
    # SimplePolygon.rounded_arc_append():
    def rounded_arc_append(self, flags: str, arc_center: P2D, arc_radius: float,
                           start_angle: float, end_angle: float, corner_radius: float) -> None:
        """Append an arc with rounded corners to the external polygon.

        Args:
            * *flags* (*str*):
              Two 3-character strings (6 characters total) that are
              passed down to *SimplePolygon*.*arc_edge_corner_append*().
              The first 3 character string is for *start_angle* and
              the second 3 character string is for *end_angle*.
            * *arc_center* (*P2D*):
              The center of the large arc (millimeters, millimeters).
            * *arc_radius* (*float*):
              The arc radius of the large arc in millimeters.
            * *start_angle* (*float*):
              The start angle in radians.
            * *end_angle* (*float*):
              The end angle in radians.
            * *corner_radius* (*float*):
              The corner radius in millimeters.

        """
        # Extract the *start_flags* and *end_flags* from *flags*:
        # NOTE(review): the docstring says two 3-character groups (6 total),
        # but the code slices two 4-character groups (8 total) -- confirm
        # which is correct against arc_edge_corner_append().
        assert len(flags) == 8
        start_flags = flags[0:4]
        end_flags = flags[4:8]

        # Create *start_corner* and hang onto *start_angle*:
        start_polygon: SimplePolygon = SimplePolygon("Start Corner", [], lock=False)
        start_corner_angle: float = start_polygon.arc_edge_corner_append(arc_center, arc_radius,
                                                                         start_angle,
                                                                         corner_radius,
                                                                         start_flags)

        # Create *end_corner* and hang onto *end_angle*:
        end_polygon: SimplePolygon = SimplePolygon("End Corner", [], lock=False)
        end_corner_angle: float = end_polygon.arc_edge_corner_append(arc_center, arc_radius,
                                                                     end_angle, corner_radius,
                                                                     end_flags)

        # Now append the *start_corner*, intermediate arc, and the *end_corner* to
        # *simple_polygon* (i.e. *self*):
        simple_polygon: SimplePolygon = self
        simple_polygon.polygon_append(start_polygon)
        simple_polygon.arc_append(arc_center, arc_radius, start_corner_angle, end_corner_angle, 0.0)
        simple_polygon.polygon_append(end_polygon)
# SimplePolygon.scad_lines_append():
def scad_lines_append(self, scad_lines: List[str], indent: str) -> None:
"""TODO."""
# Grab *class_name* from *simple_polygon* (i.e. | |
...]]
"""
return list(product(*(factor.levels for factor in self.window.factors)))
@dataclass(eq=False)
class ElseLevel(Level):
    # TODO: I'm honestly not sure what this kind of level is for, semantically.
    """A :class:`.Level` for... something.

    :param name:
        The name of the level.
    """

    def derive_level_from_levels(self, other_levels: List[DerivedLevel]) -> DerivedLevel:
        """Converts the :class:`.ElseLevel` into a :class:`.DerivedLevel` by
        combining it with other specified
        :class:`DerivedLevels <.DerivedLevel>`.

        :param other_levels:
            A list of :class:`DerivedLevels <.DerivedLevel>` for use in
            constructing the new level.
        """
        if not other_levels:
            # With nothing to complement, produce a level that never matches.
            return DerivedLevel(self.name, WithinTrialDerivationWindow(lambda: False, []))
        template = other_levels[0]
        # TODO (kept from the original): we take only every *n*th factor from
        # the derivation window (where *n* is the window's width), because the
        # `DerivedLevel` initializer expands the factor list by the width.
        # Why duplicate just to de-duplicate?  Perhaps `DerivedLevel`s need a
        # different internal representation to avoid this real duplication.
        deduplicated_factors = template.window.factors[::template.window.width]

        def complement_predicate(*args) -> bool:
            # True exactly when none of the other levels' predicates match.
            return not any(level.window.predicate(*args) for level in other_levels)

        # TODO: This exhibits the same issue as the preceding TODO.
        window = DerivationWindow(complement_predicate,
                                  deduplicated_factors,
                                  template.window.width,
                                  template.window.stride)
        return DerivedLevel(self.name, window)
###############################################################################
##
## Factors
##
@dataclass
class Factor:
"""An independent variable in a factorial experiment. Factors are composed
of :class:`Levels <.Level>` and come in two flavors:
- :class:`.SimpleFactor`
- :class:`.DerivedFactor`
However, both classes can be implicitly constructed by the base
:class:`.Factor` constructor, so we recommend you always use that for
creating factors.
During :class:`.Factor` construction, the first :class:`.Level` in the
:attr:`~.Factor.initial_levels` is dynamically type-checked. If it's a
:class:`.DerivedLevel` or :class:`.ElseLevel`, a `.DerivedFactor` will be
initialized. Otherwise, a `.SimpleFactor` will be initialized.
In all cases, the :attr:`~.Factor.initial_levels` will be processed. This
step ensures that all of a factor's :attr:`~.Factor.levels` will always be
:class:`Levels <.Level>`. The levels are processed according to these
rules:
1. A :class:`.Level` instance is left alone.
2. A :class:`str` instance is converted into a :class:`.SimpleLevel`.
3. A :class:`tuple` or :class:`list` consisting of exactly one :class:`str`
followed by one :class:`int` will be converted into a
:class:`.SimpleLevel` with the :class:`str` as its name and the
:class:`int` as its weight.
4. Anything else is converted into a :class:`.SimpleLevel` by using its
string representation as a level name.
.. note::
The :class:`.DerivedFactor` subclass does additional processing after
these steps.
:param name:
The name of this factor.
:param initial_levels:
The levels comprising this factor. The list can be made of anything,
but any values in the list that are not instances of :class:`.Level` or
one of its subclasses will be converted into :class:`.SimpleLevel`
instances by using their string representation, as determined by
``SimpleLevel(str(value))``.
:type initial_levels: typing.Sequence[Any]
:rtype: .Factor
.. tip::
See :ref:`the Factorial Experiment Design section of the SweetPea guide
<guide_factorial_design>` for more about factors, levels, and how to
use them.
"""
#: The name of this factor.
name: str
#: The levels used during factor initialization.
initial_levels: InitVar[Sequence[Any]]
#: The discrete values that this factor can have.
levels: Sequence[Level] = field(init=False)
#: A mapping from level names to levels for constant-time lookup.
_level_map: Dict[str, Level] = field(init=False, default_factory=dict)
def __new__(cls, name: str, initial_levels: Sequence[Any], *_, **__) -> Factor:
    """Dispatch construction to the appropriate concrete subclass.

    When invoked on ``Factor`` itself, the first initial level decides
    whether a :class:`.DerivedFactor` or :class:`.SimpleFactor` is built.
    """
    # Names must be strings. This is a legacy requirement kept for backwards
    # compatibility; it would otherwise be the type-checker's job.
    if not isinstance(name, str):
        raise ValueError(f"Factor name not a string: {name}.")
    # Subclasses construct themselves directly — no dispatch required.
    if cls is not Factor:
        return super().__new__(cls)
    # Dispatching from `Factor` needs at least one level to inspect.
    if not initial_levels:
        raise ValueError(f"Expected at least one level for factor {name}.")
    # A derived (or else) level in the first position marks a derived factor;
    # anything else yields a simple factor.
    if isinstance(initial_levels[0], (DerivedLevel, ElseLevel)):
        subclass = DerivedFactor
    else:
        subclass = SimpleFactor
    return super().__new__(subclass)
def __post_init__(self, initial_levels: Sequence[Any]):
    """Coerce the initial levels into `Level` instances and index them.

    Raises ``ValueError`` if two levels share a name.
    """
    # Convert each raw value to a `Level` without mutating the input list.
    converted: List[Level] = []
    for raw in initial_levels:
        if isinstance(raw, Level):
            coerced = raw
        elif isinstance(raw, str):
            coerced = SimpleLevel(raw)
        elif (isinstance(raw, (tuple, list))
              and len(raw) == 2
              and isinstance(raw[0], str)
              and isinstance(raw[1], int)):
            # A ("name", weight) pair.
            coerced = SimpleLevel(raw[0], raw[1])
        else:
            # Fall back to the value's string representation as a name.
            coerced = SimpleLevel(str(raw))
        converted.append(coerced)
    # Let the concrete subclass post-process the levels.
    self.levels = self._process_initial_levels(converted)
    # Index levels by name for constant-time lookup, rejecting duplicates,
    # and link each level back to its owning factor.
    for processed in self.levels:
        if processed.name in self._level_map:
            raise ValueError(f"Factor {self.name} instantiated with duplicate level {processed.name}.")
        self._level_map[processed.name] = processed
        processed.factor = self
# NOTE: Subclasses of `Factor` must override this method!
# NOTE: This method cannot be decorated `@abstractmethod` because the
# `abc.ABC` class does not play well with `@dataclass`. Additionally,
# `Factor` is not actually an abstract base class because the custom
# `__new__` implementation prevents it from ever being instantiated.
# There is no way to express "a class which cannot be instantiated"
# that I know of, so there's no way to get around this dynamic
# `NotImplementedError` solution.
def _process_initial_levels(self, initial_levels: Sequence[Level]) -> Sequence[Level]:
    # Subclass hook: transform the already-coerced levels before they are
    # stored on `self.levels` (called from `__post_init__`). Every concrete
    # subclass must override this; `Factor` itself is never instantiated
    # because of the custom `__new__` dispatch.
    raise NotImplementedError
def __deepcopy__(self, memo: Dict):
    """Deep-copy this factor, preserving its concrete subclass."""
    klass = type(self)
    clone = klass.__new__(klass, self.name, [])
    # Register the clone in the memo before copying attributes so that any
    # back-references to this factor (e.g. level.factor) map to the clone.
    memo[id(self)] = clone
    for attr_name, attr_value in self.__dict__.items():
        setattr(clone, attr_name, deepcopy(attr_value, memo))
    return clone
def __eq__(self, other) -> bool:
    """Factors are equal when they share a type, a name, and levels."""
    if isinstance(other, type(self)):
        return self.name == other.name and self.levels == other.levels
    return False
def __str__(self) -> str:
    """Render as ``ClassName<name | [level, ...]>``."""
    joined_levels = ', '.join(str(level) for level in self.levels)
    return f"{type(self).__name__}<{self.name} | [{joined_levels}]>"
def __hash__(self) -> int:
    """Hash on the factor's name (matching `__eq__`'s primary key)."""
    return hash(self.name)
def __getitem__(self, name: str) -> Level:
    """Look up a level by name, raising ``KeyError`` when absent."""
    level = self.get_level(name)
    if level is None:
        raise KeyError(f"Factor {self.name} has no level named {name}.")
    return level
def __contains__(self, name: str) -> bool:
    """Whether this factor has a level with the given name."""
    return name in self._level_map.keys()
def get_level(self, name: str) -> Optional[Level]:
    """Returns a :class:`.Level` instance corresponding to the given name,
    if it exists within this factor. Otherwise, returns ``None``.
    """
    return self._level_map.get(name, None)
# TODO: This should be made private.
@property
def first_level(self) -> Level:
    """The first :class:`.Level` in this factor (raises ``IndexError`` if
    the factor somehow has no levels)."""
    return self.levels[0]
# TODO: Convert to a property. (backwards compatibility)
# NOTE: Alternatively, we should maybe instead prefer an actual type check
#       in most spots in the code since that will give accurate type
#       checking feedback.
def is_derived(self) -> bool:
    """Whether this factor is derived.

    .. deprecated:: 0.1.0
        Instead of using this function, we recommend doing a dynamic type
        check with :func:`isinstance`. This provides the same semantic
        information to the programmer while also providing greater type
        guarantees when using a static type checker, such as mypy.

    .. code-block:: python

        factor: Factor = ...
        if isinstance(factor, DerivedFactor):
            # Code requiring a derived factor.
            ...
        else:
            # Code if it's not a derived factor.
            ...
    """
    return isinstance(self, DerivedFactor)
@property
def has_complex_window(self) -> bool:
    """Whether this factor has a complex derivation window.

    A complex derivation window is a window of derived factors whose
    first-level derivations are themselves considered complex.
    """
    # Only derived factors carry derivation windows at all.
    if isinstance(self, DerivedFactor):
        window = self.first_level.window
        return window.width > 1 or window.stride > 1 or window.is_complex
    return False
def applies_to_trial(self, trial_number: int) -> bool:
    """Whether this factor applies to the given trial. For example, factors
    with :class:`.TransitionDerivation` derivations in their levels do not
    apply to trial number ``1``, but do apply to all subsequent trials.

    .. tip::
        Trials start their numbering at ``1``.
    """
    if trial_number <= 0:
        raise ValueError(f"Trial numbers must be 1 or greater; got {trial_number}.")
    # Non-derived factors apply to every trial.
    if not isinstance(self, DerivedFactor):
        return True
    window = self.first_level.window
    # Accumulate the effective width along the chain of complex derived
    # factors this window transitively depends on: each nested window
    # contributes its width minus the one trial of overlap.
    effective_width = window.width
    inner = window
    while (isinstance(inner.first_factor, DerivedFactor)
           and inner.first_factor.has_complex_window):
        inner = inner.first_factor.first_level.window
        effective_width += inner.width - 1
    return (trial_number >= effective_width
            and (trial_number - window.width) % window.stride == 0)
# TODO: REMOVE. (backwards compatibility)
@property
def factor_name(self) -> str:
"""An alias for :attr:`.Factor.name` for | |
"b24-31-cap29": {
"ap_mac": "6c71.edff.0593",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-21-cap33": {
"ap_mac": "6c71.edff.0597",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-32-cap23": {
"ap_mac": "6c71.edff.05a7",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-21-cap19": {
"ap_mac": "6c71.edff.05b7",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-22-cap29": {
"ap_mac": "6c71.edff.05bb",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-31-cap21": {
"ap_mac": "6c71.edff.05bf",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-21-cap27": {
"ap_mac": "6c71.edff.05c7",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-21-cap35": {
"ap_mac": "6c71.edff.096f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-2-test-9130": {
"ap_mac": "6c71.edff.09a7",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-22-cap20": {
"ap_mac": "6c71.edff.09bb",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-22-cap9": {
"ap_mac": "6c71.edff.09bf",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-22-cap17": {
"ap_mac": "6c71.edff.09cb",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-22-cap12": {
"ap_mac": "6c71.edff.09cf",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-11-cap24": {
"ap_mac": "6c71.edff.09db",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-21-cap31": {
"ap_mac": "6c71.edff.09e3",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-21-cap30": {
"ap_mac": "6c71.edff.0a47",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-22-cap37": {
"ap_mac": "6c71.edff.0c67",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-22-cap35": {
"ap_mac": "6c71.edff.0cb3",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-32-cap37": {
"ap_mac": "6c71.edff.0e7f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-32-cap12": {
"ap_mac": "6c71.edff.0e87",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-32-cap1": {
"ap_mac": "6c71.edff.0ea7",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-11-cap3": {
"ap_mac": "6c71.edff.0ebf",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-32-cap35": {
"ap_mac": "6c71.edff.0ec3",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-31-cap1": {
"ap_mac": "6c71.edff.0ec7",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-21-cap7": {
"ap_mac": "6c71.edff.0ecf",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-31-cap18": {
"ap_mac": "6c71.edff.0edb",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-21-cap4": {
"ap_mac": "6c71.edff.0fe6",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-32-cap7": {
"ap_mac": "6c71.edff.0fee",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-31-cap3": {
"ap_mac": "6c71.edff.0ff6",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-22-cap39": {
"ap_mac": "6c71.edff.0f03",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-32-cap25": {
"ap_mac": "6c71.edff.0f07",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-11-cap8": {
"ap_mac": "6c71.edff.0f0b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-11-cap6": {
"ap_mac": "6c71.edff.0f0f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-31-cap27": {
"ap_mac": "6c71.edff.1733",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-11-cap9": {
"ap_mac": "6c71.edff.19bf",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-11-cap22": {
"ap_mac": "6c71.edff.19d7",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-22-cap25": {
"ap_mac": "6c71.edff.1a9b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-31-cap5": {
"ap_mac": "6c71.edff.1c37",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-31-cap19": {
"ap_mac": "6c71.edff.1d9f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-21-cap1": {
"ap_mac": "6c71.edff.1daf",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-21-cap9": {
"ap_mac": "6c71.edff.1db7",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-32-cap17": {
"ap_mac": "6c71.edff.1de3",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-22-cap23": {
"ap_mac": "6c71.edff.1eea",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-22-cap24": {
"ap_mac": "6c71.edff.1efa",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-32-cap24": {
"ap_mac": "6c71.edff.1efe",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-11-cap11": {
"ap_mac": "6c71.edff.1e03",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-22-cap27": {
"ap_mac": "6c71.edff.1e07",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-22-cap2": {
"ap_mac": "6c71.edff.1e0b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-31-cap20": {
"ap_mac": "6c71.edff.1e13",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-22-cap32": {
"ap_mac": "6c71.edff.1e1b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-22-cap38": {
"ap_mac": "6c71.edff.1e1f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-22-cap26": {
"ap_mac": "6c71.edff.1e27",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-21-cap11": {
"ap_mac": "6c71.edff.1e2b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-32-cap2": {
"ap_mac": "6c71.edff.1e2f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-12-cap24": {
"ap_mac": "6c71.edff.1e33",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-12-cap21": {
"ap_mac": "6c71.edff.1e37",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-11-cap26": {
"ap_mac": "6c71.edff.1e3b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-12-cap34": {
"ap_mac": "6c71.edff.1e3f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-12-cap30": {
"ap_mac": "6c71.edff.1e47",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-32-cap10": {
"ap_mac": "6c71.edff.1e4b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-11-cap12": {
"ap_mac": "6c71.edff.1e53",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-11-cap2": {
"ap_mac": "6c71.edff.1e5b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-31-cap19": {
"ap_mac": "6c71.edff.1e63",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-11-cap30": {
"ap_mac": "6c71.edff.1e6b",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-31-cap28": {
"ap_mac": "6c71.edff.1e73",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-11-cap32": {
"ap_mac": "6c71.edff.1e77",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-31-cap30": {
"ap_mac": "6c71.edff.1e7f",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-12-cap11": {
"ap_mac": "6c71.edff.1e87",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-21-cap10": {
"ap_mac": "6c71.edff.1e93",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-22-cap33": {
"ap_mac": "6c71.edff.1e97",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-32-cap14": {
"ap_mac": "6c71.edff.1ea3",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-32-cap4": {
"ap_mac": "6c71.edff.1ea7",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-22-cap21": {
"ap_mac": "6c71.edff.1eab",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-11-cap29": {
"ap_mac": "6c71.edff.1eb3",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-12-cap29": {
"ap_mac": "6c71.edff.1eb7",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-11-cap27": {
"ap_mac": "6c71.edff.1ebb",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-21-cap5": {
"ap_mac": "6c71.edff.1ebf",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-2_6dd65",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-31-cap23": {
"ap_mac": "6c71.edff.1ec7",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-3_fed6b",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-31-cap8": {
"ap_mac": "6c71.edff.1ecb",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-12-cap3": {
"ap_mac": "6c71.edff.1ed3",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-22-cap15": {
"ap_mac": "6c71.edff.1ed7",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-2_88bd2",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-12-cap32": {
"ap_mac": "6c71.edff.1edb",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-1_603d5",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-12-cap1": {
"ap_mac": "6c71.edff.1edf",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-32-cap22": {
"ap_mac": "6c71.edff.1ee3",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B23_B23-3_f4142",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b24-11-cap19": {
"ap_mac": "6c71.edff.1fe6",
"site_tag_name": "default-site-tag-fabric",
"policy_tag_name": "PT_Fabri_B24_B24-1_87d8f",
"rf_tag_name": "Standard",
"misconfigured": "No",
"tag_source": "Static",
},
"b23-32-cap33": {
"ap_mac": "6c71.edff.1ff2",
"site_tag_name": | |
2),('yi', 1),
]),
(7,[('cheng', 2),('xiang', 4),('ci', 2),('tang', 2),('he', 2),('chu', 4),('xun', 2),
('jin', 3),('guan', 1),('cheng', 2),('wai', 4),('bai', 3),('sen', 1),('sen', 1),
('ying', 4),('jie', 1),('bi', 4),('cao', 3),('zi', 4),('chun', 1),('se', 4),
('ge', 2),('ye', 4),('huang', 2),('li', 2),('kong', 1),('hao', 3),('yin', 1),
('san', 1),('gu', 4),('pin', 2),('fan', 2),('tian', 1),('xia', 4),('ji', 4),
('liang', 3),('chao', 2),('kai', 1),('ji', 4),('lao', 3),('chen', 2),('xin', 1),
('chu', 1),('shi', 1),('wei', 4),('jie', 2),('shen', 1),('xian', 1),('si', 3),
('chang', 2),('shi', 3),('ying', 1),('xiong', 2),('lei', 4),('man', 3),('jin', 1),
]),
(7,[('she', 4),('nan', 2),('she', 4),('bei', 3),('jie', 1),('chun', 1),('shui', 3),
('dan', 4),('jian', 4),('qun', 2),('ou', 1),('ri', 4),('ri', 4),('lai', 2),
('hua', 1),('jing', 4),('bu', 4),('cen', 2),('yuan', 2),('ke', 4),('sao', 3),
('peng', 2),('men', 2),('jin', 1),('shi', 3),('wei', 4),('jun', 1),('kai', 1),
('pan', 2),('sun', 1),('shi', 4),('yuan', 3),('wu', 2),('jian', 1),('wei', 4),
('zun', 1),('jiu', 3),('jia', 1),('pin', 2),('zhi', 3),('jiu', 4),('pei', 1),
('ken', 2),('yu', 3),('lin', 2),('weng', 1),('xiang', 1),('dui', 4),('yin', 3),
('ge', 2),('li', 2),('hu', 1),('qu', 3),('jin', 4),('yu', 2),('bei', 1),
]),
(7,[('xi', 1),('shan', 1),('bai', 2),('xue', 3),('san', 1),('cheng', 2),('shu', 4),
('nan', 2),('pu', 3),('qing', 1),('jiang', 1),('wan', 4),('li', 3),('qiao', 2),
('hai', 3),('nei', 4),('feng', 1),('chen', 2),('zhu', 1),('di', 4),('ge', 2),
('tian', 1),('ya', 2),('ti', 4),('lei', 4),('yi', 4),('shen', 1),('yao', 2),
('wei', 2),('jiang', 1),('chi', 2),('mu', 4),('gong', 4),('duo', 1),('bing', 4),
('wei', 4),('you', 3),('juan', 1),('ai', 1),('da', 2),('sheng', 4),('chao', 2),
('kua', 4),('ma', 3),('chu', 1),('jiao', 1),('shi', 2),('ji', 2),('mu', 4),
('bu', 4),('kan', 1),('ren', 2),('shi', 4),('ri', 4),('xiao', 1),('tiao', 2),
]),
(7,[('jian', 4),('wai', 4),('hu', 1),('chuan', 2),('shou', 1),('ji', 4),('bei', 3),
('chu', 1),('wen', 2),('ti', 4),('lei', 4),('man', 3),('yi', 1),('shang', 4),
('que', 4),('kan', 4),('qi', 1),('zi', 3),('chou', 2),('he', 2),('zai', 4),
('man', 4),('juan', 3),('shi', 1),('shu', 1),('xi', 3),('yu', 4),('kuang', 2),
('bai', 2),('ri', 4),('fang', 4),('ge', 1),('xu', 1),('zong', 4),('jiu', 3),
('qing', 1),('chun', 1),('zuo', 4),('ban', 4),('hao', 3),('huan', 2),('xiang', 1),
('ji', 2),('cong', 2),('ba', 1),('xia', 2),('chuan', 1),('wu', 1),('xia', 2),
('bian', 4),('xia', 4),('xiang', 1),('yang', 2),('xiang', 4),('luo', 4),('yang', 2),
]),
(7,[('feng', 1),('ji', 2),('tian', 1),('gao', 1),('yuan', 2),('xiao', 4),('ai', 1),
('zhu', 3),('qing', 1),('sha', 1),('bai', 2),('niao', 3),('fei', 1),('huai', 2),
('wu', 2),('bian', 1),('luo', 4),('mu', 4),('xiao', 1),('xiao', 1),('xia', 4),
('bu', 2),('jin', 4),('chang', 2),('jiang', 1),('gun', 2),('gun', 3),('lai', 2),
('wan', 4),('li', 3),('bei', 1),('qiu', 1),('chang', 2),('zuo', 4),('ke', 4),
('bai', 3),('nian', 2),('duo', 1),('bing', 4),('du', 2),('deng', 1),('tai', 2),
('jian', 1),('nan', 2),('ku', 3),('hen', 4),('fan', 2),('shuang', 1),('bin', 4),
('liao', 2),('dao', 3),('xin', 1),('ting', 2),('zhuo', 2),('jiu', 3),('bai', 2),
]),
(7,[('hua', 1),('jin', 4),('gao', 1),('lou', 2),('shang', 1),('ke', 4),('xin', 1),
('wan', 4),('fang', 1),('duo', 1),('nan', 4),('ci', 3),('deng', 1),('lin', 2),
('jin', 3),('jiang', 1),('chun', 1),('se', 4),('lai', 2),('tian', 1),('di', 4),
('yu', 4),('lei', 3),('fu', 2),('yun', 2),('bian', 4),('gu', 3),('jin', 1),
('bei', 3),('ji', 2),('chao', 2),('ting', 2),('zhong', 1),('bu', 4),('gai', 3),
('xi', 1),('shan', 1),('kou', 4),('dao', 4),('mo', 4),('xiang', 1),('qin', 1),
('ke', 3),('lian', 2),('hou', 4),('zhu', 3),('huan', 2),('ci', 2),('miao', 4),
('ri', 4),('mu', 4),('liao', 2),('wei', 2),('liang', 2),('fu', 4),('yin', 2),
]),
(7,[('qing', 1),('qiu', 1),('mu', 4),('fu', 3),('jing', 3),('wu', 2),('han', 2),
('du', 2),('su', 4),('jiang', 1),('cheng', 2),('la', 4),('ju', 4),('can', 2),
('yong', 3),('ye', 4),('jiao', 3),('sheng', 1),('bei', 1),('zi', 4),('yu', 3),
('zhong', 1),('tian', 1),('yue', 4),('se', 4),('hao', 3),('shui', 2),('kan', 4),
('feng', 1),('chen', 2),('ren', 2),('ran', 3),('yin', 1),('shu', 1),('jue', 2),
('guan', 1),('sai', 4),('xiao', 1),('tiao', 2),('xing', 2),('lu', 4),('nan', 2),
('yi', 2),('ren', 3),('ling', 2),('ping', 1),('shi', 2),('nian', 2),('shi', 4),
('qiang', 2),('yi', 2),('qi', 1),('xi', 1),('yi', 4),('zhi', 1),('an', 1),
]),
(7,[('sui', 4),('mu', 4),('yin', 1),('yang', 2),('cui', 1),('duan', 2),('jing', 3),
('tian', 1),('ya', 2),('shuang', 1),('xue', 3),('ji', 4),('han', 2),('xiao', 1),
('wu', 3),('geng', 1),('gu', 2),('jiao', 3),('sheng', 1),('bei', 1),('zhuang', 4),
('san', 1),('xia', 2),('xing', 1),('he', 2),('ying', 3),('dong', 4),('yao', 2),
('ye', 3),('ku', 1),('qian', 1),('jia', 1),('wen', 2),('zhan', 4),('fa', 2),
('yi', 2),('ge', 1),('shu', 4),('chu', 4),('qi', 3),('yu', 2),('qiao', 2),
('wo', 4),('long', 2),('yue', 4),('ma', 3),('zhong', 1),('huang', 2),('tu', 3),
('ren', 2),('shi', 4),('yin', 1),('shu', 1),('man', 4),('ji', 4),('liao', 2),
]),
(7,[('zhi', 1),('li', 2),('dong', 1),('bei', 3),('feng', 1),('chen', 2),('ji', 4),
('piao', 1),('bo', 2),('xi', 1),('nan', 2),('tian', 1),('di', 4),('jian', 1),
('san', 1),('xia', 2),('lou', 2),('tai', 2),('yan', 1),('ri', 4),('yue', 4),
('wu', 3),('xi', 1),('yi', 1),('fu', 2),('gong', 4),('yun', 2),('shan', 1),
('jie', 2),('hu', 2),('shi', 4),('zhu', 3),('zhong', 1),('wu', 2),('lai', 4),
('ci', 2),('ke', 4),('ai', 1),('shi', 2),('qie', 3),('wei', 4),('huan', 2),
('yu', 3),('xin', 4),('ping', 2),('sheng', 1),('zui', 4),('xiao', 1),('se', 4),
('mu', 4),('nian', 2),('shi', 1),('fu', 4),('dong', 4),('jiang', 1),('guan', 1),
]),
(7,[('yao', 2),('luo', 4),('shen', 1),('zhi', 1),('song', 4),('yu', 4),('bei', 1),
('feng', 1),('liu', 2),('ru', 2),('ya', 3),('yi', 4),('wu', 2),('shi', 1),
('chang', 4),('wang', 4),('qian', 1),('qiu', 1),('yi', 4),('sa', 3),('lei', 4),
('xiao', 1),('tiao', 2),('yi', 4),('dai', 4),('bu', 4),('tong', 2),('shi', 2),
('jiang', 1),('shan', 1),('gu', 4),('zhai', 2),('kong', 1),('wen', 2),('zao', 3),
('yun', 2),('yu', 3),('huang', 1),('tai', 2),('qi', 3),('meng', 4),('si', 1),
('zui', 4),('shi', 4),('chu', 3),('gong', 1),('ju', 4),('min', 3),('mie', 4),
('zhou', 1),('ren', 2),('zhi', 2),('dian', 3),('dao', 4),('jin', 1),('yi', 2),
]),
(7,[('qun', 2),('shan', 1),('wan', 4),('he', 4),('fu', 4),('jing', 1),('men', 2),
('sheng', 1),('zhang', 3),('ming', 2),('fei', 1),('shang', 4),('you', 3),('cun', 1),
('yi', 2),('qu', 4),('zi', 3),('tai', 2),('lian', 2),('shuo', 4),('mo', 4),
('du', 2),('liu', 2),('qing', 1),('zhong', 3),('xiang', 4),('huang', 2),('hun', 1),
('hua', 4),('tu', 2),('xing', 3),('shi', 2),('chun', 1),('feng', 1),('mian', 4),
('huan', 2),('pei', 4),('kong', 1),('gui', 1),('yue', 4),('xia', 4),('hun', 2),
('qian', 1),('zai', 3),('pi', 2),('pa', 2),('zuo', 4),('hu', 2),('yu', 3),
('fen', 1),('ming', 2),('yuan', 4),('hen', 4),('qu', 3),('zhong', 1),('lun', 4),
]),
(7,[('shu', 2),('zhu', 3),('kui', 1),('wu', 2),('xing', 4),('san', 1),('xia', 2),
('beng', 1),('nian', 2),('yi', 4),('zai', 4),('yong', 3),('an', 1),('gong', 1),
('cui', 4),('hua', 2),('xiang', 3),('xiang', 4),('kong', 1),('shan', 1),('li', 3),
('yu', 4),('dian', 4),('xu', 1),('wu', 2),('ye', 3),('si', 4),('zhong', 1),
('gu', 3),('miao', 4),('shan', 1),('song', 1),('chao', 2),('shui', 3),('he', 4),
('sui', 4),('shi', 2),('fu', 2),('la', 4),('zou', 3),('cun', 1),('weng', 1),
('wu', 3),('hou', 2),('ci', 2),('wu', 1),('chang', 2),('lin', 2),('jin', 4),
('yi', 4),('ti', 3),('jun', 1),('chen', 2),('ji', 4),('si', 4),('tong', 2),
]),
(7,[('zhu', 1),('ge', 3),('da', 4),('ming', 2),('chui', 2),('yu', 3),('zhou', 4),
('zong', 1),('chen', 2),('yi', 2),('xiang', 4),('su', 4),('qing', 1),('gao', 1),
('san', 1),('fen', 1),('ge', 1),('ju', 4),('yu', 1),('chou', 2),('ce', 4),
('wan', 4),('gu', 3),('yun', 2),('xiao', 1),('yi', 4),('yu', 3),('mao', 2),
('bo', 2),('zhong', 4),('zhi', 1),('jian', 1),('jian', 4),('yi', 1),('lv', 3),
('zhi', 3),('hui', 1),('ruo', 4),('ding', 4),('shi', 1),('xiao', 1),('cao', 2),
('yun', 4),('yi', 2),('han', 4),('zuo', 4),('zhong', 1),('nan', 2),('fu', 4),
('zhi', 4),('jue', 2),('shen', 1),('jian', 1),('jun', 1),('wu', 4),('lao', 2),
]),
(7,[('sheng', 1),('ya', 2),('qi', 3),('liao', 4),('cheng', 2),('you', 1),('zhao', 4),
('shi', 4),('shi', 4),('kong', 1),('zhi', 1),('xue', 2),('zui', 4),('ge', 1),
('jiang', 1),('shang', 4),('yue', 4),('ming', 2),('hu', 2),('yan', 4),('guo', 4),
('huai', 2),('nan', 2),('mu', 4),('luo', 4),('chu', 3),('shan', 1),('duo', 1),
('ji', 4),('shen', 1),('qie', 2),('xi', 3),('cang', 1),('zhou', 1),('jin', 4),
('gu', 4),('ying', 3),('wu', 2),('ru', 2),('bai', 2),('fa', 4),('he', 2),
('jin', 1),('ri', 4),('long', 2),('zhong', 1),('ren', 2),('gong', 4),('lao', 3),
('kui', 4),('jun', 1),('you', 2),('qian', 3),('shen', 4),('feng', 1),('bo', 1),
]),
(7,[('san', 1),('nian', 2),('zhe', 2),('huan', 4),('ci', 3),('qi', 1),('chi', 2),
('wan', 4),('gu', 3),('wei', 2),('liu', 2),('chu', 3),('ke', 4),('bei', 1),
('qiu', 1),('cao', 3),('du', 2),('xun', 2),('ren', 2),('qu', 4),('hou', 4),
('han', 2),('lin', 2),('kong', 1),('jian', 4),('ri', 4),('xie', 2),('shi', 2),
('han', 4),('wen', 2),('you', 3),('dao', 4),('en', 1),('you', 2),('bo', 2),
('xiang', 1),('shui', 3),('wu', 2),('qing', 2),('diao', 4),('qi', 3),('zhi', 1),
('ji', 4),('ji', 4),('jiang', 1),('shan', 1),('yao', 2),('luo', 4),('chu', 4),
('lian', 2),('jun', 1),('he', 2),('shi', 4),('dao', 4),('tian', 1),('ya', 2),
]),
(7,[('ting', 1),('zhou', 1),('wu', 2),('lang', 4),('fu', 4),('wu', 2),('yan', 1),
('chu', 3),('ke', 4),('xiang', 1),('si', 1),('yi', 4),('miao', 3),('ran', 2),
('han', 4),('kou', 3),('xi', 1),('yang', 2),('xie', 2),('du', 4),('niao', 3),
('dong', 4),('ting', 2),('qiu', 1),('shui', 3),('yuan', 3),('lian', 2),('tian', 1),
('gu', 1),('cheng', 2),('bei', 4),('ling', 3),('han', 2),('chui', 1),('jiao', 3),
('du', 2),('shu', 4),('lin', 2),('jiang', 1),('ye', 4),('bo', 2),('chuan', 2),
('jia', 3),('yi', 4),('shang', 4),('shu', 1),('you', 1),('han', 4),('shi', 4),
('chang', 2),('sha', 1),('zhe', 2),('qu', 4),('gu', 3),('jin', 1),('lian', 2),
]),
(7,[('er', 4),('yue', 4),('huang', 2),('li', 2),('fei', 1),('shang', 4),('lin', 2),
('chun', 1),('cheng', 2),('zi', 3),('jin', 4),('xiao', 3),('yin', 1),('yin', 1),
('chang', 2),('le', 4),('zhong', 1),('sheng', 1),('hua', 1),('wai', 4),('jin', 4),
('long', 2),('chi', 2),('liu', 3),('se', 4),('yu', 3),('zhong', 1),('shen', 1),
('yang', 2),('he', 2),('bu', 2),('san', 4),('qiong', 2),('tu', 2),('hen', 4),
('xiao', 1),('han', 4),('chang', 2),('huai', 2),('peng', 3),('ri', 4),('xin', 1),
('xian', 4),('fu', 4),('shi', 2),('nian', 2),('you', 2),('wei', 4),('yu', 4),
('xiu', 1),('jiang', 1),('bai', 2),('fa', 4),('dui', 4),('hua', 2),('zan', 1),
]),
(7,[('qu', 4),('nian', 2),('hua', 1),('li', 3),('feng', 2),('jun', 1),('bie', 2),
('jin', 1),('ri', 4),('hua', 1),('kai', 1),('you', 4),('yi', 4),('nian', 2),
('shi', 4),('shi', 4),('mang', 2),('mang', 2),('nan', 2),('zi', 4),('liao', 4),
('chun', 1),('chou', 2),('an', 4),('an', 4),('du', 2),('cheng', 2),('mian', 2),
('shen', 1),('duo', 1),('ji', 2),('bing', 4),('si', 1),('tian', 2),('li', 3),
('yi', 4),('you', 3),('liu', 2),('wang', 2),('kui', 4),('feng', 4),('qian', 2),
('wen', 2),('dao', 4),('yu', 4),('lai', 2),('xiang', 1),('wen', 4),('xun', 4),
('xi', 1),('lou', 2),('wang', 4),('yue', 4),('ji', 3),('hui', 2),('yuan', 2),
]),
(7,[('xian', 1),('tai', 2),('chu', 1),('xian', 4),('wu', 3),('cheng', 2),('lou', 2),
('feng', 1),('wu', 4),('qi', 1),('qi', 1),('su', 4),('yu', 3),('shou', 1),
('shan', 1),('se', 4),('yao', 2),('lian', 2),('qin', 2),('shu', 4),('wan', 3),
('zhen', 1),('sheng', 1),('jin', 4),('bao', 4),('han', 4),('gong', 1),('qiu', 1),
('shu', 1),('song', 1),('ying', 3),('luo', 4),('kong', 1),('tan', 2),('jing', 4),
('xi', 4),('cao', 3),('xiang', 1),('xian', 2),('xiao', 3),('dong', 4),('you', 1),
('he', 2),('yong', 4),('bie', 2),('xun', 2),('fang', 1),('wai', 4),('qu', 4),
('ren', 2),('jian', 1),('yi', 4),('zi', 4),('you', 3),('dan', 1),('qiu', 1),
]),
(7,[('ying', 1),('ti', 2),('yan', 4),('yu', 3),('bao', 4),('xin', 1),('nian', 2),
('ma', 3),('yi', 4),('long', 2),('dui', 1),('lu', 4),('ji', 3),('qian', 1),
('jia', 1),('zhu', 4),('ceng', 2),('cheng', 2),('lin', 2),('han', 4),('yuan', 4),
('xin', 1),('sui', 2),('ming', 2),('yue', 4),('dao', 4),('hu', 2),('tian', 1),
('ji', 1),('zhong', 1),('jin', 3),('zi', 4),('lun', 4),('chang', 2),('hen', 4),
('lou', 2),('shang', 4),('hua', 1),('zhi', 1),('xiao', 4),('du', 2),('mian', 2),
('wei', 2),('wen', 4),('tian', 1),('rong', 2),('dou', 4),('che', 1),('ji', 4),
('he', 2),('shi', 2),('fan', 3),('pei', 4),('le', 4),('yan', 1),('ran', 2),
]),
(7,[('yun', 2),('kai', 1),('yuan', 3),('jian', 4),('han', 4),('yang', 2),('cheng', 2),
('you', 2),('shi', 4),('gu', 1),('fan', 1),('yi', 2),('ri', 4),('cheng', 2),
('gu', 1),('ke', 4),('zhou', 4),('mian', 2),('zhi', 1),('lang', 4),('jing', 4),
('zhou', 1),('ren', 2),('ye', 4),('yu', 3),('jue', 4),('chao', 2),('sheng', 1),
('san', 1),('xiang', 1),('chou', 2),('bin', 4),('feng', 2),('qiu', 1),('se', 4),
('wan', 4),('li', 3),('gui', 1),('xin', 1),('dui', 4),('yue', 4),('ming', 2),
('jiu', 4),('ye', 4),('yi', 3),('sui', 2),('zheng', 1),('zhan', 4),('jin', 4),
('geng', 4),('kan', 1),('jiang', 1),('shang', 4),('gu', 3),('pi', 2),('sheng', 1),
]),
(7,[('cheng', 2),('shang', 4),('gao', 1),('lou', 2),('jie', 1),('da', 4),('huang', 1),
('hai', 3),('tian', 1),('chou', 2),('si', 1),('zheng', 4),('mang', 2),('mang', 2),
('jing',1), ('feng',1), ('luan',4), ('zhan',3), ('fu',2), ('rong',2), ('shui',3),
('mi',4), ('yu',3), ('xie',2), ('qin',1), ('bi',4), ('zhi',1), ('qiang',2),
('ling', 3),('shu', 4),('chong', 2),('zhe', 1),('qian', 1),('li', 3),('mu', 4),
('jiang', 1),('liu', 2),('qu', 1),('si', 4),('jiu', 3),('hui', 2),('chang', 2),
('gong', 4),('lai', 2),('bai', 3),('yue', 4),('wen', 2),('shen', 1),('di', 4),
('you', 2),('zi', 4),('yin', 1),('shu', 1),('zhi', 4),('yi', 4),('xiang', 1)
]),
(7,[('wang', 2),('xun', 4),('lou', 2),('chuan', 2),('xia', 4),('yi', 4),('zhou', 1),
('jin', 1),('ling', 2),('wang', 2),('qi', 4),('an', 4),('ran', 2),('shou', 1),
('qian', 1),('xun', 2),('tie', 2),('suo', 3),('shen', 3),('jiang', 1),('di', 3),
('yi', 2),('pian', 4),('jiang', 4),('fan', 1),('chu', 1),('shi', 2),('tou', 2),
('ren', 2),('shi', 4),('ji', 3),('hui', 2),('shang', 1),('wang', 3),('shi', 4),
('shan', 1),('xing', 2),('yi', 1),('jiu', 4),('zhen', 3),('han', 2),('liu', 2),
('cong', 2),('jin', 1),('si', 4),('hai', | |
np.sum(a['chfhat'][ :, : , ntask, 1, 1 ], axis = 0) + \
np.sum(a['ghfhat'][ :, : , ntask, 1, 1 ], axis = 0)), \
np.sum(a['ghfhat'][ :, : , ntask, 0, 0 ], axis = 1)))
"""
#DEBUG. #############################################
import pdb; pdb.set_trace();
vd = np.zeros((n_total,n_total))
vd[ np.eye(n_total).astype('bool') ] = np.append(np.append(np.sum(a['ahfhat'][ :, : , ntask, 0, 0 ], axis = 1), \
np.sum(a['ahfhat'][ :, : , ntask, 1, 1 ], axis = 0) + np.sum(a['chfhat'][ :, : , ntask, 0, 0 ], axis = 1) + \
np.sum(a['chfhat'][ :, : , ntask, 1, 1 ], axis = 0) + np.sum(a['ghfhat'][ :, : , ntask, 1, 1 ], axis = 0)._value), \
np.sum(a['ghfhat'][ :, : , ntask, 0, 0 ], axis = 1)._value)
vd[ n_obs : n_obs + n_pset, n_obs : n_obs + n_pset ] = vd[ n_obs : n_obs + n_pset, n_obs : n_obs + n_pset ] + \
a['chfhat'][ :, : , ntask, 0, 1 ] + a['chfhat'][ :, : , ntask, 1, 0 ].T
vd[ 0 : n_obs, n_obs : n_obs + n_pset ] = a['ahfhat'][ :, :, ntask, 0, 1]
vd[ n_obs : n_obs + n_pset, 0 : n_obs ] = a['ahfhat'][ :, :, ntask, 0, 1].transpose()
vd[ n_obs + n_pset : n_total, n_obs : n_obs + n_pset ] = a['ghfhat'][ :, :, ntask, 0, 1]._value
vd[ n_obs : n_obs + n_pset, n_obs + n_pset : n_total ] = a['ghfhat'][ :, :, ntask, 0, 1].transpose()._value
######################################################
"""
"""
vTilde[ np.eye(n_total).astype('bool') ] = np.append(np.append(np.sum(a['ahfhat'][ :, : , ntask, 0, 0 ], axis = 1), \
np.sum(a['ahfhat'][ :, : , ntask, 1, 1 ], axis = 0) + np.sum(a['chfhat'][ :, : , ntask, 0, 0 ], axis = 1) + \
np.sum(a['chfhat'][ :, : , ntask, 1, 1 ], axis = 0) + np.sum(a['ghfhat'][ :, : , ntask, 1, 1 ], axis = 0)), \
np.sum(a['ghfhat'][ :, : , ntask, 0, 0 ], axis = 1))
"""
block_2 = block_1[ n_obs : n_obs + n_pset, n_obs : n_obs + n_pset ] + a['chfhat'][ :, : , ntask, 0, 1 ] + a['chfhat'][ :, : , ntask, 1, 0 ].T
#vTilde[ n_obs : n_obs + n_pset, n_obs : n_obs + n_pset ] = vTilde[ n_obs : n_obs + n_pset, n_obs : n_obs + n_pset ] + \
#a['chfhat'][ :, : , ntask, 0, 1 ] + a['chfhat'][ :, : , ntask, 1, 0 ].T
block_3 = a['ahfhat'][ :, :, ntask, 0, 1]
block_4 = a['ahfhat'][ :, :, ntask, 0, 1].transpose()
block_5 = a['ghfhat'][ :, :, ntask, 0, 1]
block_6 = a['ghfhat'][ :, :, ntask, 0, 1].transpose()
#Building the matrix.
vTilde = np.array([])
for x_index in range(n_total):
for y_index in range(n_total):
#Block_2
if (x_index >= n_obs and x_index < n_obs + n_pset and y_index >= n_obs and y_index < n_obs + n_pset) \
or (x_index==y_index and x_index >= n_obs and x_index < n_obs + n_pset and y_index >= n_obs and y_index < n_obs + n_pset):
vTilde = np.append(vTilde, block_2[x_index - n_obs, y_index - n_obs])
#Block_1
elif x_index == y_index:
vTilde = np.append(vTilde, block_1[x_index, y_index])
#Block_3
elif x_index < n_obs and y_index >= n_obs and y_index < n_obs + n_pset:
vTilde = np.append(vTilde, block_3[x_index, y_index - n_obs])
#Block_4
elif x_index >= n_obs and x_index < n_obs + n_pset and y_index < n_obs:
vTilde = np.append(vTilde, block_4[x_index - n_obs, y_index])
#Block_5
elif x_index >= n_obs + n_pset and y_index >= n_obs and y_index < n_obs + n_pset:
vTilde = np.append(vTilde, block_5[x_index - n_obs - n_pset, y_index - n_obs])
#Block_6
elif x_index >= n_obs and x_index < n_obs + n_pset and y_index >= n_obs + n_pset:
vTilde = np.append(vTilde, block_6[x_index - n_obs, y_index - n_obs - n_pset])
#Default 0
else:
vTilde = np.append(vTilde, 1e-15)
#TODO: Test that acq==autograd_acq. No da lo mismo. Hacer una version con cambios y otra sin, ir incorporandolos.
#TODO: Test that grad_acq==autograd_grad_acq
vTilde = vTilde.reshape((n_total, n_total))
"""
vTilde[ 0 : n_obs, n_obs : n_obs + n_pset ] = a['ahfhat'][ :, :, ntask, 0, 1]
vTilde[ n_obs : n_obs + n_pset, 0 : n_obs ] = a['ahfhat'][ :, :, ntask, 0, 1].transpose()
vTilde[ n_obs + n_pset : n_total, n_obs : n_obs + n_pset ] = a['ghfhat'][ :, :, ntask, 0, 1]
vTilde[ n_obs : n_obs + n_pset, n_obs + n_pset : n_total ] = a['ghfhat'][ :, :, ntask, 0, 1].transpose()
"""
a['Vinv'][obj] = a['VpredInv'][obj] + vTilde
a['V'][obj] = np.linalg.inv(a['VpredInv'][obj] + vTilde)
mTilde = np.append(np.append(np.sum(a['bhfhat'][ :, : , ntask, 0 ], axis = 1),
np.sum(a['bhfhat'][ :, : , ntask, 1 ], axis = 0) + np.sum(a['hhfhat'][ :, : , ntask, 1 ], axis = 0) +\
np.sum(a['dhfhat'][ :, : , ntask, 0 ], axis = 1) + np.sum(a['dhfhat'][ :, : , ntask, 1 ], axis = 0)), \
np.sum(a['hhfhat'][ :, : , ntask, 0 ], axis = 1))
a['m_nat'][obj] = np.dot(a['VpredInv'][obj], a['mPred'][obj]) + mTilde
a['m'][obj] = np.dot(a['V'][obj], a['m_nat'][ obj ])
ntask = ntask + 1
return a
def get_test_predictive_distributions(a):
    """Extract the EP predictive distributions over the test points.

    The per-task marginals in a['m'] / a['V'] (objectives) and
    a['m_cons'] / a['V_cons'] (constraints) are stacked as
    [observations | pareto-set points | test points]; this slices out the
    trailing ``n_test`` block of each mean vector and covariance matrix.

    Fix: removed the unused locals ``q = len(a['objs'])`` and
    ``c = len(a['cons'])`` from the original.

    Args:
        a: EP state dict with keys 'n_obs', 'n_pset', 'n_test', 'n_total',
            'objs', 'cons', 'm', 'V' and (when constraints exist)
            'm_cons', 'V_cons'.

    Returns:
        Tuple ``(predictive_distributions, a)`` where the first element maps
        'mf'/'vf' (objective means/covariances) and 'mc'/'vc' (constraint
        means/covariances) to per-task numpy arrays for the test block.
    """
    n_obs = a['n_obs']
    n_pset = a['n_pset']
    n_test = a['n_test']
    n_total = a['n_total']
    # Index range occupied by the test points in the stacked quantities.
    test = slice(n_obs + n_pset, n_total)
    # defaultdicts keep the original behavior of yielding zero-filled
    # entries for tasks that are never assigned below.
    predictive_distributions = {
        'mf': defaultdict(lambda: np.zeros(n_test)),
        'vf': defaultdict(lambda: np.zeros((n_test, n_test))),
        'mc': defaultdict(lambda: np.zeros(n_test)),
        'vc': defaultdict(lambda: np.zeros((n_test, n_test))),
    }
    for obj in a['objs'].keys():
        predictive_distributions['mf'][obj] = a['m'][obj][test]
        predictive_distributions['vf'][obj] = a['V'][obj][test, test]
    for cons in a['cons'].keys():
        predictive_distributions['mc'][cons] = a['m_cons'][cons][test]
        predictive_distributions['vc'][cons] = a['V_cons'][cons][test, test]
    return predictive_distributions, a
def compute_PPESMOC_approximation(predictionEP, obj_models_dict, con_models, unconstrainedVariances, constrainedVariances, acq):
    """Accumulate the PPESMOC entropy-reduction approximation per task.

    For each task t the contribution is
    ``0.5 * logdet(unconstrainedVariances[t]) - 0.5 * logdet(constrainedVariances[t])``
    clamped at zero, added into ``acq[t]``.  Observation noise from the
    models is first added to the EP predictive covariances.

    Bug fixes relative to the original:
      * the constraint loop indexed ``predictionEP_cons[obj]`` with the
        stale loop variable from the objective loop; it now uses ``cons``;
      * ``np.max(value, 0)`` passed 0 as the *axis* argument instead of
        clamping; replaced with ``np.maximum(value, 0)``.

    Args:
        predictionEP: dict with 'vf' (objective covariances by key) and
            'vc' (constraint covariances by key); entries are updated in
            place with the added noise.
        obj_models_dict: objective models, each providing noise_value().
        con_models: constraint models, each providing noise_value().
        unconstrainedVariances: per-task covariances without conditioning.
        constrainedVariances: output dict filled with the noise-augmented
            EP covariances.
        acq: per-task accumulator; updated in place and returned.
    """
    predictionEP_obj = predictionEP['vf']
    predictionEP_cons = predictionEP['vc']
    # DHL changed fill_diag, because that was updating the a structure and
    # screwing things up later on.
    for obj in obj_models_dict:
        predictionEP_obj[obj] = predictionEP_obj[obj] + \
            np.eye(predictionEP_obj[obj].shape[0]) * obj_models_dict[obj].noise_value()
        constrainedVariances[obj] = predictionEP_obj[obj]
    for cons in con_models:
        # FIX: was predictionEP_cons[obj] (leftover variable from the loop above).
        predictionEP_cons[cons] = predictionEP_cons[cons] + \
            np.eye(predictionEP_cons[cons].shape[0]) * con_models[cons].noise_value()
        constrainedVariances[cons] = predictionEP_cons[cons]
    # We only care about the variances because the means do not affect the
    # entropy. The summation of the acq of the tasks (t) is done in a higher
    # method; do not do it here.
    for t in unconstrainedVariances:
        # DHL replaced np.log(np.linalg.det()) to avoid precision errors.
        value = 0.5 * np.linalg.slogdet(unconstrainedVariances[t])[1] \
            - 0.5 * np.linalg.slogdet(constrainedVariances[t])[1]
        # The entropy cannot increase when conditioning, so negative values
        # are numerical noise and are clamped to zero.
        # FIX: np.max(value, 0) treated 0 as the axis argument.
        value = np.maximum(value, 0)
        acq[t] += value
    return acq
def update_full_Factors_only_test_factors(a, damping, minimize=True, no_negative_variances_nor_nands = False, no_negatives = True):
# used to switch between minimizing and maximizing
sgn = -1.0 if minimize else 1.0
# We update the h factors
all_tasks = a['objs']
all_constraints = a['cons']
n_obs = a['n_obs']
n_pset = a['n_pset']
n_test = a['n_test']
n_total = a['n_total']
q = a['q']
c = a['c']
alpha = np.zeros(a['q'])
s = np.zeros(a['q'])
ratio_cons = np.zeros(c)
# First we update the factors corresponding to the observed data
# We compute an "old" distribution
# Data structures for objective npset ntest cavities (a, b).
m_pset = np.array([])
#m_pset = np.zeros((q, n_pset, n_test))
m_test = np.array([])
#m_test = np.zeros((q, n_pset, n_test))
v_pset = np.array([])
#v_pset = np.zeros((q, n_pset, n_test))
v_test = np.array([])
#v_test = np.zeros((q, n_pset, n_test))
v_cov = np.array([])
#v_cov = np.zeros((q, n_pset, n_test))
# Data structures for constraint npset nobs cavities (c_a, c_b).
c_m = np.array([])
#c_m = np.zeros((c, n_pset, n_test))
c_v = np.array([])
#c_v = np.zeros((c, n_pset, n_test))
# Update marginals: a['m'] , a['V']
#n_task = 0
for obj in all_tasks: #OK
m_test = np.append(m_test, np.tile(a['m'][ obj ][ | |
# <gh_stars>10-100  (scraper artifact; kept as a comment so the module parses)
import argparse
import glob
import os
import re
import sys
import types
if sys.version_info >= (3, 0):
from io import StringIO
else:
from StringIO import StringIO
if sys.version_info < (2, 7):
from ordereddict import OrderedDict
else:
from collections import OrderedDict
# Argparse "flag-like" action types: their mere presence on the command line
# sets the value, so a config-file entry for them carries no real value.
# NOTE(review): these are private argparse classes (underscore-prefixed) and
# could change between Python versions.
ACTION_TYPES_THAT_DONT_NEED_A_VALUE = (argparse._StoreTrueAction,
    argparse._StoreFalseAction, argparse._CountAction,
    argparse._StoreConstAction, argparse._AppendConstAction)
# global ArgumentParser instances
# Registry used by init_argument_parser()/get_argument_parser(): maps a
# parser name (default name: "default") to its ArgumentParser singleton.
_parsers = {}
def init_argument_parser(name=None, **kwargs):
    """Create and register a global ArgumentParser under *name*.

    Every keyword argument except "name" is forwarded to the
    ArgumentParser constructor.  The registered instance can later be
    retrieved with get_argument_parser(..).  Registering the same name
    twice raises ValueError.
    """
    key = "default" if name is None else name
    existing = _parsers.get(key)
    if existing is not None:
        raise ValueError(("kwargs besides 'name' can only be passed in the"
            " first time. '%s' ArgumentParser already exists: %s") % (
            key, existing))
    # Fill in library defaults without overriding caller-supplied values.
    for option, value in (
            ('formatter_class', argparse.ArgumentDefaultsHelpFormatter),
            ('conflict_handler', 'resolve')):
        kwargs.setdefault(option, value)
    _parsers[key] = ArgumentParser(**kwargs)
def get_argument_parser(name=None, **kwargs):
    """Return the global ArgumentParser registered under *name*.

    On the first call for a given name a new ArgumentParser is created,
    forwarding any keyword arguments other than "name" to its
    constructor; later calls return the cached instance.
    """
    key = "default" if name is None else name
    must_create = bool(kwargs) or key not in _parsers
    if must_create:
        init_argument_parser(key, **kwargs)
    return _parsers[key]
class ArgumentDefaultsRawHelpFormatter(
    argparse.ArgumentDefaultsHelpFormatter,
    argparse.RawTextHelpFormatter,
    argparse.RawDescriptionHelpFormatter):
    """Help formatter that appends default values to argument help while
    leaving line breaks in descriptions and help strings untouched."""
class ConfigFileParser(object):
    """Abstract base class for config file parsers.

    Extend this class and implement the three methods below to add
    support for a new config file format.
    """

    def get_syntax_description(self):
        """Return a human-readable description of the config file syntax."""
        raise NotImplementedError("get_syntax_description(..) not implemented")

    def parse(self, stream):
        """Parse the keys and values from a config file stream.

        NOTE: For keys that were specified to configargparse as
        action="store_true" or "store_false", the config file value must be
        one of: "yes", "no", "true", "false". Otherwise an error will be
        raised.

        Args:
            stream: A config file input stream (such as an open file object).

        Returns:
            OrderedDict mapping string keys to values that are either
            strings or lists (eg. to support config file formats like YAML
            which allow lists).
        """
        raise NotImplementedError("parse(..) not implemented")

    def serialize(self, items):
        """Inverse of parsing: render parsed items back to config file text.

        Args:
            items: an OrderedDict of items to be converted to the config
                file format. Keys should be strings, and values should be
                either strings or lists.

        Returns:
            Contents of config file as a string.
        """
        raise NotImplementedError("serialize(..) not implemented")
class ConfigFileParserException(Exception):
    """Signals that parsing a config file failed."""
class DefaultConfigFileParser(ConfigFileParser):
    """Based on a simplified subset of INI and YAML formats. Here is the
    supported syntax:

        # this is a comment
        ; this is also a comment (.ini style)
        --- # lines that start with --- are ignored (yaml style)
        -------------------
        [section]      # .ini-style section names are treated as comments

        # how to specify a key-value pair (all of these are equivalent):
        name value     # key is case sensitive: "Name" isn't "name"
        name = value   # (.ini style)  (white space is ignored, so name = value same as name=value)
        name: value    # (yaml style)
        --name value   # (argparse style)

        # how to set a flag arg (eg. arg which has action="store_true")
        --name
        name
        name = True    # "True" and "true" are the same

        # how to specify a list arg (eg. arg which has action="append")
        fruit = [apple, orange, lemon]
        indexes = [1, 12, 35 , 40]
    """

    # FIX: the original built these patterns from non-raw string literals
    # containing "\s" (an invalid escape sequence, a SyntaxWarning on modern
    # Python) and re-assembled them for every input line.  They are now raw
    # strings, compiled once at class-definition time.
    _WS = r"\s*"
    _KEY = r"(?P<key>[^:=;#\s]+?)"
    _VALUE = _WS + r"[:=\s]" + _WS + r"(?P<value>.+?)"
    _COMMENT = _WS + r"(?P<comment>\s[;#].*)?"
    _KEY_ONLY_RE = re.compile("^" + _KEY + _COMMENT + "$")
    _KEY_VALUE_RE = re.compile("^" + _KEY + _VALUE + _COMMENT + "$")

    def get_syntax_description(self):
        """Return a one-line summary of the supported config syntax."""
        msg = ("Config file syntax allows: key=value, flag=true, stuff=[a,b,c] "
            "(for details, see syntax at https://goo.gl/R74nmi).")
        return msg

    def parse(self, stream):
        """Parse the keys + values from a config file.

        Bare keys become the string "true"; "[a, b]" values become lists of
        stripped strings.  Raises ConfigFileParserException on a line that
        matches neither form.
        """
        items = OrderedDict()
        for i, line in enumerate(stream):
            line = line.strip()
            # Skip blanks, comments, .ini section headers and yaml "---".
            if not line or line[0] in ["#", ";", "["] or line.startswith("---"):
                continue
            key_only_match = self._KEY_ONLY_RE.match(line)
            if key_only_match:
                key = key_only_match.group("key")
                items[key] = "true"
                continue
            key_value_match = self._KEY_VALUE_RE.match(line)
            if key_value_match:
                key = key_value_match.group("key")
                value = key_value_match.group("value")
                if value.startswith("[") and value.endswith("]"):
                    # handle special case of lists
                    value = [elem.strip() for elem in value[1:-1].split(",")]
                items[key] = value
                continue
            raise ConfigFileParserException("Unexpected line %s in %s: %s" % (i,
                getattr(stream, 'name', 'stream'), line))
        return items

    def serialize(self, items):
        """Does the inverse of config parsing by taking parsed values and
        converting them back to a string representing config file contents.
        """
        r = StringIO()
        for key, value in items.items():
            if isinstance(value, list):
                # handle special case of lists
                value = "[" + ", ".join(map(str, value)) + "]"
            r.write("%s = %s\n" % (key, value))
        return r.getvalue()
class YAMLConfigFileParser(ConfigFileParser):
    """Parses YAML config files. Depends on the PyYAML module.
    https://pypi.python.org/pypi/PyYAML
    """

    def get_syntax_description(self):
        """Return a one-line summary of the expected YAML structure."""
        msg = ("The config file uses YAML syntax and must represent a YAML "
            "'mapping' (for details, see http://learn.getgrav.org/advanced/yaml).")
        return msg

    def _load_yaml(self):
        """lazy-import PyYAML so that configargparse doesn't have to dependend
        on it unless this parser is used."""
        try:
            import yaml
        except ImportError:
            raise ConfigFileParserException("Could not import yaml. "
                "It can be installed by running 'pip install PyYAML'")
        return yaml

    def parse(self, stream):
        """Parse the keys and values from a YAML config file stream."""
        yaml = self._load_yaml()
        try:
            parsed_obj = yaml.safe_load(stream)
        except Exception as e:
            raise ConfigFileParserException("Couldn't parse config file: %s" % e)

        if not isinstance(parsed_obj, dict):
            raise ConfigFileParserException("The config file doesn't appear to "
                "contain 'key: value' pairs (aka. a YAML mapping). "
                "yaml.load('%s') returned type '%s' instead of 'dict'." % (
                getattr(stream, 'name', 'stream'), type(parsed_obj).__name__))

        result = OrderedDict()
        for key, value in parsed_obj.items():
            # Lists pass through untouched; scalars are normalized to str.
            result[key] = value if isinstance(value, list) else str(value)
        return result

    def serialize(self, items, default_flow_style=False):
        """Render *items* back into a YAML document string.

        Args:
            default_flow_style: defines serialization format (see PyYAML docs)
        """
        yaml = self._load_yaml()
        # it looks like ordering can't be preserved: http://pyyaml.org/ticket/29
        return yaml.dump(dict(items), default_flow_style=default_flow_style)
# used while parsing args to keep track of where they came from
# (presumably consumed by the ArgumentParser subclass below to report which
# source -- command line, environment, config file, or argparse defaults --
# supplied each setting; the consuming code is outside this chunk, confirm)
_COMMAND_LINE_SOURCE_KEY = "command_line"
_ENV_VAR_SOURCE_KEY = "environment_variables"
_CONFIG_FILE_SOURCE_KEY = "config_file"
_DEFAULTS_SOURCE_KEY = "defaults"
class ArgumentParser(argparse.ArgumentParser):
"""Drop-in replacement for argparse.ArgumentParser that adds support for
environment variables and .ini or .yaml-style config files.
"""
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
version=None,
parents=[],
formatter_class=argparse.HelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True,
add_config_file_help=True,
add_env_var_help=True,
auto_env_var_prefix=None,
default_config_files=[],
ignore_unknown_config_file_keys=False,
config_file_parser_class=DefaultConfigFileParser,
args_for_setting_config_path=[],
config_arg_is_required=False,
config_arg_help_message="config file path",
args_for_writing_out_config_file=[],
write_out_config_file_arg_help_message="takes the current command line "
"args and writes them out to a config file at the given path, then "
"exits",
allow_abbrev=True, # new in python 3.5
):
"""Supports all the same args as the argparse.ArgumentParser
constructor, as well as the following additional args.
Additional Args:
add_config_file_help: Whether to add a description of config file
syntax to the help message.
add_env_var_help: Whether to add something to the help message for
args that can be set through environment variables.
auto_env_var_prefix: If set to a string instead of None, all config-
file-settable options will become also settable via environment
variables whose names are this prefix followed by the config
file key, all in upper case. (eg. setting this to "foo_" will
allow an arg like "--my-arg" to also be set via the FOO_MY_ARG
environment variable)
default_config_files: When specified, this list of config files will
be parsed in order, with the values from each config file
taking precedence over pervious ones. This allows an application
to look for config files in multiple standard locations such as
the install directory, home directory, and current directory.
Also, shell * syntax can be used to specify all conf files in a
directory. For exmaple:
["/etc/conf/app_config.ini",
"/etc/conf/conf-enabled/*.ini",
"~/.my_app_config.ini",
"./app_config.txt"]
ignore_unknown_config_file_keys: If true, settings that are found
in a config file but don't correspond to any defined
configargparse args will be ignored. If false, they will be
processed and appended to the commandline like other args, and
can be retrieved using parse_known_args() instead of parse_args()
config_file_parser_class: configargparse.ConfigFileParser subclass
which determines the config file format. configargparse comes
with DefaultConfigFileParser and YAMLConfigFileParser.
args_for_setting_config_path: A list of one or more command line
args to be | |
import numpy as np
import os
import os.path as osp
from unittest import TestCase
from datumaro.components.dataset_filter import (
XPathDatasetFilter, XPathAnnotationsFilter, DatasetItemEncoder)
from datumaro.components.dataset import (Dataset, DEFAULT_FORMAT, ItemStatus,
eager_mode)
from datumaro.components.environment import Environment
from datumaro.components.errors import DatumaroError, RepeatedItemError
from datumaro.components.extractor import (DEFAULT_SUBSET_NAME, Extractor,
DatasetItem, Label, Mask, Points, Polygon, PolyLine, Bbox, Caption,
LabelCategories, AnnotationType, Transform)
from datumaro.util.image import Image
from datumaro.util.test_utils import TestDir, compare_datasets
class DatasetTest(TestCase):
def test_create_from_extractors(self):
    """Dataset.from_extractors must merge several sources: items with the
    same (id, subset) are fused and their annotations are united."""
    class SrcExtractor1(Extractor):
        def __iter__(self):
            return iter([
                DatasetItem(id=1, subset='train', annotations=[
                    Bbox(1, 2, 3, 4),
                    Label(4),
                ]),
                DatasetItem(id=1, subset='val', annotations=[
                    Label(4),
                ]),
            ])

    class SrcExtractor2(Extractor):
        def __iter__(self):
            return iter([
                DatasetItem(id=1, subset='val', annotations=[
                    Label(5),
                ]),
            ])

    # Expected result: the 'val' item carries annotations from both sources.
    class DstExtractor(Extractor):
        def __iter__(self):
            return iter([
                DatasetItem(id=1, subset='train', annotations=[
                    Bbox(1, 2, 3, 4),
                    Label(4),
                ]),
                DatasetItem(id=1, subset='val', annotations=[
                    Label(4),
                    Label(5),
                ]),
            ])

    dataset = Dataset.from_extractors(SrcExtractor1(), SrcExtractor2())

    compare_datasets(self, DstExtractor(), dataset)
def test_can_create_from_iterable(self):
    """Dataset.from_iterable must be equivalent to an Extractor yielding
    the same items and declaring the same label categories."""
    class TestExtractor(Extractor):
        def __iter__(self):
            return iter([
                DatasetItem(id=1, subset='train', annotations=[
                    Bbox(1, 2, 3, 4, label=2),
                    Label(4),
                ]),
                DatasetItem(id=1, subset='val', annotations=[
                    Label(3),
                ]),
            ])

        def categories(self):
            # Plain label names; from_iterable builds the same structure
            # from its `categories` list argument.
            return { AnnotationType.label: LabelCategories.from_iterable(
                ['a', 'b', 'c', 'd', 'e'])
            }

    actual = Dataset.from_iterable([
        DatasetItem(id=1, subset='train', annotations=[
            Bbox(1, 2, 3, 4, label=2),
            Label(4),
        ]),
        DatasetItem(id=1, subset='val', annotations=[
            Label(3),
        ]),
    ], categories=['a', 'b', 'c', 'd', 'e'])

    compare_datasets(self, TestExtractor(), actual)
def test_can_join_datasets_with_empty_categories(self):
    """Sources with empty or missing categories can be merged with a
    source that declares labels; annotations are united on the item."""
    unlabeled = Dataset.from_iterable([
        DatasetItem(1, annotations=[ Bbox(1, 2, 3, 4, label=None) ])
    ], categories=[])
    labeled = Dataset.from_iterable([
        DatasetItem(1, annotations=[ Label(0) ])
    ], categories=['a'])
    captioned = Dataset.from_iterable([
        DatasetItem(1, annotations=[ Caption('hello world') ])
    ])

    merged = Dataset.from_extractors(unlabeled, labeled, captioned)

    expected = Dataset.from_iterable([
        DatasetItem(1, annotations=[
            Label(0),
            Bbox(1, 2, 3, 4),
            Caption('hello world'),
        ])
    ], categories=['a'])
    compare_datasets(self, expected, merged)
def test_can_save_and_load(self):
    """Round-trip: a dataset saved to disk loads back equal."""
    original = Dataset.from_iterable([
        DatasetItem(id=1, annotations=[ Label(2) ]),
    ], categories=['a', 'b', 'c'])

    with TestDir() as test_dir:
        original.save(test_dir)

        reloaded = Dataset.load(test_dir)

        compare_datasets(self, original, reloaded)
def test_can_detect(self):
    """Dataset.detect recognizes data saved in the default format."""
    env = Environment()
    # Restrict the environment to the default format only, so detection
    # cannot succeed by matching some other registered format.
    env.importers.items = {DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT]}
    env.extractors.items = {DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT]}

    saved = Dataset.from_iterable([
        DatasetItem(id=1, annotations=[ Label(2) ]),
    ], categories=['a', 'b', 'c'])

    with TestDir() as test_dir:
        saved.save(test_dir)

        self.assertEqual(DEFAULT_FORMAT, Dataset.detect(test_dir, env=env))
def test_can_detect_and_import(self):
    """Dataset.import_from auto-detects the saved format and loads it."""
    env = Environment()
    # Only the default format is registered, forcing detection to pick it.
    env.importers.items = {DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT]}
    env.extractors.items = {DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT]}

    saved = Dataset.from_iterable([
        DatasetItem(id=1, annotations=[ Label(2) ]),
    ], categories=['a', 'b', 'c'])

    with TestDir() as test_dir:
        saved.save(test_dir)

        loaded = Dataset.import_from(test_dir, env=env)

        self.assertEqual(test_dir, loaded.data_path)
        self.assertEqual(DEFAULT_FORMAT, loaded.format)
        compare_datasets(self, saved, loaded)
def test_can_export_by_string_format_name(self):
    """Dataset.export accepts a converter registered under a string name."""
    env = Environment()
    # Register the default converter under the custom name 'qq'.
    env.converters.items = {'qq': env.converters[DEFAULT_FORMAT]}

    ds = Dataset.from_iterable([
        DatasetItem(id=1, annotations=[ Label(2) ]),
    ], categories=['a', 'b', 'c'], env=env)

    with TestDir() as test_dir:
        ds.export(save_dir=test_dir, format='qq')
def test_can_transform_by_string_name(self):
    """Dataset.transform accepts a transform registered under a string name."""
    class AddAttrTransform(Transform):
        def transform_item(self, item):
            return self.wrap_item(item, attributes={'qq': 1})

    env = Environment()
    env.transforms.items = {'qq': AddAttrTransform}

    source = Dataset.from_iterable([
        DatasetItem(id=1, annotations=[ Label(2) ]),
    ], categories=['a', 'b', 'c'], env=env)

    produced = source.transform('qq')

    expected = Dataset.from_iterable([
        DatasetItem(id=1, annotations=[ Label(2) ], attributes={'qq': 1}),
    ], categories=['a', 'b', 'c'])
    compare_datasets(self, expected, produced)
def test_can_join_annotations(self):
    """Merging two sources must union their annotations per item."""
    first = Dataset.from_iterable([
        DatasetItem(id=1, subset='train', annotations=[
            Label(1, id=3),
            Label(2, attributes={ 'x': 1 }),
        ])
    ], categories=['a', 'b', 'c', 'd'])
    second = Dataset.from_iterable([
        DatasetItem(id=1, subset='train', annotations=[
            Label(2, attributes={ 'x': 1 }),
            Label(3, id=4),
        ])
    ], categories=['a', 'b', 'c', 'd'])

    # The shared Label(2) annotation must not be duplicated.
    expected = Dataset.from_iterable([
        DatasetItem(id=1, subset='train', annotations=[
            Label(1, id=3),
            Label(2, attributes={ 'x': 1 }),
            Label(3, id=4),
        ])
    ], categories=['a', 'b', 'c', 'd'])

    joined = Dataset.from_extractors(first, second)

    compare_datasets(self, expected, joined)
def test_cant_join_different_categories(self):
    """Merging sources whose category lists differ must fail."""
    first = Dataset.from_iterable([], categories=['a', 'b'])
    second = Dataset.from_iterable([], categories=['b', 'a'])

    with self.assertRaisesRegex(DatumaroError, "different categories"):
        Dataset.from_extractors(first, second)
def test_can_join_datasets(self):
    """Merging must union items, collapsing duplicates by id."""
    first = Dataset.from_iterable([ DatasetItem(0), DatasetItem(1) ])
    second = Dataset.from_iterable([ DatasetItem(1), DatasetItem(2) ])
    expected = Dataset.from_iterable([
        DatasetItem(0), DatasetItem(1), DatasetItem(2)
    ])

    joined = Dataset.from_extractors(first, second)

    compare_datasets(self, expected, joined)
def test_inplace_save_writes_only_updated_data(self):
    """Re-saving a bound dataset must rewrite only the subsets that changed."""
    with TestDir() as path:
        # generate initial dataset
        dataset = Dataset.from_iterable([
            DatasetItem(1, subset='a'),
            DatasetItem(2, subset='b'),
            DatasetItem(3, subset='c'),
        ])
        dataset.save(path)

        # Delete the saved files; the second save() below must recreate
        # only the files of the subsets it considers changed.
        os.unlink(osp.join(
            path, 'annotations', 'a.json'))  # should be rewritten: item put below
        os.unlink(osp.join(
            path, 'annotations', 'b.json'))  # should not be rewritten: untouched
        os.unlink(osp.join(
            path, 'annotations', 'c.json'))  # should be rewritten: item removed below
        dataset.put(DatasetItem(2, subset='a'))
        dataset.remove(3, 'c')
        dataset.save()

        self.assertTrue(osp.isfile(osp.join(path, 'annotations', 'a.json')))
        self.assertFalse(osp.isfile(osp.join(path, 'annotations', 'b.json')))
        self.assertTrue(osp.isfile(osp.join(path, 'annotations', 'c.json')))
def test_can_track_modifications_on_addition(self):
    """put() must raise the is_modified flag."""
    tracked = Dataset.from_iterable([
        DatasetItem(1),
        DatasetItem(2),
    ])
    self.assertFalse(tracked.is_modified)

    tracked.put(DatasetItem(3, subset='a'))

    self.assertTrue(tracked.is_modified)
def test_can_track_modifications_on_removal(self):
    """remove() must raise the is_modified flag."""
    tracked = Dataset.from_iterable([
        DatasetItem(1),
        DatasetItem(2),
    ])
    self.assertFalse(tracked.is_modified)

    tracked.remove(1)

    self.assertTrue(tracked.is_modified)
def test_can_create_patch(self):
    """A patch must describe every change made since the last flush."""
    expected = Dataset.from_iterable([
        DatasetItem(2),
        DatasetItem(3, subset='a')
    ])

    dataset = Dataset.from_iterable([
        DatasetItem(1),
        DatasetItem(2),
    ])
    dataset.put(DatasetItem(2))
    dataset.put(DatasetItem(3, subset='a'))
    dataset.remove(1)
    patch = dataset.patch

    self.assertEqual({
        ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
        ('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,
        # NOTE: without an initialized cache, the brand-new item 3 is
        # reported as 'modified' rather than 'added' - compare with
        # test_can_create_more_precise_patch_when_cached below.
        ('3', 'a'): ItemStatus.modified,
    }, patch.updated_items)

    self.assertEqual({
        'default': ItemStatus.modified,
        'a': ItemStatus.modified,
    }, patch.updated_subsets)

    # The removed item 1 is absent from the patch data.
    self.assertEqual(2, len(patch.data))
    self.assertEqual(None, patch.data.get(1))
    self.assertEqual(dataset.get(2), patch.data.get(2))
    self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))

    compare_datasets(self, expected, dataset)
def test_can_create_more_precise_patch_when_cached(self):
    """With an initialized cache, the patch can tell 'added' from 'modified'."""
    expected = Dataset.from_iterable([
        DatasetItem(2),
        DatasetItem(3, subset='a')
    ])

    dataset = Dataset.from_iterable([
        DatasetItem(1),
        DatasetItem(2),
    ])
    # Caching first lets the dataset know which items already existed.
    dataset.init_cache()
    dataset.put(DatasetItem(2))
    dataset.put(DatasetItem(3, subset='a'))
    dataset.remove(1)
    patch = dataset.patch

    self.assertEqual({
        ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,
        ('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,
        # Unlike the uncached variant above, item 3 is known to be new.
        ('3', 'a'): ItemStatus.added,
    }, patch.updated_items)

    self.assertEqual({
        'default': ItemStatus.modified,
        'a': ItemStatus.modified,
    }, patch.updated_subsets)

    self.assertEqual(2, len(patch.data))
    self.assertEqual(None, patch.data.get(1))
    self.assertEqual(dataset.get(2), patch.data.get(2))
    self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))

    compare_datasets(self, expected, dataset)
def test_can_do_lazy_put_and_remove(self):
    """put()/remove() must not force the source to be read."""
    iter_called = False
    class TestExtractor(Extractor):
        def __iter__(self):
            nonlocal iter_called
            iter_called = True
            return iter([
                DatasetItem(1),
                DatasetItem(2),
            ])
    lazy_dataset = Dataset.from_extractors(TestExtractor())
    self.assertFalse(lazy_dataset.is_cache_initialized)

    lazy_dataset.put(DatasetItem(3))
    # NOTE(review): remove() receives a DatasetItem here while other tests
    # pass a plain id - confirm both forms are accepted by the API.
    lazy_dataset.remove(DatasetItem(1))

    self.assertFalse(lazy_dataset.is_cache_initialized)
    self.assertFalse(iter_called)

    lazy_dataset.init_cache()

    self.assertTrue(lazy_dataset.is_cache_initialized)
    self.assertTrue(iter_called)
def test_can_put(self):
    """An empty dataset must accept a new item via put()."""
    target = Dataset()
    target.put(DatasetItem(1))
    # Items are addressed by (id, subset) pairs.
    self.assertTrue((1, '') in target)
def test_can_do_lazy_get_on_updated_item(self):
    """Looking up an item overwritten by put() must not read the source."""
    iter_called = False
    class TestExtractor(Extractor):
        def __iter__(self):
            nonlocal iter_called
            iter_called = True
            return iter([
                DatasetItem(1),
                DatasetItem(2),
            ])
    lazy_dataset = Dataset.from_extractors(TestExtractor())

    lazy_dataset.put(DatasetItem(2))

    self.assertTrue((2, '') in lazy_dataset)
    self.assertFalse(iter_called)
def test_can_switch_eager_and_lazy_with_cm_global(self):
    """The global eager_mode() context must force immediate loading."""
    iter_called = False
    class TestExtractor(Extractor):
        def __iter__(self):
            nonlocal iter_called
            iter_called = True
            return iter([
                DatasetItem(1),
                DatasetItem(2),
            ])

    with eager_mode():
        Dataset.from_extractors(TestExtractor())

    self.assertTrue(iter_called)
def test_can_switch_eager_and_lazy_with_cm_local(self):
    """eager_mode() scoped to one dataset must force its transforms to run."""
    iter_called = False
    class TestExtractor(Extractor):
        def __iter__(self):
            nonlocal iter_called
            iter_called = True
            return iter([
                DatasetItem(1),
                DatasetItem(2),
                DatasetItem(3),
                DatasetItem(4),
            ])
    subject = Dataset.from_extractors(TestExtractor())

    with eager_mode(dataset=subject):
        subject.select(lambda item: int(item.id) < 3)
        subject.select(lambda item: int(item.id) < 2)

    self.assertTrue(iter_called)
def test_can_do_lazy_select(self):
    """select() must be deferred until the dataset is actually consumed."""
    iter_called = False
    class TestExtractor(Extractor):
        def __iter__(self):
            nonlocal iter_called
            iter_called = True
            return iter([
                DatasetItem(1),
                DatasetItem(2),
                DatasetItem(3),
                DatasetItem(4),
            ])
    filtered = Dataset.from_extractors(TestExtractor())

    filtered.select(lambda item: int(item.id) < 3)
    filtered.select(lambda item: int(item.id) < 2)

    # Nothing is read until the dataset length is requested.
    self.assertFalse(iter_called)
    self.assertEqual(1, len(filtered))
    self.assertTrue(iter_called)
def test_can_chain_lazy_tranforms(self):
    """Stacked transforms must stay lazy and compose when consumed."""
    iter_called = False
    class TestExtractor(Extractor):
        def __iter__(self):
            nonlocal iter_called
            iter_called = True
            return iter([
                DatasetItem(1),
                DatasetItem(2),
                DatasetItem(3),
                DatasetItem(4),
            ])
    chained = Dataset.from_extractors(TestExtractor())

    class TestTransform(Transform):
        def transform_item(self, item):
            return self.wrap_item(item, id=int(item.id) + 1)

    chained.transform(TestTransform)
    chained.transform(TestTransform)

    self.assertFalse(iter_called)
    # Two +1 shifts: ids 1..4 become 3..6, so the minimum is 3.
    self.assertEqual(4, len(chained))
    self.assertEqual(3, int(min(int(item.id) for item in chained)))
    self.assertTrue(iter_called)
def test_raises_when_repeated_items_in_source(self):
    """Duplicate (id, subset) pairs in a source must be rejected on caching."""
    duplicated = Dataset.from_iterable([DatasetItem(0), DatasetItem(0)])

    with self.assertRaises(RepeatedItemError):
        duplicated.init_cache()
def test_can_check_item_existence(self):
    """Membership must work for items, (id, subset) pairs and bare ids."""
    inventory = Dataset.from_iterable([
        DatasetItem(0, subset='a'), DatasetItem(1)
    ])

    self.assertTrue(DatasetItem(0, subset='a') in inventory)
    self.assertFalse(DatasetItem(0, subset='b') in inventory)
    self.assertTrue((0, 'a') in inventory)
    self.assertFalse((0, 'b') in inventory)
    # A bare id only matches items in the default subset.
    self.assertTrue(1 in inventory)
    self.assertFalse(0 in inventory)
def test_can_put_with_id_override(self):
    """Explicit id/subset arguments to put() must override the item's own."""
    target = Dataset.from_iterable([])
    target.put(DatasetItem(0, subset='a'), id=2, subset='b')
    self.assertTrue((2, 'b') in target)
def test_can_compute_cache_with_empty_source(self):
    """init_cache() must work even when the source holds no items."""
    target = Dataset.from_iterable([])
    target.put(DatasetItem(2))

    target.init_cache()

    self.assertTrue(2 in target)
def test_cant_do_partial_caching_in_get_when_default(self):
    """With the default get(), the first lookup reads the whole source once."""
    iter_called = 0
    class TestExtractor(Extractor):
        def __iter__(self):
            nonlocal iter_called
            iter_called += 1
            return iter([
                DatasetItem(1),
                DatasetItem(2),
                DatasetItem(3),
                DatasetItem(4),
            ])
    queried = Dataset.from_extractors(TestExtractor())

    queried.get(3)
    queried.get(4)

    # Both lookups are served by the single full read.
    self.assertEqual(1, iter_called)
def test_can_do_partial_caching_in_get_when_redefined(self):
    """An extractor that overrides get() must serve lookups without iteration."""
    iter_called = 0
    get_called = 0
    class TestExtractor(Extractor):
        def __iter__(self):
            nonlocal iter_called
            iter_called += 1
            return iter([
                DatasetItem(1),
                DatasetItem(2),
                DatasetItem(3),
                DatasetItem(4),
            ])

        def get(self, id, subset=None): #pylint: disable=redefined-builtin
            nonlocal get_called
            get_called += 1
            return DatasetItem(id, subset=subset)
    queried = Dataset.from_extractors(TestExtractor())

    queried.get(3)
    queried.get(4)

    # Each lookup hits get() directly; the source is never iterated.
    self.assertEqual(0, iter_called)
    self.assertEqual(2, get_called)
def test_binds_on_save(self):
    """save() must bind the dataset to its path and format."""
    subject = Dataset.from_iterable([DatasetItem(1)])
    self.assertFalse(subject.is_bound)

    with TestDir() as test_dir:
        subject.save(test_dir)

        self.assertTrue(subject.is_bound)
        self.assertEqual(subject.data_path, test_dir)
        self.assertEqual(subject.format, DEFAULT_FORMAT)
def test_flushes_changes_on_save(self):
    """save() must reset the modification flag."""
    subject = Dataset.from_iterable([])
    subject.put(DatasetItem(1))
    self.assertTrue(subject.is_modified)

    with TestDir() as test_dir:
        subject.save(test_dir)

        self.assertFalse(subject.is_modified)
def test_does_not_load_images_on_saving(self):
    """Saving must not trigger lazy image loading."""
    # Issue https://github.com/openvinotoolkit/datumaro/issues/177
    # Missing image metadata (size etc.) can lead to image loading on
    # dataset save without image saving
    loader_called = False
    def test_loader():
        nonlocal loader_called
        loader_called = True

    subject = Dataset.from_iterable([
        DatasetItem(1, image=test_loader)
    ])

    with TestDir() as test_dir:
        subject.save(test_dir)

    self.assertFalse(loader_called)
class DatasetItemTest(TestCase):
    def test_ctor_requires_id(self):
        """Constructing an item without an id must fail."""
        with self.assertRaises(Exception):
            # pylint: disable=no-value-for-parameter
            DatasetItem()
            # pylint: enable=no-value-for-parameter

    @staticmethod
    def test_ctors_with_image():
        """Every supported form of the 'image' argument must be accepted."""
        for image_arg in [
            None,
            'path.jpg',
            np.array([1, 2, 3]),
            lambda f: np.array([1, 2, 3]),
            Image(data=np.array([1, 2, 3])),
        ]:
            DatasetItem(id=0, image=image_arg)
class DatasetFilterTest(TestCase):
@staticmethod
def test_item_representations():
item = DatasetItem(id=1, subset='subset', path=['a', 'b'],
image=np.ones((5, 4, 3)),
annotations=[
Label(0, attributes={'a1': 1, 'a2': '2'}, id=1, group=2),
Caption('hello', id=1),
Caption('world', group=5),
Label(2, id=3, attributes={ 'x': 1, 'y': '2' }),
Bbox(1, 2, 3, 4, label=4, id=4, attributes={ 'a': 1.0 }),
Bbox(5, 6, 7, 8, id=5, group=5),
Points([1, 2, 2, 0, 1, 1], label=0, id=5),
Mask(id=5, image=np.ones((3, 2))),
Mask(label=3, id=5, image=np.ones((2, 3))),
PolyLine([1, 2, 3, 4, 5, 6, 7, 8], id=11),
Polygon([1, 2, 3, 4, 5, 6, | |
(A user's username.), parameter "public_read" of Long,
parameter "at_least" of type "boolean" (A boolean value, 0 for
false, 1 for true.), parameter "as_admin" of type "boolean" (A
boolean value, 0 for false, 1 for true.)
"""
return self._client.run_job('SampleService.update_sample_acls',
[params], self._service_ver, context)
def replace_sample_acls(self, params, context=None):
    """
    Completely overwrite a sample's ACLs. Any current ACLs are replaced by the provided
    ACLs, even if empty, and gone forever.
    The sample owner cannot be changed via this method.
    :param params: instance of type "ReplaceSampleACLsParams"
       (replace_sample_acls parameters. id - the ID of the sample to
       modify. acls - the ACLs to set on the sample. as_admin - replace
       the sample acls regardless of sample ACL contents as long as the
       user has full service administration permissions.) -> structure:
       parameter "id" of type "sample_id" (A Sample ID. Must be globally
       unique. Always assigned by the Sample service.), parameter "acls"
       of type "SampleACLs" (Access control lists for a sample. Access
       levels include the privileges of the lower access levels. owner -
       the user that created and owns the sample. admin - users that can
       administrate (e.g. alter ACLs) the sample. write - users that can
       write (e.g. create a new version) to the sample. read - users that
       can view the sample. public_read - whether any user can read the
       sample, regardless of permissions.) -> structure: parameter
       "owner" of type "user" (A user's username.), parameter "admin" of
       list of type "user" (A user's username.), parameter "write" of
       list of type "user" (A user's username.), parameter "read" of list
       of type "user" (A user's username.), parameter "public_read" of
       type "boolean" (A boolean value, 0 for false, 1 for true.),
       parameter "as_admin" of type "boolean" (A boolean value, 0 for
       false, 1 for true.)
    """
    # Auto-generated JSON-RPC stub: forwards the raw params dict to the
    # SampleService endpoint and returns the server response unchanged.
    return self._client.run_job('SampleService.replace_sample_acls',
                                [params], self._service_ver, context)
def get_metadata_key_static_metadata(self, params, context=None):
    """
    Get static metadata for one or more metadata keys.
    The static metadata for a metadata key is metadata *about* the key - e.g. it may
    define the key's semantics or denote that the key is linked to an ontological ID.
    The static metadata does not change without the service being restarted. Client caching is
    recommended to improve performance.
    :param params: instance of type "GetMetadataKeyStaticMetadataParams"
       (get_metadata_key_static_metadata parameters. keys - the list of
       metadata keys to interrogate. prefix - 0 (the default) to
       interrogate standard metadata keys. 1 to interrogate prefix
       metadata keys, but require an exact match to the prefix key. 2 to
       interrogate prefix metadata keys, but any keys which are a prefix
       of the provided keys will be included in the results.) ->
       structure: parameter "keys" of list of type "metadata_key" (A key
       in a metadata key/value pair. Less than 1000 unicode characters.),
       parameter "prefix" of Long
    :returns: instance of type "GetMetadataKeyStaticMetadataResults"
       (get_metadata_key_static_metadata results. static_metadata - the
       static metadata for the requested keys.) -> structure: parameter
       "static_metadata" of type "metadata" (Metadata attached to a
       sample.) -> mapping from type "metadata_key" (A key in a metadata
       key/value pair. Less than 1000 unicode characters.) to type
       "metadata_value" (A metadata value, represented by a mapping of
       value keys to primitive values. An example for a location metadata
       key might be: { "name": "<NAME>", "lat": 44.463816, "long":
       -110.836471 } "primitive values" means an int, float, string, or
       equivalent typedefs. Including any collection types is an error.)
       -> mapping from type "metadata_value_key" (A key for a value
       associated with a piece of metadata. Less than 1000 unicode
       characters. Examples: units, value, species) to unspecified object
    """
    # Auto-generated JSON-RPC stub: forwards the raw params dict to the
    # SampleService endpoint and returns the server response unchanged.
    return self._client.run_job('SampleService.get_metadata_key_static_metadata',
                                [params], self._service_ver, context)
def create_data_link(self, params, context=None):
    """
    Create a link from a KBase Workspace object to a sample.
    The user must have admin permissions for the sample and write permissions for the
    Workspace object.
    :param params: instance of type "CreateDataLinkParams"
       (create_data_link parameters. upa - the workspace UPA of the
       object to be linked. dataid - the dataid of the data to be linked,
       if any, within the object. If omitted the entire object is linked
       to the sample. id - the sample id. version - the sample version.
       node - the sample node. update - if false (the default), fail if a
       link already exists from the data unit (the combination of the UPA
       and dataid). if true, expire the old link and create the new link
       unless the link is already to the requested sample node, in which
       case the operation is a no-op. as_admin - run the method as a
       service administrator. The user must have full administration
       permissions. as_user - create the link as a different user.
       Ignored if as_admin is not true. Neither the administrator nor the
       impersonated user need have permissions to the data or sample.) ->
       structure: parameter "upa" of type "ws_upa" (A KBase Workspace
       service Unique Permanent Address (UPA). E.g. 5/6/7 where 5 is the
       workspace ID, 6 the object ID, and 7 the object version.),
       parameter "dataid" of type "data_id" (An id for a unit of data
       within a KBase Workspace object. A single object may contain many
       data units. A dataid is expected to be unique within a single
       object. Must be less than 255 characters.), parameter "id" of type
       "sample_id" (A Sample ID. Must be globally unique. Always assigned
       by the Sample service.), parameter "version" of type "version"
       (The version of a sample. Always > 0.), parameter "node" of type
       "node_id" (A SampleNode ID. Must be unique within a Sample and be
       less than 255 characters.), parameter "update" of type "boolean"
       (A boolean value, 0 for false, 1 for true.), parameter "as_admin"
       of type "boolean" (A boolean value, 0 for false, 1 for true.),
       parameter "as_user" of type "user" (A user's username.)
    :returns: instance of type "CreateDataLinkResults" (create_data_link
       results. new_link - the new link.) -> structure: parameter
       "new_link" of type "DataLink" (A data link from a KBase workspace
       object to a sample. upa - the workspace UPA of the linked object.
       dataid - the dataid of the linked data, if any, within the object.
       If omitted the entire object is linked to the sample. id - the
       sample id. version - the sample version. node - the sample node.
       createdby - the user that created the link. created - the time the
       link was created. expiredby - the user that expired the link, if
       any. expired - the time the link was expired, if at all.) ->
       structure: parameter "linkid" of type "link_id" (A link ID. Must
       be globally unique. Always assigned by the Sample service.
       Typically only of use to service admins.), parameter "upa" of type
       "ws_upa" (A KBase Workspace service Unique Permanent Address
       (UPA). E.g. 5/6/7 where 5 is the workspace ID, 6 the object ID,
       and 7 the object version.), parameter "dataid" of type "data_id"
       (An id for a unit of data within a KBase Workspace object. A
       single object may contain many data units. A dataid is expected to
       be unique within a single object. Must be less than 255
       characters.), parameter "id" of type "sample_id" (A Sample ID.
       Must be globally unique. Always assigned by the Sample service.),
       parameter "version" of type "version" (The version of a sample.
       Always > 0.), parameter "node" of type "node_id" (A SampleNode ID.
       Must be unique within a Sample and be less than 255 characters.),
       parameter "createdby" of type "user" (A user's username.),
       parameter "created" of type "timestamp" (A timestamp in epoch
       milliseconds.), parameter "expiredby" of type "user" (A user's
       username.), parameter "expired" of type "timestamp" (A timestamp
       in epoch milliseconds.)
    """
    # Auto-generated JSON-RPC stub: forwards the raw params dict to the
    # SampleService endpoint and returns the server response unchanged.
    return self._client.run_job('SampleService.create_data_link',
                                [params], self._service_ver, context)
def expire_data_link(self, params, context=None):
"""
Expire a link from a KBase Workspace object.
The user must have admin permissions for the sample and write permissions for the
Workspace object.
:param params: instance of type "ExpireDataLinkParams"
(expire_data_link parameters. upa - the workspace upa of the
object from which the link originates. | |
"""
Class Features
Name: driver_data_io_dynamic
Author(s): <NAME> (<EMAIL>)
Date: '20210408'
Version: '1.0.0'
"""
######################################################################################
# Library
import logging
import os
import numpy as np
import pandas as pd
import xarray as xr
from copy import deepcopy
from lib_data_io_nc import read_file_collections, filter_file_collections, set_time_collections
from lib_utils_io import read_obj, write_obj, create_dset, write_dset
from lib_utils_system import fill_tags2string, make_folder
from lib_info_args import logger_name, time_format_algorithm, zip_extension
from driver_graphs_timeseries import DriverGraph
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
import matplotlib.pylab as plt
######################################################################################
# -------------------------------------------------------------------------------------
# Class DriverDynamic
class DriverDynamic:
# -------------------------------------------------------------------------------------
# Initialize class
def __init__(self, time_reference,
             src_dict, anc_dict=None, anl_dict=None, dst_dict=None,
             static_data_collection=None,
             registry_data_collection=None,
             alg_info=None, alg_template=None,
             tag_terrain_data='terrain', tag_section_data='sections', tag_table_graph_data='table_graph_lut',
             tag_section_data='sections' if False else tag_section_data):
    pass
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define point registry
def define_point_registry(self, name_lower=True, name_type=None,
                          str_delimiter_registry=','):
    """Build the sorted, de-duplicated registry strings for the section points.

    Each registry entry joins a section domain with the destination "plot"
    run keys, e.g. 'domain,run1,run2'.

    :param name_lower: if True, lower-case the section domain names.
    :param name_type: optional section-type filter. May be a string, None
        (no filtering), or a single-element list [None] (also no filtering).
    :param str_delimiter_registry: separator used to join the registry fields.
    :return: sorted list of unique registry strings.
    """
    # Unwrap the [None] sentinel; the original code crashed with an
    # AttributeError when the default name_type=None was used.
    if name_type is not None and len(name_type) == 1 and name_type[0] is None:
        name_type = name_type[0]

    if name_lower:
        section_domain = self.static_data_section['section_domain'].str.lower()
    else:
        section_domain = self.static_data_section['section_domain']
    section_type = self.static_data_section['section_type']

    run_point_domain_list = section_domain.values.tolist()
    run_point_type_list = section_type.values.tolist()
    run_plot_list = self.run_plot_list

    point_registry_list = []
    for run_point_domain_step, run_point_type_step in zip(run_point_domain_list, run_point_type_list):
        # Keep the point when no type filter is set or when its type matches.
        if name_type is None or run_point_type_step.lower() in name_type.lower():
            point_registry_list.append(
                str_delimiter_registry.join([run_point_domain_step] + run_plot_list))

    return sorted(set(point_registry_list))
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define point name
def define_point_name(self, name_type=None, str_delimiter_name=':'):
    """Collect the section point names ('domain:section') and their orders.

    :param name_type: optional section-type filter. May be a string, None
        (no filtering), or a single-element list [None] (also no filtering).
    :param str_delimiter_name: separator joining domain and section name.
    :return: tuple (orders, names) of two aligned lists.
    """
    # Unwrap the [None] sentinel; the original code crashed with an
    # AttributeError when the default name_type=None was used.
    if name_type is not None and len(name_type) == 1 and name_type[0] is None:
        name_type = name_type[0]

    section_name_list = self.static_data_section['section_name'].values.tolist()
    section_domain_list = self.static_data_section['section_domain'].values.tolist()
    section_type_list = self.static_data_section['section_type'].values.tolist()
    section_order_list = self.static_data_section['section_domain_order'].values.tolist()

    section_order_selection = []
    section_point_selection = []
    for section_name_step, section_domain_step, section_order_step, section_type_step in zip(
            section_name_list, section_domain_list, section_order_list, section_type_list):
        # Keep the point when no type filter is set or when its type matches.
        if name_type is None or section_type_step.lower() in name_type.lower():
            section_point_selection.append(
                str_delimiter_name.join([section_domain_step, section_name_step]))
            section_order_selection.append(section_order_step)

    return section_order_selection, section_point_selection
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to filter filename(s) struct
@staticmethod
def filter_run_file_name_struct(run_file_collections, anc_file_collections, str_delimiter=':'):
    """Filter the expected source files down to those that exist on disk.

    Both arguments are mappings {run_name: {time: [file_name, ...]}} and are
    iterated in lockstep, so they are assumed to share keys and ordering.
    For each (run, time) cell the existing source files are joined with
    ``str_delimiter`` (or None when nothing exists); the matching ancillary
    cell keeps its first file only when at least one source file exists.

    :return: tuple (run_file_filter, anc_file_filter), each a dict
        {run_name: pandas.Series indexed by time} restricted to the times
        where at least one source file was found.
    """
    file_dframe = None
    anc_dframe = None
    for (run_name, run_fields), (anc_name, anc_fields) in zip(
            run_file_collections.items(), anc_file_collections.items()):

        file_ws = []
        anc_ws = []
        for (run_time, run_file), (anc_time, anc_file) in zip(run_fields.items(), anc_fields.items()):

            # Keep only the source files that actually exist.
            file_list = []
            id_list = []
            for file_id, file_name in enumerate(run_file):
                if os.path.exists(file_name):
                    file_list.append(file_name)
                    id_list.append(file_id)

            # The ancillary file is kept (first entry only) when at least
            # one source file for this time step exists.
            if id_list.__len__() >= 1:
                anc_list = [anc_file[0]]
            else:
                anc_list = []

            # Collapse the per-step lists to a single delimited string
            # (None marks an empty step).
            if file_list.__len__() > 1:
                file_string = str_delimiter.join(file_list)
            elif file_list.__len__() == 0:
                file_string = None
            else:
                file_string = file_list[0]

            if anc_list.__len__() > 1:
                anc_string = str_delimiter.join(anc_list)
            elif anc_list.__len__() == 0:
                anc_string = None
            else:
                anc_string = anc_list[0]

            file_ws.append(file_string)
            anc_ws.append(anc_string)

        # Accumulate one column per run into the time-indexed frames.
        file_array = np.asarray(file_ws)
        anc_array = np.asarray(anc_ws)

        file_series = pd.Series(data=file_array, index=list(run_fields.keys()))
        anc_series = pd.Series(data=anc_array, index=list(anc_fields.keys()))

        if file_dframe is None:
            file_dframe = file_series.to_frame(name=run_name)
        else:
            file_dframe[run_name] = file_series
        if anc_dframe is None:
            anc_dframe = anc_series.to_frame(name=run_name)
        else:
            anc_dframe[run_name] = anc_series

    # NOTE(review): if the input collections are empty, file_dframe stays
    # None and the loop below raises AttributeError - confirm callers
    # always pass at least one run.
    run_file_filter = {}
    anc_file_filter = {}
    for file_column, anc_column in zip(list(file_dframe.columns), list(anc_dframe.columns)):
        file_series_raw = file_dframe[file_column]
        anc_series_raw = anc_dframe[anc_column]

        # Drop the time steps without any existing source file; the
        # ancillary series is filtered on the same mask to stay aligned.
        file_series_filter = file_series_raw[file_series_raw.notnull()]
        anc_series_filter = anc_series_raw[file_series_raw.notnull()]

        run_file_filter[file_column] = file_series_filter
        anc_file_filter[file_column] = anc_series_filter

    return run_file_filter, anc_file_filter
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define filename(s) struct
def define_run_file_name_struct(self, run_dict,
                                run_args=None, run_time_period=None):
    """Define the run filename collection(s) for every run key and time step.

    Folder/file-name templates taken from ``run_dict`` are filled with time
    tags, an ensemble-member tag and (optionally) per-point ancillary tags,
    producing one list of candidate file names per time step.

    Args:
        run_dict: Run definition(s). Either {run_key: fields} or, when no
            run keys are given, a single fields dict; fields must hold the
            folder-name and file-name templates (and optionally the run-n tag).
        run_args: Run arguments; must contain 'run_key'. May also carry the
            ancillary lists 'basin_name:section_name' and 'section_order'.
        run_time_period: Iterable of time steps used to fill the time tags.

    Returns:
        dict: {run_key: {time_step: [item, ...]}} when run keys are defined,
        otherwise {time_step: [item, ...]}. Items are filled path strings, or
        {point: path} dicts when ancillary points are defined.

    Raises:
        Exception: if 'run_key' is missing from ``run_args``.
        RuntimeError: if an expected ancillary tag is missing.
    """
    # Merge run- and time-related template tags into one raw-template dict
    # consumed by fill_tags2string.
    alg_template_run = self.alg_template_run
    alg_template_time = self.alg_template_time
    alg_template_raw = {**alg_template_run, **alg_template_time}
    if 'run_key' in list(run_args.keys()):
        run_list = run_args['run_key']
    else:
        log_stream.error(' ===> Bad definition or "run_key" in the run arguments dictionary')
        raise Exception("Key not defined")
    # An empty key list means "single unnamed run".
    if run_list.__len__() == 0:
        run_list = [None]
    # Everything besides 'run_key' is ancillary (point/order) information.
    ancillary_args = deepcopy(run_args)
    ancillary_args.pop('run_key', None)
    if ancillary_args.__len__() == 1:
        # Only the basin/section points are given; the section order falls
        # back to 'NA' placeholders, one per point.
        if 'basin_name:section_name' in list(ancillary_args.keys()):
            ancillary_list_point = ancillary_args['basin_name:section_name']
            ancillary_keys_point = ['basin_name:section_name'.split(self.str_delimiter)] * ancillary_list_point.__len__()
            ancillary_list_order = ['NA'] * ancillary_list_point.__len__()
            ancillary_keys_order = ['section_order'] * ancillary_list_point.__len__()
        else:
            log_stream.error(' ===> Ancillary tag "basin_name:section_name" is not defined.')
            raise RuntimeError('Check your static collections to solve the error')
    elif ancillary_args.__len__() == 2:
        # Both the points and the section order are given explicitly.
        if 'basin_name:section_name' in list(ancillary_args.keys()):
            ancillary_list_point = ancillary_args['basin_name:section_name']
            ancillary_keys_point = ['basin_name:section_name'.split(self.str_delimiter)] * ancillary_list_point.__len__()
        else:
            log_stream.error(' ===> Ancillary tag "basin_name:section_name" is not defined.')
            raise RuntimeError('Check your static collections to solve the error')
        if 'section_order' in list(ancillary_args.keys()):
            ancillary_list_order = ancillary_args['section_order']
            ancillary_keys_order = ['section_order'] * ancillary_list_order.__len__()
        else:
            log_stream.error(' ===> Ancillary tag "section_order" is not defined.')
            raise RuntimeError('Check your static collections to solve the error')
    else:
        # No ancillary information at all: plain path strings are produced.
        ancillary_keys_point = None
        ancillary_list_point = None
        ancillary_keys_order = None
        ancillary_list_order = None
    if not isinstance(run_list, list):
        run_list = [run_list]
    run_file_collections = {}
    for run_key in run_list:
        if run_key is not None:
            run_fields = run_dict[run_key]
            run_folder_step = run_fields[self.folder_name_tag]
            run_file_step = run_fields[self.file_name_tag]
        else:
            # Single-run mode: run_dict itself holds the fields.
            run_fields = deepcopy(run_dict)
            run_folder_step = run_fields[self.folder_name_tag]
            run_file_step = run_fields[self.file_name_tag]
        # Number of ensemble members (defaults to a single member).
        if self.run_n_tag in list(run_fields.keys()):
            run_n = run_fields[self.run_n_tag]
        else:
            run_n = 1
        run_tag = self.define_run_n_range(n_max=run_n)
        run_path_step = os.path.join(run_folder_step, run_file_step)
        if run_key is not None:
            run_file_collections[run_key] = {}
        for run_time_step in run_time_period:
            run_file_list = []
            if run_key is not None:
                run_file_collections[run_key][run_time_step] = {}
            else:
                run_file_collections[run_time_step] = {}
            for run_tag_step in run_tag:
                # Fill every time tag with the current time step and the
                # ensemble tag with the current member label.
                alg_template_filled = {}
                for alg_template_key, alg_template_value in alg_template_time.items():
                    alg_template_filled[alg_template_key] = run_time_step
                alg_template_filled['ensemble_name'] = run_tag_step
                if ancillary_list_point is not None:
                    # Build one {tag: value} dict per basin/section point.
                    ancillary_dict_point = []
                    for anc_key_pnt, anc_value_pnt, anc_key_ord, anc_value_ord in zip(
                            ancillary_keys_point, ancillary_list_point, ancillary_keys_order, ancillary_list_order):
                        # Point values are 'basin:section' strings; order
                        # values are assumed to be strings too ('NA' in the
                        # fallback case) -- TODO confirm upstream types.
                        anc_value_pnt_parts = anc_value_pnt.split(self.str_delimiter)
                        anc_value_ord_parts = anc_value_ord.split(self.str_delimiter)
                        if not isinstance(anc_key_ord, list):
                            anc_key_ord = [anc_key_ord]
                        anc_keys = anc_key_pnt + anc_key_ord
                        anc_values = anc_value_pnt_parts + anc_value_ord_parts
                        ancillary_iterator = zip(anc_keys, anc_values)
                        ancillary_dict = dict(ancillary_iterator)
                        ancillary_dict_point.append(ancillary_dict)
                else:
                    ancillary_dict_point = None
                if ancillary_dict_point is not None:
                    for ancillary_value_step, ancillary_dict_step in zip(ancillary_list_point, ancillary_dict_point):
                        # NOTE(review): alg_template_filled accumulates the
                        # ancillary tags of previous points across this loop;
                        # later merges overwrite earlier keys in place.
                        alg_template_filled = {**alg_template_filled, **ancillary_dict_step}
                        run_path_filled = fill_tags2string(run_path_step, alg_template_raw, alg_template_filled)
                        run_path_dict = {ancillary_value_step: run_path_filled}
                        run_file_list.append(run_path_dict)
                else:
                    run_path_filled = fill_tags2string(run_path_step, alg_template_raw, alg_template_filled)
                    run_file_list.append(run_path_filled)
            if run_key is not None:
                run_file_collections[run_key][run_time_step] = run_file_list
            else:
                run_file_collections[run_time_step] = run_file_list
    return run_file_collections
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define run time range
@staticmethod
def define_run_time_range(time_reference, time_period=1, time_frequency='H', time_reverse=True):
if time_period < 1:
log_stream.warning(' ===> Run time period less than 1, Set to 1 by the algorithm')
time_period = 1
time_range = pd.date_range(end=time_reference, periods=time_period, freq=time_frequency)
if time_reverse:
time_range = time_range[::-1]
return time_range
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define run n
@staticmethod
def define_run_n_range(n_min=1, n_max=1, n_step=1, n_format="{:03d}"):
n_name = np.arange(n_min, n_max + 1, n_step).tolist()
n_range = np.arange(1, n_name.__len__() + 1, 1).tolist()
n_list = []
for i_name, i_range in zip(n_name, n_range):
n_list.append(n_format.format(i_range))
return n_list
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define run common field
@staticmethod
def define_run_common_field(data_dict, field_select=None, field_cmp=None):
fields_common_values = []
for data_key, data_fields in data_dict.items():
for field_key, field_value in data_fields.items():
if field_key == field_select:
fields_common_values.append(field_value)
if fields_common_values.__len__() >= 1:
if field_cmp == 'max':
fields_common_value = max(fields_common_values)
elif field_cmp == 'min':
fields_common_value = min(fields_common_values)
elif field_cmp == 'unique':
fields_common_value = list(set(fields_common_values))
if fields_common_value.__len__() == 1:
| |
from logs import logDecorator as lD
import jsonref, pprint
import numpy as np
import matplotlib.pyplot as plt
import csv, json
from psycopg2.sql import SQL, Identifier, Literal
from lib.databaseIO import pgIO
from collections import Counter
from textwrap import wrap
from tqdm import tqdm
from multiprocessing import Pool
# Module-level configuration, loaded once at import time from the project's
# JSON config files (paths are relative to the expected working directory).
config = jsonref.load(open('../config/config.json'))
table2_config = jsonref.load(open('../config/modules/table2.json'))
# Dotted logger path for this module, rooted at the configured log base.
logBase = config['logging']['logBase'] + '.modules.table2.table2'
@lD.log(logBase + '.genSUDUserKeys')
def genSUDUserKeys(logger):
    '''Dump the patientid keys of all SUD users to a .csv file.

    Queries sarah.test3 for patients flagged with sud = true and writes one
    row per patient to ../data/raw_data/SUDUser_keys.csv.

    Parameters
    ----------
    logger : {logging.Logger}
        The logger used for logging error information
    '''
    try:
        query = '''
        SELECT
            patientid
        FROM
            sarah.test3
        WHERE
            sud = true
        '''
        data = pgIO.getAllData(query)
        csvfile = "../data/raw_data/SUDUser_keys.csv"
        # 'w' (not 'w+': no reading needed) with newline='' so csv does not
        # emit blank rows on Windows; the context manager closes the file,
        # so no explicit close() is required.
        with open(csvfile, 'w', newline='') as output:
            csv.writer(output).writerows(data)
    except Exception as e:
        logger.error('Failed to generate list of SUD users because of {}'.format(e))
    return
@lD.log(logBase + '.createtest4Table')
def createTest4Table(logger):
    '''Creates test4

    This function creates the table sarah.test4, which contains boolean columns
    for each mental disorder.

    Decorators:
        lD.log

    Arguments:
        logger {logging.Logger} -- logs error information
    '''
    # DDL for the per-patient substance-use-disorder flag table.
    create_query = '''
    CREATE TABLE sarah.test4(
        patientid integer,
        alc bool,
        cannabis bool,
        amphe bool,
        halluc bool,
        nicotin bool,
        cocaine bool,
        opioids bool,
        sedate bool,
        others bool,
        polysub bool,
        inhalant bool,
        morethan2sud bool
    )
    '''
    try:
        outcome = pgIO.commitData(create_query)
        print(outcome)
    except Exception as e:
        logger.error('Failed to create test4 table because of {}'.format(e))
    return
@lD.log(logBase + '.popTest4')
def popTest4(logger):
    '''Populates test4

    This function populates the table sarah.test4, which contains boolean columns
    for each mental disorder. If a user's row has True for that column, it means
    that he/she has that disorder, and vice versa.

    Decorators:
        lD.log

    Arguments:
        logger {logging.Logger} -- logs error information
    '''
    try:
        # One patientid per row, produced earlier by genSUDUserKeys().
        all_userkeys = "../data/raw_data/SUDUser_keys.csv"
        with open(all_userkeys) as f:
            readCSV = csv.reader(f, delimiter=",")
            for user in tqdm(readCSV):
                # For this patient, aggregate all distinct DSM codes and test
                # the aggregate against each configured substance-category
                # code list ('&&' is the Postgres array-overlap operator).
                getQuery = SQL('''
                SELECT
                    patientid,
                    array_agg(distinct cast(dsmno as text)) && array[{}] as alc,
                    array_agg(distinct cast(dsmno as text)) && array[{}] as cannabis,
                    array_agg(distinct cast(dsmno as text)) && array[{}] as amphe,
                    array_agg(distinct cast(dsmno as text)) && array[{}] as halluc,
                    array_agg(distinct cast(dsmno as text)) && array[{}] as nicotin,
                    array_agg(distinct cast(dsmno as text)) && array[{}] as cocaine,
                    array_agg(distinct cast(dsmno as text)) && array[{}] as opioids,
                    array_agg(distinct cast(dsmno as text)) && array[{}] as sedate,
                    array_agg(distinct cast(dsmno as text)) && array[{}] as others,
                    array_agg(distinct cast(dsmno as text)) && array[{}] as polysub,
                    array_agg(distinct cast(dsmno as text)) && array[{}] as inhalant
                FROM
                    rwe_version1_1.pdiagnose
                WHERE
                    patientid = {}
                GROUP BY
                    patientid
                ''').format(
                    Literal(table2_config["params"]["sudcats"]["alc"]),
                    Literal(table2_config["params"]["sudcats"]["cannabis"]),
                    Literal(table2_config["params"]["sudcats"]["amphe"]),
                    Literal(table2_config["params"]["sudcats"]["halluc"]),
                    Literal(table2_config["params"]["sudcats"]["nicotin"]),
                    Literal(table2_config["params"]["sudcats"]["cocaine"]),
                    Literal(table2_config["params"]["sudcats"]["opioids"]),
                    Literal(table2_config["params"]["sudcats"]["sedate"]),
                    Literal(table2_config["params"]["sudcats"]["others"]),
                    Literal(table2_config["params"]["sudcats"]["polysub"]),
                    Literal(table2_config["params"]["sudcats"]["inhalant"]),
                    Literal(int(user[0]))
                )
                data = pgIO.getAllData(getQuery)
                pushQuery = '''
                INSERT INTO
                    sarah.test4(patientid, alc, cannabis, amphe, halluc, nicotin, cocaine, opioids, sedate, others, polysub, inhalant)
                VALUES
                    %s
                '''
                # Keeps only the newest duplicate row per patientid
                # (the one with the highest ctid).
                deleteDupliQuery = '''
                DELETE FROM sarah.test4 a USING (
                    SELECT MAX(ctid) as ctid, patientid
                    FROM sarah.test4
                    GROUP BY patientid HAVING count(*) > 1
                ) b
                WHERE a.patientid = b.patientid
                AND a.ctid <> b.ctid
                '''
                # NOTE(review): both the duplicate cleanup and the insert run
                # once per patient inside this loop, and the cleanup runs
                # BEFORE the current patient's data is pushed -- confirm this
                # ordering (and the per-row cost) is intended.
                value = pgIO.commitData(deleteDupliQuery)
                if value == True:
                    print("Duplicate values succesfully deleted")
                print(pgIO.commitDataList(pushQuery, data))
    except Exception as e:
        logger. error('Failed to populate test4 table because of {}'.format(e))
    return
@lD.log(logBase + '.divByAllAges')
def divByAllAges(logger, l):
    '''Divides by total sample of each race

    This function takes in a list of counts and returns a list (of similar
    structure) with the percentage of the counts over each race's total
    sample, read from ../data/final/sampleCount.json.

    Decorators:
        lD.log

    Arguments:
        logger {logging.Logger} -- logs error information
        l {list} -- l[0] is the no. of AA , l[1] is the no. of NHPI, l[2] is the no. of MR

    Returns:
        list -- percentages rounded to one decimal place, in AA/NHPI/MR order
    '''
    # The with-block closes the file; the old explicit close() was redundant.
    with open("../data/final/sampleCount.json") as json_file:
        table1results = json.load(json_file)
    # Element [0] of each race entry is that race's total sample count.
    totals = [table1results[race][0] for race in ('AA', 'NHPI', 'MR')]
    return [round((count / total) * 100, 1) for count, total in zip(l, totals)]
@lD.log(logBase + '.allAgesGeneralSUD')
def allAgesGeneralSUD(logger):
    '''
    Finds percentage of the total sample that has any SUD and more than 2 SUD

    Decorators:
        lD.log

    Arguments:
        logger {logging.Logger} -- logs error information

    Returns:
        dict -- {'any_sud': [...], 'morethan2_sud': [...]} percentages per
        race (empty if the computation failed)
    '''
    # Initialised before the try-block so the final return can never raise
    # NameError when an early query fails (previous code only assigned it
    # at the very end of the try body).
    resultsDict = {}
    try:
        countDict = {
            "any_sud": [],
            "morethan2_sud": []
        }
        # Find number of users in each race who have any SUD
        for race in table2_config["inputs"]["races"]:
            query = SQL('''
            SELECT
                count(*)
            FROM
                sarah.test2 t1
            INNER JOIN
                sarah.test4 t2
            ON
                t1.patientid = t2.patientid
            WHERE
                t1.race = {}
            ''').format(
                Literal(race)
            )
            data = [d[0] for d in pgIO.getAllData(query)]
            countDict["any_sud"].append(data[0])
        # Find number of users in each race who have >2 SUD
        # NOTE(review): assumes the configured races are exactly AA/NHPI/MR --
        # confirm against table2_config["inputs"]["races"].
        count = {
            "AA": 0,
            "NHPI": 0,
            "MR": 0
        }
        for race in table2_config["inputs"]["races"]:
            query = SQL('''
            SELECT
                t2.alc,
                t2.cannabis,
                t2.amphe,
                t2.halluc,
                t2.nicotin,
                t2.cocaine,
                t2.opioids,
                t2.sedate,
                t2.others,
                t2.polysub,
                t2.inhalant
            FROM
                sarah.test2 t1
            INNER JOIN
                sarah.test4 t2
            ON
                t1.patientid = t2.patientid
            WHERE
                t1.race = {}
            ''').format(
                Literal(race)
            )
            data = pgIO.getAllData(query)
            # Each row is a tuple of 11 substance booleans; renamed from
            # 'tuple' to avoid shadowing the builtin.
            # NOTE(review): despite the 'morethan2' name, the condition counts
            # patients with TWO or more categories -- confirm intent.
            for sud_flags in data:
                if sum(sud_flags) >= 2:
                    count[race] += 1
        for race in count:
            countDict["morethan2_sud"].append(count[race])
        # Change counts to percentage of the race sample
        for row in countDict:
            resultsDict[row] = divByAllAges(countDict[row])
    except Exception as e:
        logger.error('Failed to find general SUD counts because of {}'.format(e))
    return resultsDict
@lD.log(logBase + '.allAgesCategorisedSUD')
def allAgesCategorisedSUD(logger):
    '''
    Finds percentage of the age-binned sample that have
    SUD of a particular substance

    Decorators:
        lD.log

    Arguments:
        logger {logging.Logger} -- logs error information

    Returns:
        dict -- {sud_category: [percentages per race]} (empty on failure)
    '''
    # Initialised before the try-block so the final return can never raise
    # NameError when a query fails early.
    resultsDict = {}
    try:
        # Build the count lists from the configured categories instead of a
        # hard-coded duplicate list, so config and counts cannot drift apart.
        countDict = {sudcat: [] for sudcat in table2_config["params"]["sudcats"]}
        for race in table2_config["inputs"]["races"]:
            for sudcat in table2_config["params"]["sudcats"]:
                query = SQL('''
                SELECT
                    count(*)
                FROM
                    sarah.test2 t1
                INNER JOIN
                    sarah.test4 t2
                ON
                    t1.patientid = t2.patientid
                WHERE
                    t1.race = {}
                AND
                    t2.{} = true
                ''').format(
                    Literal(race),
                    Identifier(sudcat)
                )
                data = [d[0] for d in pgIO.getAllData(query)]
                countDict[sudcat].append(data[0])
        # Change counts to percentage of the race sample
        for row in countDict:
            resultsDict[row] = divByAllAges(countDict[row])
    except Exception as e:
        logger.error('Failed to find categorised SUD counts because of {}'.format(e))
    return resultsDict
@lD.log(logBase + '.divByAgeBins')
def divByAgeBins(logger, lol):
    '''Divide by no. of people of each race in a certain age bin

    Takes a list of lists ``lol`` where lol[0] holds the AA counts, lol[1]
    the NHPI counts and lol[2] the MR counts; within each inner list, entry
    0 is ages 1-11, entry 1 is ages 12-17, and so forth. Each count is
    converted to a percentage of that race's age-bin sample size.

    Arguments:
        logger {logging.Logger} -- logs error information
        lol {list of lists} -- per-race, per-age-bin counts
    '''
    with open("../data/final/sampleCount.json") as json_file:
        table1results = json.load(json_file)
    # Element [1] of each race entry holds that race's age-bin sample sizes.
    ageBinsAA = table1results['AA'][1]
    ageBinsNHPI = table1results['NHPI'][1]
    ageBinsMR = table1results['MR'][1]
    resultLoL = []
    for counts, bins in ((lol[0], ageBinsAA), (lol[1], ageBinsNHPI), (lol[2], ageBinsMR)):
        resultLoL.append([round((x / y) * 100, 1) for x, y in zip(counts, bins)])
    return resultLoL
@lD.log(logBase + '.ageBinnedGeneralSUD')
def ageBinnedGeneralSUD(logger):
'''
Finds percentage of the age-binned sample that has any SUD and more than 2 SUD
Decorators:
lD.log
Arguments:
logger {logging.Logger} -- logs error information
'''
try:
countDict = {
"any_sud": [],
"morethan2_sud": []
}
# Find number of users in each race who have any SUD, separated into age bins
any_sud = []
for race in table2_config["inputs"]["races"]:
counts = []
for lower, upper in zip(['1', '12', '18', '35', '50'], ['11', '17', '34', '49', '100']):
query = SQL('''
SELECT
count(*)
FROM
sarah.test2 t1
INNER JOIN
sarah.test3 t2
ON
t1.patientid = t2.patientid
WHERE
t1.race = {}
AND
t1.age BETWEEN {} AND {}
AND
t2.sud = true
''').format(
Literal(race),
Literal(lower),
Literal(upper)
)
data = [d[0] for d in pgIO.getAllData(query)]
counts.append(data[0])
countDict["any_sud"].append(counts)
# Find number of users in each race who have >2 SUD, separated into age bins
count = {
"AA": {
"1": 0,
"12": 0,
"18": 0,
"35": 0,
"50": 0
},
"NHPI": {
"1": 0,
"12": 0,
"18": 0,
"35": 0,
"50": 0
},
"MR": {
"1": 0,
"12": 0,
"18": 0,
"35": 0,
"50": 0
}
}
for race in table2_config["inputs"]["races"]:
for lower, upper in zip(['1', '12', '18', '35', '50'], ['11', '17', '34', '49', '100']):
query = SQL('''
SELECT
t2.alc,
t2.cannabis,
t2.amphe,
t2.halluc,
t2.nicotin,
t2.cocaine,
t2.opioids,
t2.sedate,
t2.others,
t2.polysub,
t2.inhalant
FROM
sarah.test2 t1
INNER JOIN
sarah.test4 t2
ON
t1.patientid = t2.patientid
WHERE
t1.race = {}
AND
t1.age BETWEEN {} AND {}
''').format(
Literal(race),
Literal(lower),
Literal(upper)
)
data = pgIO.getAllData(query)
for tuple | |
tf.stop_gradient(out)
return out
class CpcLearner(tf.keras.Model):
  """A learner for CPC (Contrastive Predictive Coding).

  Trains a state embedder (optionally followed by a recurrent context
  embedder over the first ``ctx_length`` steps) with an InfoNCE-style
  contrastive loss: the energy of each (context, true future embedding)
  pair is pushed up relative to in-batch negatives.
  """

  def __init__(self,
               state_dim,
               action_dim,
               embedding_dim = 256,
               num_distributions = None,
               hidden_dims = (256, 256),
               sequence_length = 2,
               ctx_length = None,
               ctx_action = False,
               downstream_input_mode = 'embed',
               learning_rate = None):
    """Creates networks.

    Args:
      state_dim: State size.
      action_dim: Action size (used to size the context-embedder input when
        ctx_action is True).
      embedding_dim: Embedding size.
      num_distributions: Number of categorical distributions
        for discrete embedding.
      hidden_dims: List of hidden dimensions.
      sequence_length: Expected length of sequences provided as input
      ctx_length: number of past steps to compute a context.
      ctx_action: Whether to include past actions as a part of the context.
      downstream_input_mode: Whether to use states, embedding, or context.
      learning_rate: Learning rate.
    """
    super().__init__()
    self.input_dim = state_dim
    self.embedding_dim = embedding_dim
    self.num_distributions = num_distributions
    self.sequence_length = sequence_length
    self.ctx_length = ctx_length
    self.ctx_action = ctx_action
    self.downstream_input_mode = downstream_input_mode
    self.embedder = EmbedNet(
        state_dim,
        embedding_dim=self.embedding_dim,
        num_distributions=num_distributions,
        hidden_dims=hidden_dims)
    # One bilinear transform per predicted future step: the remaining
    # sequence after the context (or after the first step) is predicted.
    self.weight = tf.Variable(
        tf.eye(
            self.embedding_dim,
            batch_shape=[sequence_length - (ctx_length or 1)]))
    learning_rate = learning_rate or 3e-4
    self.optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    self.all_variables = [self.weight] + self.embedder.variables
    if ctx_length:
      # The context embedder consumes per-step embeddings, optionally
      # concatenated with the corresponding actions.
      ctx_dim = embedding_dim + action_dim if ctx_action else embedding_dim
      self.ctx_embedder = RNNEmbedNet([ctx_length, ctx_dim],
                                      embedding_dim)
      self.all_variables += self.ctx_embedder.embedder.variables
    else:
      self.ctx_embedder = None

  @tf.function
  def call(self,
           states,
           actions = None,
           stop_gradient = True):
    """Returns embedding.

    Args:
      states: 2 or 3 dimensional state tensors (3-dimensional, i.e. with a
        time axis, when a context embedder is configured).
      actions: Optional action tensor; only read when
        downstream_input_mode includes 'ctx' and ctx_action is True.
      stop_gradient: Whether to stop_gradient.

    Returns:
      Embedding. When ctx_length is set, the concatenation (last axis) of
      one output per '-'-separated mode in downstream_input_mode.
    """
    if not self.ctx_embedder:
      assert (len(states.shape) == 2)
      return self.embedder(states, stop_gradient=stop_gradient)
    outputs = []
    for mode in self.downstream_input_mode.split('-'):
      if mode == 'state':
        # Raw state at the step right after the context window.
        outputs.append(states[:, self.ctx_length, :])
      elif mode == 'embed':
        # Embedding of the step right after the context window.
        outputs.append(
            self.embedder(
                states[:, self.ctx_length, :], stop_gradient=stop_gradient))
      elif mode == 'ctx':
        # Embed each context step, then summarize with the RNN embedder.
        embedding = tf.reshape(states[:, :self.ctx_length, :],
                               [-1, tf.shape(states)[-1]])
        embedding = self.embedder(embedding, stop_gradient=stop_gradient)
        embedding = tf.reshape(
            embedding, [-1, self.ctx_length, self.embedder.embedding_dim])
        if self.ctx_action:
          embedding = tf.concat([embedding, actions[:, :self.ctx_length, :]],
                                axis=-1)
        embedding = self.ctx_embedder(embedding, stop_gradient=stop_gradient)
        outputs.append(embedding)
    return tf.concat(outputs, axis=-1)

  def compute_energy(self, embeddings,
                     other_embeddings):
    """Computes matrix of energies between every pair of (embedding, other_embedding)."""
    # Bilinear energy: e^T W e' per future step (leading axis of weight).
    transformed_embeddings = tf.matmul(embeddings, self.weight)
    energies = tf.matmul(transformed_embeddings, other_embeddings, transpose_b=True)
    return energies

  def fit(self, states,
          actions):
    """Updates critic parameters.

    Args:
      states: Batch of sequences of states.
      actions: Batch of sequences of actions.

    Returns:
      Dictionary with information to track ('embed_loss').
    """
    batch_size = tf.shape(states)[0]
    with tf.GradientTape(watch_accessed_variables=False) as tape:
      tape.watch(self.all_variables)
      # Embed all steps at once, then reshape back to sequence layout.
      all_states = tf.reshape(states, [batch_size * self.sequence_length, self.input_dim])
      all_embeddings = self.embedder(all_states, stop_gradient=False)
      all_embeddings = tf.reshape(
          all_embeddings, [batch_size, self.sequence_length, self.embedding_dim])
      if self.ctx_embedder:
        embeddings = all_embeddings[:, :self.ctx_length, :]
        if self.ctx_action:
          embeddings = tf.concat([embeddings, actions[:, :self.ctx_length, :]],
                                 axis=-1)
        embeddings = self.ctx_embedder(
            embeddings, stop_gradient=False)[None, Ellipsis]
        # Future steps moved to the leading axis to match the per-step
        # weight matrices in compute_energy.
        next_embeddings = tf.transpose(all_embeddings[:, self.ctx_length:, :],
                                       [1, 0, 2])
      else:
        embeddings = all_embeddings[None, :, 0, :]
        next_embeddings = tf.transpose(all_embeddings[:, 1:, :], [1, 0, 2])
      energies = self.compute_energy(embeddings, next_embeddings)
      # InfoNCE: diagonal holds the matched (positive) pairs; the
      # logsumexp over the batch acts as the negative term.
      positive_loss = tf.linalg.diag_part(energies)
      negative_loss = tf.reduce_logsumexp(energies, axis=-1)
      loss = tf.reduce_mean(-positive_loss + negative_loss)
    grads = tape.gradient(loss, self.all_variables)
    self.optimizer.apply_gradients(
        zip(grads, self.all_variables))
    return {
        'embed_loss': loss,
    }

  @tf.function
  def update_step(self, replay_buffer_iter):
    # One training step from a replay-buffer iterator yielding
    # (states, actions, ...) tuples.
    states, actions, _, _, _ = next(replay_buffer_iter)
    return self.fit(states, actions)

  def get_input_state_dim(self):
    # Size of the representation produced by call(); the sum over the
    # configured downstream input modes when a context embedder exists.
    if not self.ctx_embedder:
      return self.embedder.embedding_dim
    input_state_dim = 0
    for mode in self.downstream_input_mode.split('-'):
      if mode == 'state':
        input_state_dim += self.input_dim
      elif mode == 'embed':
        input_state_dim += self.embedder.embedding_dim
      elif mode == 'ctx':
        input_state_dim += self.ctx_embedder.embedding_dim
    return input_state_dim
class HiroLearner(tf.keras.Model):
  """A learner for HIRO.

  Learns a state embedding together with an action embedding such that
  cur_embed + action_embed predicts the embedding of a (discounted) future
  state; negatives are drawn from a history buffer of past embeddings.
  """

  def __init__(self,
               state_dim,
               action_dim,
               embedding_dim = 256,
               hidden_dims = (256, 256),
               sequence_length = 2,
               learning_rate = None):
    """Creates networks.

    Args:
      state_dim: State size.
      action_dim: Action size.
      embedding_dim: Embedding size.
      hidden_dims: List of hidden dimensions.
      sequence_length: Expected length of sequences provided as input
      learning_rate: Learning rate.
    """
    super().__init__()
    self.input_dim = state_dim
    self.embedding_dim = embedding_dim
    self.sequence_length = sequence_length
    self.embedder = EmbedNet(
        state_dim,
        embedding_dim=self.embedding_dim,
        hidden_dims=hidden_dims)
    # Embeds the current state concatenated with the flattened sequence of
    # the first (sequence_length - 1) actions.
    self.action_embedder = EmbedNet(
        state_dim + action_dim * (self.sequence_length - 1),
        embedding_dim=self.embedding_dim,
        hidden_dims=hidden_dims)
    learning_rate = learning_rate or 1e-4
    self.optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    # FIFO buffer of 1024 recent target embeddings, used as the negative
    # sample pool in fit().
    self.embed_history = tf.Variable(tf.zeros([1024, self.embedding_dim]))
    self.all_variables = self.embedder.variables + self.action_embedder.variables

  @tf.function
  def call(self,
           states,
           stop_gradient = True):
    """Returns embedding.

    Args:
      states: A batch of states.
      stop_gradient: Whether to stop_gradient.

    Returns:
      Embedding.
    """
    return self.embedder(states, stop_gradient=stop_gradient)

  def _sample_next_states(self, states, discount = 0.99):
    """Given a sequence of states, samples the `next_states` for loss computation."""
    batch_size = tf.shape(states)[0]
    d = self.sequence_length - 1
    # Geometric (discount-weighted) distribution over the d future offsets;
    # the last offset absorbs the tail mass (1 / (1 - discount)) so it
    # stands in for all steps beyond the horizon.
    probs = discount ** tf.range(d, dtype=tf.float32)
    probs *= tf.constant([1.0] * (d - 1) + [1.0 / (1 - discount)],
                         dtype=tf.float32)
    probs /= tf.reduce_sum(probs)
    index_dist = tfp.distributions.Categorical(probs=probs, dtype=tf.int64)
    indices = index_dist.sample(batch_size)
    batch_size = tf.cast(batch_size, tf.int64)
    # Gather states[b, 1 + indices[b]] for every batch element b.
    next_indices = tf.concat(
        [tf.range(batch_size, dtype=tf.int64)[:, None],
         1 + indices[:, None]], -1)
    next_states = tf.gather_nd(states, next_indices)
    return next_states

  def fit(self, states, actions):
    """Updates critic parameters.

    Args:
      states: Batch of sequences of states.
      actions: Batch of sequences of actions.

    Returns:
      Dictionary with information to track ('embed_loss').
    """
    batch_size = tf.shape(states)[0]
    with tf.GradientTape(watch_accessed_variables=False) as tape:
      tape.watch(self.all_variables)
      next_states = self._sample_next_states(states)
      cur_states = states[:, 0, :]
      cur_embed = self.embedder(cur_states, stop_gradient=False)
      next_embed = self.embedder(next_states, stop_gradient=False)
      # Update history of embeddings with this batch's next_embed.
      self.embed_history.assign(tf.concat([self.embed_history[batch_size:], next_embed], 0))
      action_embed_input = tf.concat(
          [cur_states, tf.reshape(actions[:, :-1, :], [batch_size, -1])], -1)
      action_embed = self.action_embedder(action_embed_input, stop_gradient=False)
      # Energy: negative scaled Huber distance between prediction and target.
      tau = 2.0
      energy_fn = lambda z: -tau * tf.reduce_sum(huber(z), -1)
      positive_loss = tf.reduce_mean(energy_fn(cur_embed + action_embed - next_embed))
      # Negative loss should just be a log-avg-exp, but we compute it in a more
      # numerically stable way below.
      prior_log_probs = tf.reduce_logsumexp(
          energy_fn((cur_embed + action_embed)[:, None, :]
                    - self.embed_history[None, :, :]),
          axis=-1) - tf.math.log(tf.cast(self.embed_history.shape[0], tf.float32))
      # Roll the batch by one so each prediction is scored against another
      # element's target as a negative.
      shifted_next_embed = tf.concat([next_embed[1:], next_embed[:1]], 0)
      negative_loss = tf.reduce_mean(
          tf.exp(energy_fn((cur_embed + action_embed) - shifted_next_embed)
                 - tf.stop_gradient(prior_log_probs)))
      loss = tf.reduce_mean(-positive_loss + negative_loss)
    grads = tape.gradient(loss, self.all_variables)
    self.optimizer.apply_gradients(
        zip(grads, self.all_variables))
    return {
        'embed_loss': loss
    }

  @tf.function
  def update_step(self, replay_buffer_iter):
    # One training step from a replay-buffer iterator yielding
    # (states, actions, ...) tuples.
    states, actions, _, _, _ = next(replay_buffer_iter)
    return self.fit(states, actions)

  def get_input_state_dim(self, downstream_input_mode):
    # Downstream representation size; the mode argument is ignored here.
    return self.embedder.embedding_dim
class MomentumCpcLearner(CpcLearner):
  """A learner for momentum CPC.

  Like CpcLearner, but future-step targets come from a slowly-updated
  (momentum/target) copy of the embedder, and the online embedding is
  refined by a residual MLP before energies are computed.
  """

  def __init__(self,
               state_dim,
               action_dim,
               embedding_dim = 256,
               hidden_dims = (256, 256),
               residual_dims = (256,),
               sequence_length = 2,
               ctx_length = None,
               downstream_input_mode = 'embed',
               learning_rate = None,
               tau = 0.05,
               target_update_period = 1):
    """Creates networks.

    Args:
      state_dim: State size.
      action_dim: Action size.
      embedding_dim: Embedding size.
      hidden_dims: List of hidden dimensions.
      residual_dims: hidden dims for the residual network.
      sequence_length: Expected length of sequences provided as input
      ctx_length: Number of past steps to compute a context.
      downstream_input_mode: Whether to use states, embedding, or context.
      learning_rate: Learning rate.
      tau: Rate for updating target network.
      target_update_period: Frequency for updating target network.
    """
    super().__init__(
        state_dim,
        action_dim,
        embedding_dim=embedding_dim,
        hidden_dims=hidden_dims,
        sequence_length=sequence_length,
        ctx_length=ctx_length,
        downstream_input_mode=downstream_input_mode,
        learning_rate=learning_rate)
    # Residual refinement applied on top of the online embedder output.
    self.residual_mlp = EmbedNet(
        embedding_dim,
        embedding_dim=embedding_dim,
        hidden_dims=residual_dims)
    # Target embedder; initialised as an exact copy (tau=1.0) and then
    # tracked with exponential moving averages in fit().
    self.embedder_target = EmbedNet(
        state_dim,
        embedding_dim=self.embedding_dim,
        hidden_dims=hidden_dims)
    soft_update(self.embedder, self.embedder_target, tau=1.0)
    learning_rate = learning_rate or 3e-4
    self.optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    self.tau = tau
    self.target_update_period = target_update_period
    # Target-network variables are deliberately NOT trained.
    self.all_variables += self.residual_mlp.variables

  def fit(self, states, actions):
    """Updates critic parameters.

    Args:
      states: Batch of sequences of states.
      actions: Batch of sequences of actions.

    Returns:
      Dictionary with information to track ('embed_loss').
    """
    batch_size = tf.shape(states)[0]
    with tf.GradientTape(watch_accessed_variables=False) as tape:
      tape.watch(self.all_variables)
      if self.ctx_length:
        cur_states = states[:, :self.ctx_length, :]
        cur_states = tf.reshape(cur_states, [-1, self.input_dim])
      else:
        cur_states = states[:, 0, :]
      # All steps after the context window (or after the first step) are
      # the prediction targets.
      all_next_states = tf.reshape(states[:, self.ctx_length or 1:, :], [
          batch_size * (self.sequence_length -
                        (self.ctx_length or 1)), self.input_dim
      ])
      embeddings = self.embedder(cur_states, stop_gradient=False)
      embeddings += self.residual_mlp(embeddings, stop_gradient=False)
      # Targets come from the momentum copy and carry no gradient.
      all_next_embeddings = self.embedder_target(
          all_next_states, stop_gradient=True)
      if self.ctx_length:
        embeddings = tf.reshape(embeddings,
                                [-1, self.ctx_length, self.embedding_dim])
        embeddings = self.ctx_embedder(embeddings, stop_gradient=False)
      next_embeddings = tf.reshape(all_next_embeddings, [
          batch_size, self.sequence_length -
          (self.ctx_length or 1), self.embedding_dim
      ])
      embeddings = embeddings[None, :, :]
      # Future steps on the leading axis to match the per-step weights.
      next_embeddings = tf.transpose(next_embeddings, [1, 0, 2])
      energies = self.compute_energy(embeddings, next_embeddings)
      # InfoNCE: matched pairs on the diagonal, batch as negatives.
      positive_loss = tf.linalg.diag_part(energies)
      negative_loss = tf.reduce_logsumexp(energies, axis=-1)
      loss = tf.reduce_mean(-positive_loss + negative_loss)
    grads = tape.gradient(loss, self.all_variables)
    self.optimizer.apply_gradients(
        zip(grads, self.all_variables))
    # Periodic Polyak averaging of the target embedder.
    if self.optimizer.iterations % self.target_update_period == 0:
      soft_update(self.embedder, self.embedder_target, tau=self.tau)
    return {
        'embed_loss': loss
    }
class ActionVaeLearner(tf.keras.Model):
"""A learner for variational construction of action given state."""
def __init__(self,
state_dim,
action_spec,
embedding_dim = 256,
num_distributions = None,
hidden_dims = (256, 256),
sequence_length = 2,
learning_rate = None,
kl_weight = 0.02,
trans_kl_weight = 0.0):
"""Creates networks.
Args:
state_dim: State size.
action_spec: Action spec.
embedding_dim: Embedding size.
num_distributions: Number of categorical distributions
for discrete embedding.
hidden_dims: List of hidden dimensions.
sequence_length: Expected length of sequences provided as input
learning_rate: Learning rate.
kl_weight: Weight on KL regularizer.
trans_kl_weight: Weight on KL regularizer of transformer outputs.
"""
super().__init__()
self.input_dim = state_dim
self.action_dim = action_spec.shape[0]
self.embedding_dim = embedding_dim
self.num_distributions = num_distributions
self.sequence_length = sequence_length
self.kl_weight = kl_weight
self.trans_kl_weight = trans_kl_weight
self.embedder = StochasticEmbedNet(
state_dim,
embedding_dim=self.embedding_dim,
num_distributions=self.num_distributions,
hidden_dims=hidden_dims)
self.transition = StochasticEmbedNet(
self.embedding_dim + self.action_dim,
embedding_dim=self.embedding_dim,
num_distributions=self.num_distributions,
hidden_dims=hidden_dims)
self.policy | |
import logging
logger = logging.getLogger(__name__)
from pyramid.view import view_config
from pyramid.security import authenticated_userid
import models
from pyramid.httpexceptions import HTTPFound
from owslib.wps import WebProcessingService
import os
DEFAULTQUALITYSERVER="http://suffolk.dkrz.de:8094/wps"
@view_config(route_name='qc_wizard_check',
renderer='templates/qc_wizard.pt',
layout='default',
permission='edit',
)
def qc_wizard_check(request):
title = "Quality Control Wizard"
user_id = authenticated_userid(request)
token = "<PASSWORD>"# user_token(request, user_id)
if not token:
raise Exception("Can not find token")
session_id_help = ("An identifier used to avoid processes running on the same directory." +
" Using an existing one will remove all data inside its directory.")
session_ids = get_session_ids(user_id, request)
if session_ids == []:
session_id_help += " There are currently no existing Session IDs."
else:
session_id_help += " The existing Session IDs are:<br>" +", ".join(session_ids)
qc_select_help = ("Comma separated list of parts of the path descriptions." +
" If at least one description in the list matches the path is included." +
" In the path description '.*' is for any character sequence. (e.g. " +
"AFR-44/.*/tas, EUR.*, /fx/)")
qc_lock_help = ("Works similar to select, but prevents the given paths being added. " +
"Lock is stronger than select. (e.g. select tas and lock AFR-44 checks all "+
"paths with tas that do not contain AFR-44.)")
#Each entry in fields must contain "text", "id" and "value"; the "help" entry is optional.
#"allowed_values" can be used if only a limited number of possible values should be available.
#In that case "value" will be used as the default if it is in "allowed_values".
#For type "checkbox" the existence of the "checked" key will lead to the checkbox being True.
fields = [
{"id": "quality_server_address", "type" : "text", "text": "URL to the Quality WPS",
"value":DEFAULTQUALITYSERVER},
{"id": "session_id", "type": "text", "text": "Session ID", "help":session_id_help,
"value": "web1"},
#{"id": "irods_home", "type": "text", "text": "iRods Home",
# "help": "The home directory of iRods", "value":"qc_dummy_DKRZ"},
#{"id": "irods_collection", "type": "text", "text": "iRods collection",
# "help": "Name of the to analyze collection", "value": "qc_test_20140416"},
{"id": "data_path", "type": "text", "text": "Root path of the to check data",
"value": ""},
{"id": "project", "type": "select", "text": "Project",
"value": "CORDEX", "allowed_values": ["CORDEX"] },
{"id": "select", "type": "text", "text": "QC SELECT", "value": "", "help": qc_select_help},
{"id": "lock", "type": "text", "text": "QC LOCK", "value": "", "help": qc_lock_help},
{"id": "replica", "type": "checkbox", "text": "Replica", "value": ""},
{"id": "latest", "type": "checkbox", "text": "Latest", "value": "", "checked": "checked"},
{"id": "publish_metadata", "type": "checkbox", "text": "Publish meta-data", "value": "",
"checked": "checked"},
{"id": "publish_quality", "type": "checkbox", "text": "Publish quality-data",
"value": "", "checked": "checked"},
{"id": "clean", "type": "checkbox", "text": "Clean afterwards",
"value": "", "help": "Removes the work data after the steps have finished"},
]
html_fields = get_html_fields(fields)
if "submit" in request.POST:
DATA = request.POST
#shorten the method parameters by automatically insert DATA
def getValue(identifier):
return getValueStatic(DATA, identifier)
def getBool(identifier):
return getBoolStatic(DATA, identifier)
##########################
#collect input parameters#
##########################
wps_address = getValue("quality_server_address")
username = str(user_id.replace("@","(at)"))
token = token
session_id = getValue("session_id")
#irods_home = DATA["irods_home"]
#irods_collection = DATA["irods_collection"]
data_path = getValue("data_path")
project = getValue("project")
#ensure lock and select are valid values.
select = getValue("select")
lock = getValue("lock")
#html checkboxes are true if and only if they are in the POST (DATA variable)
replica = getBool("replica")
latest = getBool("latest")
publish_metadata = getBool("publish_metadata")
publish_quality = getBool("publish_quality")
cleanup = getBool("clean")
#####################
#Run the wps call#
#####################
wps = WebProcessingService(wps_address)
identifier = "QC_Check_Full"
inputs = [("username", username), ("token", token), ("session_id", session_id),
#("irods_home", irods_home), ("irods_collection", irods_collection),
("data_path", data_path),
("project", project),
("select", select), ("lock", lock),
("replica", replica), ("latest", latest), ("publish_metadata", publish_metadata),
("publish_quality", publish_quality), ("cleanup", cleanup)]
#filter empty string values, because wps.execute does not like them.
inputs = [(x,y) for (x,y) in inputs if y!=""]
outputs = [("process_log", True)]
execution = wps.execute(identifier, inputs=inputs, output=outputs)
models.add_job(
request = request,
workflow = False,
title = execution.process.title,
wps_url = execution.serviceInstance,
status_location = execution.statusLocation,
notes = "test",
tags = "test")
return HTTPFound(location=request.route_url('jobs'))
return {
"title": title,
"html_fields" : html_fields,
}
def get_session_ids(user_id, request):
    """Fetch the list of existing QC session IDs for *user_id* via the WPS."""
    from wps import execute

    url = request.wps.url
    # NOTE(review): placeholder token; user_token() is commented out.
    token = "<PASSWORD>"#user_token(request, user_id)
    call_inputs = [("username", user_id.replace("@", "(at)")), ("token", token)]
    result = execute(url, 'Get_Session_IDs', inputs=call_inputs, output="session_ids")
    # There is only one output; it is a "/"-separated string of session IDs.
    if not result:
        return []
    return result[0].split("/")
def get_html_fields(fields):
    """
    Convert a field-description table into HTML input snippets.

    Each field dict must contain the keywords "type", "text" and "id";
    "value", "allowed_values", "checked" and "help" are (partially) optional.
    The TAL in the templates had become too complicated, therefore the Python
    code handles most of the conditions.

    Fix: the original concatenation emitted ``id="x"type="text"`` — no space
    between the two HTML attributes — for both the text and checkbox inputs;
    a space is now inserted so browsers parse the ``type`` attribute.

    Args:
        fields: list of field-description dicts (see module-level usage).

    Returns:
        A list of dicts with optional "help"/"text"/"id" keys copied through
        and an "input_html" key holding the rendered markup.
    """
    html_fields = []
    for field in fields:
        # Copy the purely descriptive keys through unchanged.
        html_field = {key: field[key] for key in ("help", "text", "id") if key in field}
        if field["type"] == "text":
            html_field["input_html"] = ('<input name="' + field["id"] + '" value="' +
                                        field["value"] + '" id="' + field["id"] +
                                        '" type="text">\n')
        elif field["type"] == "select":
            # NOTE(review): a value attribute on <select> is not valid HTML;
            # kept for behavior compatibility.
            html_field["input_html"] = ('<select name="' + field["id"] + '" value="' +
                                        field["value"] + '">\n')
            for option in field["allowed_values"]:
                html_field["input_html"] += "<option>" + option + "</option>\n"
            html_field["input_html"] += "</select>\n"
        elif field["type"] == "checkbox":
            html_field["input_html"] = ('<input name="' + field["id"] + '" value="' +
                                        field["value"] + '" id="' + field["id"] +
                                        '" type="checkbox"')
            if "checked" in field:
                html_field["input_html"] += ' checked="checked"'
            html_field["input_html"] += ">\n"
        html_fields.append(html_field)
    return html_fields
@view_config(route_name='qc_wizard_yaml',
renderer='templates/qc_wizard.pt',
layout='default',
permission='edit',
)
def qc_wizard_yaml(request):
title = "Quality Control Wizard"
user_id = authenticated_userid(request)
token = "<PASSWORD>" # user_token(request, user_id)
session_id_help = ("An identifier used to avoid processes running on the same directory." +
" Using an existing one will remove all data inside its work directory.")
session_ids = get_session_ids(user_id, request)
if session_ids == []:
session_id_help += " There are currently no existing Session IDs."
else:
session_id_help += " The existing Session IDs are:<br>" +", ".join(session_ids)
yamllogs_help = "The comma separated list of logfile locations"
oldprefix_help = "The data path in the provided logfiles"
newprefix_help = "The data path on the machine"
#a field in fields must contain text, id and value. The entry help is optional.
#allowed_values can be used if a limited number of possibile values should be available.
#In that case value will be used as default if it is in allowed_values.
#For type "checkbox" the existence of the "checked" key will lead to the checkbox being True.
fields = [
{"id": "quality_server_address", "type" : "text", "text": "URL to the Quality WPS",
"value":DEFAULTQUALITYSERVER},
{"id": "session_id", "type": "text", "text": "Session ID", "help":session_id_help,
"value": "checkdone"},
{"id": "yamllogs", "type": "text", "text": "YAML logs", "help": yamllogs_help, "value": ""},
{"id": "prefix_old", "type": "text", "text": "Old prefix", "help": oldprefix_help, "value": ""},
{"id": "prefix_new", "type": "text", "text": "New prefix", "help": newprefix_help, "value": ""},
{"id": "project", "type": "select", "text": "Project",
"value": "CORDEX", "allowed_values": ["CORDEX"] },
{"id": "replica", "type": "checkbox", "text": "Replica", "value": ""},
{"id": "latest", "type": "checkbox", "text": "Latest", "value": "", "checked": "checked"},
{"id": "publish_metadata", "type": "checkbox", "text": "Publish meta-data", "value": "",
"checked": "checked"},
{"id": "publish_quality", "type": "checkbox", "text": "Publish quality-data",
"value": "", "checked": "checked"},
{"id": "clean", "type": "checkbox", "text": "Clean afterwards",
"value": "", "help": "Removes the work data after the steps have finished"},
]
html_fields = get_html_fields(fields)
if "submit" in request.POST:
DATA = request.POST
#shorten the method parameters by automatically insert DATA
def getValue(identifier):
return getValueStatic(DATA, identifier)
def getBool(identifier):
return getBoolStatic(DATA, identifier)
##########################
#collect input parameters#
##########################
username = str(user_id.replace("@","(at)"))
token = token
session_id = getValue("session_id")
yamllogs = getValue("yamllogs")
prefix_old = getValue("prefix_old")
prefix_new = getValue("prefix_new")
project = getValue("project")
#html checkboxes are true if and only if they are in the POST (DATA variable)
replica = getBool("replica")
latest = getBool("latest")
publish_metadata = getBool("publish_metadata")
publish_quality = getBool("publish_quality")
cleanup = getBool("clean")
wps_address = getValue("quality_server_address")
wps = WebProcessingService(wps_address)
##################
#Run the wps call#
##################
wps = request.wps
identifier = "QC_Check_YAML"
inputs = [("username", username), ("token", token), ("session_id", session_id),
("yamllogs", yamllogs), ("prefix_old", prefix_old), ("prefix_new", prefix_new),
("project", project),
("replica", replica), ("latest", latest), ("publish_metadata", publish_metadata),
("publish_quality", publish_quality), ("cleanup", cleanup)]
outputs = [("process_log", True)]
#wps.execute does not like empty strings as value, so filter it out
inputs = [(x,y) for (x,y) in inputs if y!=""]
g = open("/home/dkrz/k204205/log","w")
g.write(str(inputs)+"\n")
g.write(str(outputs)+"\n")
g.write(str(identifier)+"\n")
| |
# <filename>annofabapi/generated_api2.py
# flake8: noqa: W291
# pylint: disable=too-many-lines,trailing-whitespace
"""
AbstractAnnofabApi2のヘッダ部分
Note:
このファイルはopenapi-generatorで自動生成される。詳細は generate/README.mdを参照
"""
import abc
import warnings # pylint: disable=unused-import
from typing import Any, Dict, List, Optional, Tuple, Union # pylint: disable=unused-import
import requests
import annofabapi # pylint: disable=unused-import
class AbstractAnnofabApi2(abc.ABC):
"""
AnnofabApi2クラスの抽象クラス
"""
    @abc.abstractmethod
    def _request_wrapper(
        self,
        http_method: str,
        url_path: str,
        query_params: Optional[Dict[str, Any]] = None,
        header_params: Optional[Dict[str, Any]] = None,
        request_body: Optional[Any] = None,
    ) -> Tuple[Any, requests.Response]:
        """Issue one HTTP request against the Annofab API.

        Implemented by the concrete client class; every generated method in
        this abstract class delegates to this hook.

        Args:
            http_method: HTTP verb, e.g. "GET".
            url_path: Path below the API root, e.g. "/projects/{project_id}".
            query_params: Optional query-string parameters.
            header_params: Optional additional HTTP headers.
            request_body: Optional request payload.

        Returns:
            Tuple of (response content — presumably decoded, judging from the
            sibling methods' Returns sections — and the raw
            requests.Response).
        """
        pass
#########################################
# Public Method : AfAnnotationSpecsV2Api
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def get_annotation_specs_v2(
self, project_id: str, query_params: Optional[Dict[str, Any]] = None, **kwargs
) -> Tuple[Any, requests.Response]:
"""アノテーション仕様取得
https://annofab.com/docs/api/#operation/getAnnotationSpecsV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
指定したプロジェクトのアノテーション仕様を取得します。 本 API は `cache` クエリパラメータが同じであれば結果がキャッシュされ、以降は高速にレスポンスが返ります。
Args:
project_id (str): プロジェクトID (required)
query_params (Dict[str, Any]): Query Parameters
cache (str): CACHE TIMESTAMP
history_id (str): 過去のアノテーション仕様を取得する場合、[アノテーション仕様履歴取得](#operation/getAnnotationSpecsHistories)APIで取得した `history_id` の値を指定します。 未指定時は最新のアノテーション仕様を取得します。
v (str): 取得するアノテーション仕様のフォーマットバージョンを指定します。
Returns:
Tuple[AnnotationSpecs, requests.Response]
"""
url_path = f"/projects/{project_id}/annotation-specs"
http_method = "GET"
keyword_params: Dict[str, Any] = {
"query_params": query_params,
}
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : AfOrganizationMemberV2Api
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def get_organization_member_v2(
self, organization_id: str, user_id: str, query_params: Optional[Dict[str, Any]] = None, **kwargs
) -> Tuple[Any, requests.Response]:
"""組織メンバー取得
https://annofab.com/docs/api/#operation/getOrganizationMemberV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
指定したユーザーが指定した組織にどのようなロールで参加しているかを取得します。
Args:
organization_id (str): 組織ID (required)
user_id (str): ユーザID (required)
query_params (Dict[str, Any]): Query Parameters
cache (str): CACHE TIMESTAMP
Returns:
Tuple[OrganizationMember, requests.Response]
"""
url_path = f"/organizations/{organization_id}/members/{user_id}"
http_method = "GET"
keyword_params: Dict[str, Any] = {
"query_params": query_params,
}
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_organization_members_v2(
self, organization_id: str, query_params: Optional[Dict[str, Any]] = None, **kwargs
) -> Tuple[Any, requests.Response]:
"""組織メンバー一括取得
https://annofab.com/docs/api/#operation/getOrganizationMembersV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
脱退したメンバーは含まれません。
Args:
organization_id (str): 組織ID (required)
query_params (Dict[str, Any]): Query Parameters
cache (str): CACHE TIMESTAMP
Returns:
Tuple[OrganizationMemberList, requests.Response]
"""
url_path = f"/organizations/{organization_id}/members"
http_method = "GET"
keyword_params: Dict[str, Any] = {
"query_params": query_params,
}
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : AfOrganizationV2Api
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def get_organization_by_name_v2(
self, organization_name: str, query_params: Optional[Dict[str, Any]] = None, **kwargs
) -> Tuple[Any, requests.Response]:
"""組織名で組織情報取得
https://annofab.com/docs/api/#operation/getOrganizationByNameV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
指定された組織を取得します。 本 API は `cache` クエリパラメータが同じであれば結果がキャッシュされ、以降は高速にレスポンスが返ります。
Args:
organization_name (str): 組織名 (required)
query_params (Dict[str, Any]): Query Parameters
cache (str): CACHE TIMESTAMP
Returns:
Tuple[Organization, requests.Response]
"""
url_path = f"/organizations-by-name/{organization_name}"
http_method = "GET"
keyword_params: Dict[str, Any] = {
"query_params": query_params,
}
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_organization_cache_by_name_v2(self, organization_name: str, **kwargs) -> Tuple[Any, requests.Response]:
"""組織名で組織キャッシュレコード取得
https://annofab.com/docs/api/#operation/getOrganizationCacheByNameV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
組織の各リソースのキャッシュタイムスタンプを格納したキャッシュレコードを取得します。 これらタイムスタンプは、各リソースを取得する API の `cache` クエリパラメータに使用します。
Args:
organization_name (str): 組織名 (required)
Returns:
Tuple[OrganizationCacheRecord, requests.Response]
"""
url_path = f"/organizations-by-name/{organization_name}/cache"
http_method = "GET"
keyword_params: Dict[str, Any] = {}
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_organization_cache_v2(self, organization_id: str, **kwargs) -> Tuple[Any, requests.Response]:
"""組織IDで組織キャッシュレコード取得
https://annofab.com/docs/api/#operation/getOrganizationCacheV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
組織の各リソースのキャッシュタイムスタンプを格納したキャッシュレコードを取得します。 これらタイムスタンプは、各リソースを取得する API の `cache` クエリパラメータに使用します。
Args:
organization_id (str): 組織ID (required)
Returns:
Tuple[OrganizationCacheRecord, requests.Response]
"""
url_path = f"/organizations/{organization_id}/cache"
http_method = "GET"
keyword_params: Dict[str, Any] = {}
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_organization_v2(
self, organization_id: str, query_params: Optional[Dict[str, Any]] = None, **kwargs
) -> Tuple[Any, requests.Response]:
"""組織IDで組織情報取得
https://annofab.com/docs/api/#operation/getOrganizationV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
指定された組織を取得します。 本 API は `cache` クエリパラメータが同じであれば結果がキャッシュされ、以降は高速にレスポンスが返ります。
Args:
organization_id (str): 組織ID (required)
query_params (Dict[str, Any]): Query Parameters
cache (str): CACHE TIMESTAMP
Returns:
Tuple[Organization, requests.Response]
"""
url_path = f"/organizations/{organization_id}"
http_method = "GET"
keyword_params: Dict[str, Any] = {
"query_params": query_params,
}
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_project_task_count_v2(
self, organization_id: str, project_id: str, **kwargs
) -> Tuple[Any, requests.Response]:
"""プロジェクトのタスク件数取得
https://annofab.com/docs/api/#operation/getProjectTaskCountV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
プロジェクトのタスク件数を取得します。
Args:
organization_id (str): 組織ID (required)
project_id (str): プロジェクトID (required)
Returns:
Tuple[ProjectTaskCounts, requests.Response]
"""
url_path = f"/organizations/{organization_id}/projects/{project_id}/task-counts"
http_method = "GET"
keyword_params: Dict[str, Any] = {}
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : AfProjectMemberV2Api
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def get_project_member_v2(
self, project_id: str, user_id: str, query_params: Optional[Dict[str, Any]] = None, **kwargs
) -> Tuple[Any, requests.Response]:
"""プロジェクトメンバー取得
https://annofab.com/docs/api/#operation/getProjectMemberV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
指定したプロジェクトメンバーを取得します。
Args:
project_id (str): プロジェクトID (required)
user_id (str): アカウントのユーザID. RESTクライアントユーザが指定しやすいように、Cognitoのaccount_idではなくuser_idとしている。 (required)
query_params (Dict[str, Any]): Query Parameters
cache (str): CACHE TIMESTAMP
Returns:
Tuple[List[ProjectMember], requests.Response]
"""
url_path = f"/projects/{project_id}/members/{user_id}"
http_method = "GET"
keyword_params: Dict[str, Any] = {
"query_params": query_params,
}
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_project_members_v2(
self, project_id: str, query_params: Optional[Dict[str, Any]] = None, **kwargs
) -> Tuple[Any, requests.Response]:
"""プロジェクトメンバー検索
https://annofab.com/docs/api/#operation/getProjectMembersV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
指定されたプロジェクトのメンバーを検索します。 パフォーマンスのため、結果はページング形式で返ります。全件取得したい場合は、レスポンスを見て、ページ移動してください。
Args:
project_id (str): プロジェクトID (required)
query_params (Dict[str, Any]): Query Parameters
include_inactive_member (str): 脱退したプロジェクトメンバーも取得する時に、キーのみ指定します(値は無視されます)。
cache (str): CACHE TIMESTAMP
Returns:
Tuple[ProjectMemberList, requests.Response]
"""
url_path = f"/projects/{project_id}/members"
http_method = "GET"
keyword_params: Dict[str, Any] = {
"query_params": query_params,
}
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : AfProjectV2Api
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def get_project_cache_v2(self, project_id: str, **kwargs) -> Tuple[Any, requests.Response]:
"""プロジェクトキャッシュレコード取得
https://annofab.com/docs/api/#operation/getProjectCacheV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
プロジェクトの各種リソースのキャッシュタイムスタンプを取得します。 これらタイムスタンプは、各リソースを取得する API の `cache` クエリパラメータに使用します。
Args:
project_id (str): プロジェクトID (required)
Returns:
Tuple[ProjectCacheRecord, requests.Response]
"""
url_path = f"/projects/{project_id}/cache"
http_method = "GET"
keyword_params: Dict[str, Any] = {}
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : AfStatisticsV2Api
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def get_account_statistics_v2(
self, project_id: str, query_params: Optional[Dict[str, Any]] = None, **kwargs
) -> Tuple[Any, requests.Response]:
"""ユーザー別タスク集計取得
https://annofab.com/docs/api/#operation/getAccountStatisticsV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
指定したプロジェクトのユーザー別タスク集計をすべて取得します。
Args:
project_id (str): プロジェクトID (required)
query_params (Dict[str, Any]): Query Parameters
cache (str): CACHE TIMESTAMP
Returns:
Tuple[List[ProjectAccountStatistics], requests.Response]
"""
url_path = f"/projects/{project_id}/statistics/accounts"
http_method = "GET"
keyword_params: Dict[str, Any] = {
"query_params": query_params,
}
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_inspection_statistics_v2(
self, project_id: str, query_params: Optional[Dict[str, Any]] = None, **kwargs
) -> Tuple[Any, requests.Response]:
"""検査コメント集計取得
https://annofab.com/docs/api/#operation/getInspectionStatisticsV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
指定したプロジェクトの検査コメント集計をすべて取得します。
Args:
project_id (str): プロジェクトID (required)
query_params (Dict[str, Any]): Query Parameters
cache (str): CACHE TIMESTAMP
Returns:
Tuple[List[InspectionStatistics], requests.Response]
"""
url_path = f"/projects/{project_id}/statistics/inspections"
http_method = "GET"
keyword_params: Dict[str, Any] = {
"query_params": query_params,
}
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_label_statistics_v2(
self, project_id: str, query_params: Optional[Dict[str, Any]] = None, **kwargs
) -> Tuple[Any, requests.Response]:
"""ラベル別アノテーション数集計取得
https://annofab.com/docs/api/#operation/getLabelStatisticsV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
指定したプロジェクトのラベル別アノテーション数集計をすべて取得します。
Args:
project_id (str): プロジェクトID (required)
query_params (Dict[str, Any]): Query Parameters
cache (str): CACHE TIMESTAMP
Returns:
Tuple[List[LabelStatistics], requests.Response]
"""
url_path = f"/projects/{project_id}/statistics/labels"
http_method = "GET"
keyword_params: Dict[str, Any] = {
"query_params": query_params,
}
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_task_phase_statistics_v2(
self, project_id: str, query_params: Optional[Dict[str, Any]] = None, **kwargs
) -> Tuple[Any, requests.Response]:
"""フェーズ別タスク集計取得
https://annofab.com/docs/api/#operation/getTaskPhaseStatisticsV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
指定したプロジェクトのフェーズ別タスク集計をすべて取得します。
Args:
project_id (str): プロジェクトID (required)
query_params (Dict[str, Any]): Query Parameters
cache (str): CACHE TIMESTAMP
Returns:
Tuple[List[TaskPhaseStatistics], requests.Response]
"""
url_path = f"/projects/{project_id}/statistics/task-phases"
http_method = "GET"
keyword_params: Dict[str, Any] = {
"query_params": query_params,
}
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_task_statistics_v2(
self, project_id: str, query_params: Optional[Dict[str, Any]] = None, **kwargs
) -> Tuple[Any, requests.Response]:
"""タスク集計取得
https://annofab.com/docs/api/#operation/getTaskStatisticsV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
指定したプロジェクトのタスク集計をすべて取得します。
Args:
project_id (str): プロジェクトID (required)
query_params (Dict[str, Any]): Query Parameters
cache (str): CACHE TIMESTAMP
Returns:
Tuple[List[ProjectTaskStatisticsHistory], requests.Response]
"""
url_path = f"/projects/{project_id}/statistics/tasks"
http_method = "GET"
keyword_params: Dict[str, Any] = {
"query_params": query_params,
}
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_worktime_statistics_v2(
self, project_id: str, query_params: Optional[Dict[str, Any]] = None, **kwargs
) -> Tuple[Any, requests.Response]:
"""タスク作業時間集計取得
https://annofab.com/docs/api/#operation/getWorktimeStatisticsV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
ヒストグラムは最終日のby_tasks、by_inputsでのみ返却する。 アカウント毎の集計のby_tasks、by_inputsには、最終日であってもヒストグラムを返却しない。
Args:
project_id (str): プロジェクトID (required)
query_params (Dict[str, Any]): Query Parameters
cache (str): CACHE TIMESTAMP
Returns:
Tuple[List[WorktimeStatistics], requests.Response]
"""
url_path = f"/projects/{project_id}/statistics/worktimes"
http_method = "GET"
keyword_params: Dict[str, Any] = {
"query_params": query_params,
}
return self._request_wrapper(http_method, url_path, **keyword_params)
#########################################
# Public Method : AfUsersV2Api
# NOTE: This method is auto generated by OpenAPI Generator
#########################################
def get_account_v2(
self, account_id: str, query_params: Optional[Dict[str, Any]] = None, **kwargs
) -> Tuple[Any, requests.Response]:
"""ユーザーアカウント情報取得
https://annofab.com/docs/api/#operation/getAccountV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
指定したユーザーの組織のアカウント情報を取得します。 本 API は `cache` クエリパラメータが同じであれば結果がキャッシュされ、以降は高速にレスポンスが返ります。
Args:
account_id (str): アカウントID (required)
query_params (Dict[str, Any]): Query Parameters
cache (str): CACHE TIMESTAMP
Returns:
Tuple[MyAccount, requests.Response]
"""
url_path = f"/users/{account_id}"
http_method = "GET"
keyword_params: Dict[str, Any] = {
"query_params": query_params,
}
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_user_cache_v2(self, account_id: str, **kwargs) -> Tuple[Any, requests.Response]:
"""キャッシュレコード
https://annofab.com/docs/api/#operation/getUserCacheV2
authorizations: SignedCookieKeyPairId, SignedCookiePolicy, SignedCookieSignature
ユーザー別の各リソースのキャッシュタイムスタンプを格納したキャッシュレコードを取得します。 これらタイムスタンプは、各リソースを取得する API の `cache` クエリパラメータに使用します。
Args:
account_id (str): アカウントID (required)
Returns:
Tuple[UserCacheRecord, requests.Response]
"""
url_path = f"/users/{account_id}/cache"
http_method = "GET"
keyword_params: Dict[str, Any] = {}
return self._request_wrapper(http_method, url_path, **keyword_params)
def get_user_organizations_v2(
self, account_id: str, query_params: Optional[Dict[str, Any]] = None, **kwargs
) -> Tuple[Any, requests.Response]:
"""個人の所属組織検索
https://annofab.com/docs/api/#operation/getUserOrganizationsV2
authorizations: SignedCookieKeyPairId, | |
mailsane.normalize(request.json['email'])
if address.error:
return jsonify({'message' : str(address), 'valid' : False})
if dbworker.getUser(str(address)) is None:
return jsonify({'message' : 'Email address not found', 'valid' : False})
return jsonify({'message' : None, 'valid' : True})
@app.route('/api/loghours', methods=['POST', 'PUT'])
def logHours():
    """Log worked/volunteer hours for a user, stamped with the current time.

    Expects JSON with 'email', 'purpose', 'paid' and 'hours'; 'hours' arrives
    as a string and is converted to a float server side.

    Returns:
        JSON {'dateTime': <timestamp recorded for the log entry>}.
    Aborts:
        403 for insufficient access, 400 for missing/invalid input.

    Fix: the bare ``except:`` around the float conversion is narrowed to the
    conversion errors so unrelated failures are no longer swallowed.
    """
    valid_access = [dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor'], dbworker.userTypeMap['volunteer']]
    if not dbworker.validateAccessList(valid_access):
        abort(403)
    if request.json is None:
        abort(400)
    for x in ['email', 'purpose', 'paid', 'hours']:
        if x not in request.json:
            abort(400)
    email = mailsane.normalize(request.json['email'])
    if email.error:
        abort(400)
    try:
        # Handle conversion from a string to a float.
        hours = float(request.json['hours'])
    except (TypeError, ValueError):
        abort(400)
    if hours <= 0:
        abort(400)
    date = datetime.datetime.now()
    dbworker.addHoursLog(str(email), request.json['purpose'], request.json['paid'], date, hours)
    return jsonify({'dateTime': date})
@app.route('/api/admin/genhours', methods=['POST', 'PUT'])
def genHours():
    """Log hours with a caller-supplied timestamp instead of "now".

    Expects JSON with 'purpose', 'paid', 'hours' and an ISO-8601 'dateTime'
    ("%Y-%m-%dT%H:%M:%S.%fZ"); 'hours' arrives as a string and is converted
    to a float server side. 'email' is optional and defaults to the session
    user.

    Returns:
        JSON {'success': True}.
    Aborts:
        403 for insufficient access, 400 for missing/invalid input.

    Fixes: an invalid 'dateTime' previously raised an unhandled ValueError
    (HTTP 500) — it now aborts with 400; the bare ``except:`` around the
    float conversion is narrowed to the conversion errors.
    """
    valid_access = [dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor'], dbworker.userTypeMap['volunteer']]
    if not dbworker.validateAccessList(valid_access):
        abort(403)
    if request.json is None:
        abort(400)
    for x in ['purpose', 'paid', 'hours', 'dateTime']:
        if x not in request.json:
            abort(400)
    try:
        correctedTime = datetime.datetime.strptime(request.json['dateTime'], "%Y-%m-%dT%H:%M:%S.%fZ")
    except (TypeError, ValueError):
        abort(400)
    email = session['email']
    if 'email' in request.json:
        email = mailsane.normalize(request.json['email'])
        if email.error:
            abort(400)
    try:
        # Handle conversion from a string to a float.
        hours = float(request.json['hours'])
    except (TypeError, ValueError):
        abort(400)
    if hours <= 0:
        abort(400)
    dbworker.addHoursLog(str(email), request.json['purpose'], request.json['paid'], correctedTime, hours)
    return jsonify({'success' : True})
@app.route('/api/admin/edithours', methods=['PATCH'])
def editHours():
    """
    Edit an existing hour log.

    Takes in a json of the form
    {'currentId' : id of hour log as string, 'newAttributes' : {...}}
    It can change any attribute that is not the _id.

    Returns:
        JSON {'success': True}.
    Aborts:
        403 for non-admins, 400 for missing/invalid input.

    Fix: the bare ``except:`` around the strptime call is narrowed to the
    parsing errors so unrelated failures are no longer swallowed.
    """
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    if request.json is None or 'currentId' not in request.json or 'newAttributes' not in request.json:
        abort(400)
    convClassId = ObjectId(request.json['currentId'])
    # Validate that all the changes made are valid,
    # i.e. ban changes to any invalid attributes.
    if request.json['newAttributes'] == {} or '_id' in request.json['newAttributes']:
        # No changes requested or an attempt was made to change the _id.
        abort(400)
    try:
        validate(instance=request.json, schema=SchemaFactory.edit_hours)
    except exceptions.ValidationError:
        abort(400)
    if 'dateTime' in request.json['newAttributes']:
        # Convert dateTime from an ISO-8601 string to a datetime object.
        # See https://stackoverflow.com/questions/969285/how-do-i-translate-an-iso-8601-datetime-string-into-a-python-datetime-object
        try:
            correctedTime = datetime.datetime.strptime(request.json['newAttributes']['dateTime'], "%Y-%m-%dT%H:%M:%S.%fZ")
        except (TypeError, ValueError):
            abort(400)
        # Copy the requested attributes, substituting the parsed timestamp.
        correctedDict = dict(request.json['newAttributes'])
        correctedDict['dateTime'] = correctedTime
        dbworker.editHour(convClassId, correctedDict)
    else:
        dbworker.editHour(convClassId, request.json['newAttributes'])
    return jsonify({'success' : True})
@app.route('/api/admin/deletehour', methods=['POST', 'DELETE'])
def deleteHour():
    """
    Delete one hour log.

    Takes in a json of the form {'id' : id of hour log as string} and
    deletes the hour log associated with that id.

    Aborts with 403 for non-admins, 400 for missing input, and 409 in the
    event that the database delete failed.
    """
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    payload = request.json
    if payload is None or 'id' not in payload:
        abort(400)
    if not dbworker.deleteHour(ObjectId(payload['id'])):
        # The database reported a failure.
        abort(409)
    return jsonify({'success' : True})
@app.route('/api/gethours/', methods=['GET'])
@app.route('/api/hours/', methods=['GET'])
def getHours():
    """Return the hour logs of the session user or, for admins, of the
    user named in the ``?user=`` query parameter.

    Returns JSON {'hours': [...]} with _id, dateTime, purpose, hours and
    paid per entry (_id stringified for JSON).
    """
    allowed = [dbworker.userTypeMap['admin'],
               dbworker.userTypeMap['instructor'],
               dbworker.userTypeMap['volunteer']]
    if not dbworker.validateAccessList(allowed):
        abort(403)
    requested = request.args.get('user', default=None, type=str)
    if requested is None:
        email = session.get('email')
        if email is None:
            abort(500)
    else:
        # Only admins may inspect another user's hours.
        if not dbworker.validateAccessList([dbworker.userTypeMap['admin']]):
            abort(403)
        email = mailsane.normalize(requested)
        if email.error:
            abort(400)
    cursor = dbworker.getHours(filt={"email": str(email)}, projection={'_id' : 1, 'dateTime' : 1, 'purpose': 1, 'hours' : 1, 'paid' : 1})
    hours_list = []
    for doc in cursor:
        # ObjectId is not JSON serializable; stringify it.
        doc['_id'] = str(doc['_id'])
        hours_list.append(doc)
    return jsonify({"hours": hours_list})
@app.route('/api/report/', methods=['POST'])
def getReport():
    """
    Return a PDF containing all worked/volunteer hours.

    JSON body may contain 'email' (defaults to the session user), 'paid'
    (truthy/falsy filter) and ISO date strings 'startRange'/'endRange'.
    Admins may request any user's report; others only their own (403).
    Responds with the generated PDF file.
    """
    report_params = request.json
    if report_params is None:
        abort(400)
    if 'email' not in report_params:
        # Fall back to the logged-in user's own report.
        report_params['email'] = session['email']
    try:
        validate(instance=report_params, schema=SchemaFactory.report_hours)
    except exceptions.ValidationError:
        abort(400)
    email = mailsane.normalize(report_params['email'])
    if email.error:
        abort(400)
    if not dbworker.validateAccessList([dbworker.userTypeMap['admin']]) and str(email) != session['email']:
        # Allows admins to see everyones reports, users to see their own
        abort(403)
    paid_hrs = None
    filt = {"email": str(email)}
    proj = {'_id': 0, 'hours': 1}
    if 'paid' in request.json:
        # Coerce the filter value to a strict bool for the query.
        filt['paid'] = True if request.json['paid'] else False
        paid_hrs = False if request.json['paid'] == 0 else True
    # Convert date ranges into datetime objects and insert into filter.
    # Note: to enforce a specific date/time pattern you can also use strptime method:
    # datetime.datetime.strptime(request.json['startRange'], '%Y-%m-%d') (complete pattern: "%Y-%m-%dT%H:%M:%S.%fZ")
    if 'startRange' in report_params and 'endRange' in report_params:
        start_time_stamp = parse(report_params['startRange'])
        end_time_stamp = parse(report_params['endRange'])
        filt["dateTime"] = {'$gte': start_time_stamp, '$lte': end_time_stamp}
    elif 'startRange' in report_params:
        start_time_stamp = parse(report_params['startRange'])
        filt["dateTime"] = {'$gte': start_time_stamp}
    elif 'endRange' in report_params:
        end_time_stamp = parse(report_params['endRange'])
        filt["dateTime"] = {'$lte': end_time_stamp}
    hours = dbworker.getHours(filt=filt, projection=proj)
    hours_list = []
    for doc in hours:
        hours_list.append(float(doc["hours"]))
    file_name = reportgen.hours(email, hours_list, paid_hrs)
    # Once generated, report PDFs are currently stored in the 'app' folder of docker container.
    resp_file = send_file(file_name, attachment_filename=file_name)
    if os.path.exists("app/" + file_name):
        # Delete the temporary PDF once the response object holds it.
        os.remove("app/" + file_name)
        # NOTE(review): reading of the original (indentation was lost in this
        # copy) — the 500 below presumably fires when the generated file is
        # missing; confirm against the repository version.
        return resp_file
    abort(500)
@app.route('/api/report/<string:class_id>/<string:email>', methods=['GET'])
def getStudentReport(class_id, email):
    """
    Return a report for a student for a specific class.
    Expected json is {"email": <EMAIL>, "classId":"5e5ab2f6e7179a5e7ee4e81b"}
    """
    # Schema validation is currently disabled:
    # try:
    #     validate(instance={"email": email, "classId": class_id}, schema=SchemaFactory.report_student)
    # except exceptions.ValidationError:
    #     abort(400)
    email = mailsane.normalize(email)
    if email.error:
        abort(400)
    # Only admins and instructors may pull per-class student reports.
    allowed_roles = [dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor']]
    if not dbworker.validateAccessList(allowed_roles):
        abort(403)
    # The classId string must become an ObjectId before querying.
    class_obj_id = ObjectId(class_id)
    # Both the user and the class must exist in the DB.
    user = dbworker.getUser(str(email))
    course = dbworker.getClass(class_obj_id)
    if user is None or course is None:
        abort(404)
    # The target user must actually be a student.
    if user['userType'] != dbworker.userTypeMap['student']:
        abort(400)
    report = dbworker.getStudentReport(
        filt={"classId": class_obj_id, "studentEmail": str(email)},
        proj={'_id': 0})
    if report is None:
        abort(400)
    # ObjectId is not JSON-serializable; stringify before responding.
    report['classId'] = str(report['classId'])
    return jsonify({"report": report})
@app.route('/api/admin/getusers')
def getUsers():
    """
    Returns a json of the form {'result' : list of users with emails, first and last names, 'success' : True}
    """
    allowed_roles = [dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor']]
    if not dbworker.validateAccessList(allowed_roles):
        abort(403)
    # Materialize the cursor, exposing only the non-sensitive fields.
    user_cursor = dbworker.getUsers(
        projection={'_id' : 0, 'email' : 1, 'firstName': 1, 'lastName' : 1, 'userType': 1})
    return jsonify({'result' : list(user_cursor), 'success' : True})
@app.route('/api/getuser', methods=['POST'])
@app.route('/api/admin/getuser', methods=['POST'])
def getUser():
    """
    Takes in a JSON of {'email'}
    Returns {'result' : {user information, no id or password}, 'success' : True}
    This method is not just usable by admins, but by instructors
    """
    allowed_roles = [dbworker.userTypeMap['admin'], dbworker.userTypeMap['instructor']]
    if not dbworker.validateAccessList(allowed_roles):
        abort(403)
    if request.json is None or 'email' not in request.json:
        abort(400)
    email = mailsane.normalize(request.json['email'])
    if email.error:
        abort(400)
    user = dbworker.getUser(str(email))
    if user is None:
        abort(405)
    # Never expose credentials or the raw database id.
    user.pop('password')
    user.pop('_id')
    # Derive an approximate age in whole years (31536000 s = 365 days);
    # defaults to 0 when no birthday is stored.
    now = datetime.datetime.now()
    bday = user.get('birthday', now)
    user['age'] = int((now - bday).total_seconds() / 31536000)
    return jsonify({'result' : user, 'success' : True})
@app.route('/api/admin/edituser', methods=['PATCH'])
def editUser():
    """
    Edit an existing user's attributes (admin only).

    Takes in a json of the form
        {'currentEmail' : email, 'newAttributes' : {...}}
    It can change any attribute that is not the email or _id. 'birthday' is
    converted from an ISO-8601 string to a datetime; 'password' is routed
    through dbworker.setPassword instead of being written directly.
    """
    # NOTE(review): removed a sys.stderr.write() dump of request.json here -
    # newAttributes may contain a plaintext password, which must not be logged.
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    if request.json is None or 'currentEmail' not in request.json or 'newAttributes' not in request.json:
        abort(400)
    email = mailsane.normalize(request.json['currentEmail'])
    if email.error:
        abort(400)
    if dbworker.getUser(str(email)) is None:
        abort(404)
    if request.json['newAttributes'] == {} or 'email' in request.json['newAttributes'] or '_id' in request.json['newAttributes']:
        # No changes requested or an attempt was made to change the email or _id
        abort(400)
    # Validate that all the changes made are valid
    # ie. ban changes to any invalid attributes
    try:
        validate(instance=request.json, schema=SchemaFactory.edit_user)
    except exceptions.ValidationError:
        abort(400)
    if 'birthday' in request.json['newAttributes'] or 'password' in request.json['newAttributes']:
        # Convert birthday from string to datetime object
        # See https://stackoverflow.com/questions/969285/how-do-i-translate-an-iso-8601-datetime-string-into-a-python-datetime-object
        correctedTime = None
        try:
            if 'birthday' in request.json['newAttributes']:
                correctedTime = datetime.datetime.strptime(request.json['newAttributes']['birthday'], "%Y-%m-%dT%H:%M:%S.%fZ")
        except (TypeError, ValueError):
            # Narrowed from a bare 'except:' - only strptime parse failures
            # should translate into a 400.
            abort(400)
        correctedDict = {}
        for x in request.json['newAttributes']:
            if x == 'birthday':
                correctedDict['birthday'] = correctedTime
            elif x == 'password':
                # Passwords go through the hashing path, never stored raw.
                dbworker.setPassword(str(email), request.json['newAttributes']['password'])
            else:
                correctedDict[x] = request.json['newAttributes'][x]
        dbworker.editUser(str(email), correctedDict)
    else:
        dbworker.editUser(str(email), request.json['newAttributes'])
    return jsonify({'success' : True})
@app.route('/api/admin/createcourse', methods=['POST'])
def createCourse():
    """
    Create a new course (admin only).

    Takes in a JSON of {'courseTitle'} with an optional 'semester'.
    Returns {'success' : True}.
    """
    if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
        abort(403)
    if request.json is None or 'courseTitle' not in request.json:
        abort(400)
    # Semester is optional; default to an empty string.
    semester = request.json.get('semester', "")
    dbworker.createClass(request.json['courseTitle'], [], [], [], semester)
    # TODO(review): the original docstring promised {'_id': newId} in the
    # response, but dbworker.createClass's return value was discarded (the
    # unused local 'val' has been removed). Confirm whether callers need the
    # new course id and, if so, include it here.
    return jsonify({'success' : True})
@app.route('/api/admin/addstudent', methods=['POST'])
def addStudent():
"""
Takes in a JSON of the structure {'email', 'classId'}
Adds <email> to <classId> as a student
Returns {'success' : Boolean}
"""
if not dbworker.validateAccess(dbworker.userTypeMap['admin']):
abort(403)
if request.json is None or 'email' not in request.json or 'classId' not in request.json:
abort(400)
email = mailsane.normalize(request.json['email'])
if email.error:
abort(400)
convClassId = ObjectId(request.json['classId'])
try:
validate(instance=request.json, | |
<filename>dev/Tools/build/waf-1.7.13/lmbrwaflib/third_party.py<gh_stars>0
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# System Imports
import configparser
import glob
import json
import os
import re
import string
import fnmatch
from collections import OrderedDict
# waflib imports
from waflib import Configure, Context, Utils, Errors, Node
from waflib.TaskGen import feature, after_method
from waflib.Configure import ConfigurationContext, conf, Logs
from waflib.Errors import ConfigurationError, WafError
# lmbrwaflib imports
from lmbrwaflib.cry_utils import get_configuration, append_unique_kw_entry, append_to_unique_list
from lmbrwaflib.third_party_sync import P4SyncThirdPartySettings
from lmbrwaflib.utils import parse_json_file, write_json_file, calculate_file_hash, calculate_string_hash
from lmbrwaflib.gems import GemManager
from lmbrwaflib.settings_manager import LUMBERYARD_SETTINGS
# misc imports
from waf_branch_spec import BINTEMP_FOLDER, BINTEMP_CACHE_3RD_PARTY
# Pattern matching ${ALIAS} tokens inside 3rd party config values.
ALIAS_SEARCH_PATTERN = re.compile(r'(\$\{([\w]*)})')
# Map platforms to their short version names in 3rd party
PLATFORM_TO_3RD_PARTY_SUBPATH = {
    # win_x64 host platform
    'win_x64_clang' : 'win_x64/vc140',
    'win_x64_vs2017' : 'win_x64/vc140',   # Not an error, VS2017 links with VS2015 binaries
    'win_x64_vs2019' : 'win_x64/vc140',
    'android_armv8_clang': 'win_x64/android_ndk_r15/android-21/arm64-v8a/clang-3.8',
    # osx host platform
    'darwin_x64' : 'osx/darwin-clang-703.0.31',
    'ios' : 'osx/ios-clang-703.0.31',
    'appletv' : 'osx/appletv-clang-703.0.31',
    # linux host platform
    'linux_x64' : 'linux/clang-3.8'}
# Map waf build configurations to the 3rd party configuration folder name.
CONFIGURATION_TO_3RD_PARTY_NAME = {'debug' : 'debug',
                                   'profile': 'release',
                                   'release': 'release'}
# Setup Assistant file names used to locate the 3rd party root.
CONFIG_FILE_SETUP_ASSISTANT_CONFIG = "SetupAssistantConfig.json"
CONFIG_SETUP_ASSISTANT_USER_PREF = "SetupAssistantUserPreferences.ini"
@conf
def get_platform_to_3rd_party_subpath(ctx):
    """Return the platform -> 3rd party sub-path map, first registering it via ctx.add_restricted_3rd_party_subpaths()."""
    ctx.add_restricted_3rd_party_subpaths(PLATFORM_TO_3RD_PARTY_SUBPATH)
    return PLATFORM_TO_3RD_PARTY_SUBPATH
def trim_lib_name(platform, lib_name):
    """
    Trim the conventional 'lib' prefix from a library name on platforms
    whose linkers expect the bare name.

    :param platform:  The platform key to base the trim decision on
    :param lib_name:  The library name to conditionally trim
    :return:          The (possibly trimmed) library name
    """
    # win_x64* and xenia* toolchains use the full file name, so never trim.
    if platform.startswith(('win_x64', 'xenia')):
        return lib_name
    # Nothing to strip if the name is not 'lib'-prefixed.
    if not lib_name.startswith('lib'):
        return lib_name
    return lib_name[3:]
def get_third_party_platform_name(ctx, waf_platform_key):
    """
    Get the third party platform name for a waf platform name

    :param ctx: The current context
    :param waf_platform_key: The waf platform key
    :return: The 3rd party sub-path mapped to the platform (ctx.fatal on an unmapped platform)
    """
    if waf_platform_key not in ctx.get_platform_to_3rd_party_subpath():
        ctx.fatal('Platform {} does not have a mapped sub path for 3rd Party Static Libraries in {}/wscript'.format(waf_platform_key, ctx.path.abspath()))
    return ctx.get_platform_to_3rd_party_subpath()[waf_platform_key]
def get_third_party_configuration_name(ctx, waf_configuration_key):
    """
    Get the 3rd party configuration name for a waf configuration name

    :param ctx: The current context
    :param waf_configuration_key: The waf configuration key
    :return: The 'third_party_config' declared in the settings for that build configuration
    """
    configuration_settings = LUMBERYARD_SETTINGS.get_build_configuration_setting(waf_configuration_key)
    return configuration_settings.third_party_config
def evaluate_node_alias_map(lib_root, lib_name):
    """
    Given a root of a lib config, perform an evaluation of all the aliases and produce a map of its value

    :param lib_root: The lib root (parsed 3rd party config dictionary; may contain an 'aliases' section)
    :param lib_name: Identifier used in error messages for this 3rd party library
    :return: The map of alias name -> list of fully expanded values
    """
    def _process_alias_value(name_alias_map, name_alias_value, visit_stack):
        # Recursively expand one value; visit_stack holds the aliases
        # currently being expanded so cycles raise instead of recursing forever.
        alias_match = ALIAS_SEARCH_PATTERN.search(name_alias_value)
        if alias_match is not None:
            # This contains an aliased value
            alias_key = alias_match.group(2)
            # If the value is another alias
            if alias_key not in name_alias_map:
                # If the alias is not in the map, this is an error
                raise RuntimeError('Invalid alias value {} for 3rd party library {}'.format(name_alias_value, lib_name))
            if alias_key in visit_stack:
                # If the alias is in the stack, this is a recursive error
                raise RuntimeError('Recursive alias value error {} for 3rd party library {}'.format(name_alias_value, lib_name))
            visit_stack.append(alias_key)
            aliased_values = []
            # An alias may map to a single value or to a list of values.
            aliased_names = name_alias_map[alias_key] if isinstance(name_alias_map[alias_key], list) else [name_alias_map[alias_key]]
            for alias_value in aliased_names:
                pre_expansion_aliases = _process_alias_value(name_alias_map, alias_value, visit_stack)
                for pre_expansion_alias in pre_expansion_aliases:
                    # NOTE(review): sub() replaces every ${...} token in the value
                    # with this single expansion - verify config values never mix
                    # two different aliases inside one string.
                    aliased_values.append(ALIAS_SEARCH_PATTERN.sub(pre_expansion_alias, name_alias_value))
            visit_stack.pop()
            return aliased_values
        else:
            return [name_alias_value]
    # Scan the optional 'aliases' node off of the parent to look for aliases to build the map
    evaluated_name_alias_map = {}
    if 'aliases' not in list(lib_root.keys()):
        return evaluated_name_alias_map
    alias_node = lib_root['aliases']
    # The first pass is to collect the pre-evaluated alias names
    for node_key in alias_node:
        node_alias_value = alias_node[node_key]
        if isinstance(node_alias_value, str):
            # Normalize scalar values to single-element lists.
            node_alias_value = [node_alias_value]
        evaluated_name_alias_map[node_key] = node_alias_value
    # Second pass, go through each list (possible recursively) and expand any aliased values in the list
    stack = []
    for aliased_node_key in evaluated_name_alias_map:
        aliased_node_values = evaluated_name_alias_map[aliased_node_key]
        stack.append(aliased_node_key)
        values = []
        for aliased_node_value in aliased_node_values:
            values += _process_alias_value(evaluated_name_alias_map, aliased_node_value, stack)
        evaluated_name_alias_map[aliased_node_key] = values
        stack.pop()
    return evaluated_name_alias_map
# Cache: config file node -> (parsed json dict, uselib names, alias map).
# Populated by get_3rd_party_config_record().
CACHE_JSON_MODEL_AND_ALIAS_MAP = {}
# Keep a cache of the 3rd party uselibs mappings to a tuple of the config file and the alias root map that applies to it
CONFIGURED_3RD_PARTY_USELIBS = {}
@conf
def get_platform_folder_to_keyword_map(ctx):
    """
    Get the cached map of platforms to their PAL platform folder and keyword, so we can
    attempt to search for platform-specific 3rd party fragments to combine with the input config file
    :param ctx: Context
    :return: Map of platform folder names to their keyword
    """
    # EAFP caching: build the map once, memoize it on the context.
    try:
        return ctx.platform_folder_to_keyword_map
    except AttributeError:
        working_map = {}
        target_platforms = ctx.get_all_target_platforms(False)
        for target_platform in target_platforms:
            # Every platform definition must declare its PAL folder and the
            # keyword used in fragment file names.
            assert 'platform_folder' in target_platform.attributes
            assert 'platform_keyword' in target_platform.attributes
            platform_folder = target_platform.attributes['platform_folder']
            platform_keyword = target_platform.attributes['platform_keyword']
            working_map[platform_folder] = platform_keyword
        ctx.platform_folder_to_keyword_map = working_map
        return ctx.platform_folder_to_keyword_map
@conf
def get_3rd_party_config_record(ctx, lib_config_file):
    """
    Read a config and parse its alias map. Also provide a caching mechanism to retrieve the config dictionary, list of uselib names, and
    alias map.

    :param ctx: Config context
    :param lib_config_file: The full path of the config file to read
    :return: The parsed json dictionary, list of uselib names, and the map of alias values if any
    """
    # Serve repeated lookups from the module-level cache.
    if lib_config_file in CACHE_JSON_MODEL_AND_ALIAS_MAP:
        return CACHE_JSON_MODEL_AND_ALIAS_MAP[lib_config_file]
    lib_info = ctx.parse_json_file(lib_config_file)
    assert "name" in lib_info
    platform_folder_to_keyword_map = ctx.get_platform_folder_to_keyword_map()
    config_file_name = lib_config_file.name
    file_base_name, file_ext = os.path.splitext(config_file_name)
    platform_base_node = lib_config_file.parent.make_node('Platform')
    # Only the following sections at the root level of the config file are 'mergeable'
    mergeable_keys = ['platform', 'aliases', 'configuration_settings']
    for platform_folder, platform_keyword in list(platform_folder_to_keyword_map.items()):
        # For each platform, check if there exists a platform fragment config to attempt to merge
        check_fragment_filename = '{}_{}{}'.format(file_base_name, platform_keyword, file_ext)
        check_fragment_path = os.path.join(platform_base_node.abspath(), platform_folder, check_fragment_filename)
        if not os.path.exists(check_fragment_path):
            continue
        # Parse the platform fragment and attempt to merge the mergeable sections together
        lib_platform_fragment_node = platform_base_node.make_node('{}/{}'.format(platform_folder, check_fragment_filename))
        config_plaform_fragment_json = ctx.parse_json_file(lib_platform_fragment_node)
        for level_one_key, level_one_dict in list(config_plaform_fragment_json.items()):
            # Validate/Match the root (level 1) key for merging
            if not isinstance(level_one_dict, dict):
                raise Errors.WafError("3rd Party Platform Fragment file '{}' has an "
                                      "invalid key '{}'. Fragment files may only contain "
                                      "dictionaries of values.".format(lib_platform_fragment_node.abspath(),
                                                                       level_one_key))
            if level_one_key not in mergeable_keys:
                raise Errors.WafError("3rd Party Platform Fragment file '{}' has an "
                                      "unmergeable key '{}'. Valid mergeable keys are : {}".format(lib_platform_fragment_node.abspath(),
                                                                                                   level_one_key,
                                                                                                   ','.join(mergeable_keys)))
            target_root_dict = lib_info.setdefault(level_one_key, {})
            for level_two_key, level_two_values in list(level_one_dict.items()):
                # For each value in level 2 dictionary, merge new items from the fragment to the base one. This only accepts new keys
                # and will not attempt to merge down even further
                if level_two_key in target_root_dict:
                    raise Errors.WafError("Conflicting values for '{}' in config file '{}' "
                                          "and fragment file '{}'".format(level_two_key,
                                                                          lib_config_file.abspath(),
                                                                          lib_platform_fragment_node.abspath()))
                else:
                    target_root_dict[level_two_key] = level_two_values
    # 'name' may be a single string or a list; uselib names are upper-cased.
    uselib_names = [userlib_name.upper() for userlib_name in (lib_info["name"] if isinstance(lib_info["name"], list) else [lib_info["name"]])]
    alias_map = evaluate_node_alias_map(lib_info, lib_config_file)
    CACHE_JSON_MODEL_AND_ALIAS_MAP[lib_config_file] = (lib_info, uselib_names, alias_map)
    return lib_info, uselib_names, alias_map
@conf
def mark_3rd_party_config_for_autoconf(ctx):
    """Register 3rd party config jsons (and their platform fragments) so edits trigger auto-configure."""
    # Make sure we track 3rd party jsons for auto configure
    if not hasattr(ctx, 'additional_files_to_track'):
        ctx.additional_files_to_track = []
    platform_folder_to_keyword_map = ctx.get_platform_folder_to_keyword_map()
    config_3rdparty_folder = ctx.engine_node.make_node('_WAF_/3rdParty')
    platform_base_node = config_3rdparty_folder.make_node('Platform')
    config_file_nodes = config_3rdparty_folder.ant_glob('*.json')
    for config_file_node in config_file_nodes:
        # Track each base 3rd party config node
        ctx.additional_files_to_track.append(config_file_node)
        # Track any platform fragment files for the config
        config_file_name = config_file_node.name
        file_base_name, file_ext = os.path.splitext(config_file_name)
        for platform_folder, platform_keyword in list(platform_folder_to_keyword_map.items()):
            # For each platform, check if there exists a platform fragment config to attempt to merge
            check_fragment_filename = '{}_{}{}'.format(file_base_name, platform_keyword, file_ext)
            check_fragment_path = os.path.join(platform_base_node.abspath(), platform_folder, check_fragment_filename)
            if not os.path.exists(check_fragment_path):
                continue
            # Track the platform fragment file for auto configure as well
            lib_platform_fragment_node = platform_base_node.make_node('{}/{}'.format(platform_folder, check_fragment_filename))
            ctx.additional_files_to_track.append(lib_platform_fragment_node)
    # Also track Setup Assistant Config File
    setup_assistant_config_node = ctx.engine_node.make_node('SetupAssistantConfig.json')
    ctx.additional_files_to_track.append(setup_assistant_config_node)
@conf
def read_and_mark_3rd_party_libs(ctx):
def _process(config_folder_node, config_file_abs, path_alias_map):
filename = os.path.basename(config_file_abs)
# Attempt to load the 3rd party configuration
lib_config_file = config_folder_node.make_node(filename)
config, uselib_names, alias_map = get_3rd_party_config_record(ctx, lib_config_file)
# Mark all the uselib names as processed
for uselib_name in uselib_names:
CONFIGURED_3RD_PARTY_USELIBS[uselib_name] = (lib_config_file, path_alias_map)
config_3rdparty_folder = ctx.engine_node.make_node('_WAF_/3rdParty')
config_3rdparty_folder_path = config_3rdparty_folder.abspath()
config_files = glob.glob(os.path.join(config_3rdparty_folder_path, '*.json'))
if ctx.is_engine_local():
root_alias_map = {'ROOT': ctx.srcnode.abspath()}
else:
root_alias_map = {'ROOT': ctx.engine_path}
for config_file in config_files:
_process(config_3rdparty_folder, config_file, root_alias_map)
# Read the 3rd party configs with export 3rd party set to true
all_gems = GemManager.GetInstance(ctx).gems
for gem in all_gems:
ctx.root.make_node(gem.abspath).make_node('3rdParty')
gem_3p_abspath = os.path.join(gem.abspath, '3rdParty', '*.json')
gem_3p_config_files = glob.glob(gem_3p_abspath)
gem_3p_node = ctx.root.make_node(gem.abspath).make_node('3rdParty')
gem_alias_map = {'ROOT': ctx.srcnode.abspath(),
'GEM': gem.abspath}
for gem_3p_config_file in gem_3p_config_files:
_process(gem_3p_node, | |
from __future__ import print_function, division, absolute_import
from collections import defaultdict, deque
from datetime import timedelta
from importlib import import_module
import heapq
import logging
import os
import random
import tempfile
from threading import current_thread, Lock, local
from timeit import default_timer
import shutil
import sys
from dask.core import istask
from dask.compatibility import apply
try:
from cytoolz import valmap, merge, pluck, concat
except ImportError:
from toolz import valmap, merge, pluck, concat
from tornado.gen import Return
from tornado import gen
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.locks import Event
from .batched import BatchedSend
from .comm.core import get_address_host_port
from .config import config
from .compatibility import reload, unicode, invalidate_caches, cache_from_source
from .core import (connect, send_recv, error_message, CommClosedError,
rpc, Server, pingpong, coerce_to_address, RPCClosed)
from .metrics import time
from .protocol.pickle import dumps, loads
from .sizeof import sizeof
from .threadpoolexecutor import ThreadPoolExecutor
from .utils import (funcname, get_ip, has_arg, _maybe_complex, log_errors,
All, ignoring, validate_key, mp_context)
from .utils_comm import pack_data, gather_from_workers
# Number of logical CPU cores available to this process.
_ncores = mp_context.cpu_count()

# Thread-local storage (used to mark state on the executing thread).
thread_state = local()

logger = logging.getLogger(__name__)

LOG_PDB = config.get('pdb-on-err') or os.environ.get('DASK_ERROR_PDB', False)

# Sentinel distinguishing "no value supplied" from a legitimate None.
no_value = '--no-value-sentinel--'

try:
    import psutil
    TOTAL_MEMORY = psutil.virtual_memory().total
except ImportError:
    # logger.warn is a deprecated alias of logger.warning; fixed here.
    logger.warning("Please install psutil to estimate worker memory use")
    TOTAL_MEMORY = 8e9

# Task/dependency state groupings used throughout the worker state machine.
IN_PLAY = ('waiting', 'ready', 'executing', 'long-running')
PENDING = ('waiting', 'ready', 'constrained')
PROCESSING = ('waiting', 'ready', 'constrained', 'executing', 'long-running')
READY = ('ready', 'constrained')
class WorkerBase(Server):
    def __init__(self, scheduler_ip, scheduler_port=None, ncores=None,
                 loop=None, local_dir=None, services=None, service_ports=None,
                 name=None, heartbeat_interval=5000, reconnect=True,
                 memory_limit='auto', executor=None, resources=None,
                 silence_logs=None, **kwargs):
        """Set up worker state, storage, executor, and RPC handlers.

        ``memory_limit`` may be 'auto' (fraction of host memory scaled by
        core share), a byte count, or a float <= 1 (fraction of host memory).
        """
        # Resolve the scheduler address: a bare ip may already embed the port,
        # otherwise combine (ip, port) into a single address.
        if scheduler_port is None:
            scheduler_addr = coerce_to_address(scheduler_ip)
        else:
            scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
        self._port = 0
        self.ncores = ncores or _ncores
        self.local_dir = local_dir or tempfile.mkdtemp(prefix='worker-')
        self.total_resources = resources or {}
        self.available_resources = (resources or {}).copy()
        if silence_logs:
            logger.setLevel(silence_logs)
        if not os.path.exists(self.local_dir):
            os.mkdir(self.local_dir)
        # 'auto': 60% of host memory, scaled by this worker's share of cores.
        if memory_limit == 'auto':
            memory_limit = int(TOTAL_MEMORY * 0.6 * min(1, self.ncores / _ncores))
        with ignoring(TypeError):
            memory_limit = float(memory_limit)
        # A float <= 1 is interpreted as a fraction of total host memory.
        if isinstance(memory_limit, float) and memory_limit <= 1:
            memory_limit = memory_limit * TOTAL_MEMORY
        self.memory_limit = memory_limit
        if self.memory_limit:
            # Spill-to-disk storage: hot data in memory, overflow to files
            # under local_dir/storage via zict's Buffer.
            try:
                from zict import Buffer, File, Func
            except ImportError:
                raise ImportError("Please `pip install zict` for spill-to-disk workers")
            path = os.path.join(self.local_dir, 'storage')
            storage = Func(dumps_to_disk, loads_from_disk, File(path))
            self.data = Buffer({}, storage, int(float(self.memory_limit)), weight)
        else:
            self.data = dict()
        self.loop = loop or IOLoop.current()
        self.status = None
        self._closed = Event()
        self.reconnect = reconnect
        self.executor = executor or ThreadPoolExecutor(self.ncores)
        self.scheduler = rpc(scheduler_addr)
        self.name = name
        self.heartbeat_interval = heartbeat_interval
        # Guards against overlapping heartbeats; see heartbeat().
        self.heartbeat_active = False
        self.execution_state = {'scheduler': self.scheduler.address,
                                'ioloop': self.loop,
                                'worker': self}
        self._last_disk_io = None
        self._last_net_io = None
        self._ipython_kernel = None
        # Make uploaded modules importable from this worker's directory.
        if self.local_dir not in sys.path:
            sys.path.insert(0, self.local_dir)
        self.services = {}
        self.service_ports = service_ports or {}
        self.service_specs = services or {}
        # RPC operation name -> bound handler, passed to the Server base class.
        handlers = {
            'gather': self.gather,
            'compute-stream': self.compute_stream,
            'run': self.run,
            'run_coroutine': self.run_coroutine,
            'get_data': self.get_data,
            'update_data': self.update_data,
            'delete_data': self.delete_data,
            'terminate': self.terminate,
            'ping': pingpong,
            'health': self.host_health,
            'upload_file': self.upload_file,
            'start_ipython': self.start_ipython,
            'keys': self.keys,
        }
        super(WorkerBase, self).__init__(handlers, io_loop=self.loop, **kwargs)
        self.heartbeat_callback = PeriodicCallback(self.heartbeat,
                                                   self.heartbeat_interval,
                                                   io_loop=self.loop)
    @property
    def worker_address(self):
        """ For API compatibility with Nanny """
        # Simple alias of self.address.
        return self.address
    @gen.coroutine
    def heartbeat(self):
        """Periodically re-register with the scheduler, reporting health info.

        Skips a beat if the previous heartbeat is still in flight.
        """
        if not self.heartbeat_active:
            self.heartbeat_active = True
            logger.debug("Heartbeat: %s" % self.address)
            try:
                yield self.scheduler.register(address=self.address, name=self.name,
                                              ncores=self.ncores,
                                              now=time(),
                                              host_info=self.host_health(),
                                              services=self.service_ports,
                                              memory_limit=self.memory_limit)
            finally:
                # Always clear the flag so a failed beat can't stall future ones.
                self.heartbeat_active = False
        else:
            logger.debug("Heartbeat skipped: channel busy")
    @gen.coroutine
    def _register_with_scheduler(self):
        """Register this worker with the scheduler, retrying until it answers.

        Pauses the heartbeat while registering and restarts it on success.
        """
        self.heartbeat_callback.stop()
        while True:
            if self.status in ('closed', 'closing'):
                raise gen.Return
            try:
                resp = yield self.scheduler.register(
                    ncores=self.ncores, address=self.address,
                    keys=list(self.data),
                    name=self.name,
                    nbytes=self.nbytes,
                    now=time(),
                    host_info=self.host_health(),
                    services=self.service_ports,
                    memory_limit=self.memory_limit,
                    local_directory=self.local_dir,
                    resources=self.total_resources)
                break
            except EnvironmentError:
                # Scheduler not reachable yet; back off briefly and retry.
                logger.debug("Unable to register with scheduler. Waiting")
                yield gen.sleep(0.1)
        if resp != 'OK':
            raise ValueError(resp)
        self.heartbeat_callback.start()
    def start_services(self, listen_ip=''):
        """Instantiate and start auxiliary services, recording their ports.

        Keys of ``service_specs`` may be ``name`` or ``(name, port)``; without
        an explicit port, 0 is used so the OS assigns one.
        """
        for k, v in self.service_specs.items():
            if isinstance(k, tuple):
                k, port = k
            else:
                port = 0
            self.services[k] = v(self, io_loop=self.loop)
            self.services[k].listen((listen_ip, port))
            self.service_ports[k] = self.services[k].port
    @gen.coroutine
    def _start(self, addr_or_port=0):
        """Bind the listener, start services, and register with the scheduler.

        ``addr_or_port`` may be a bare port (int) or a full address string.
        """
        assert self.status is None
        # XXX Factor this out
        if isinstance(addr_or_port, int):
            # Default ip is the required one to reach the scheduler
            self.ip = get_ip(
                get_address_host_port(self.scheduler.address)[0]
            )
            self.listen((self.ip, addr_or_port))
        else:
            self.listen(addr_or_port)
            self.ip = get_address_host_port(self.address)[0]
        self.name = self.name or self.address
        # Services listen on all addresses
        # Note Nanny is not a "real" service, just some metadata
        # passed in service_ports...
        self.start_services()
        logger.info(' Start worker at: %26s', self.address)
        for k, v in self.service_ports.items():
            logger.info(' %16s at: %20s:%d' % (k, self.ip, v))
        logger.info('Waiting to connect to: %26s',
                    self.scheduler.address)
        logger.info('-' * 49)
        logger.info(' Threads: %26d', self.ncores)
        if self.memory_limit:
            logger.info(' Memory: %23.2f GB', self.memory_limit / 1e9)
        logger.info(' Local Directory: %26s', self.local_dir)
        logger.info('-' * 49)
        yield self._register_with_scheduler()
        logger.info(' Registered to: %32s', self.scheduler.address)
        logger.info('-' * 49)
        self.status = 'running'
    def start(self, port=0):
        """Schedule the worker to start on the IOLoop (non-blocking)."""
        self.loop.add_callback(self._start, port)
    def identity(self, comm):
        """Handler: return a small dict describing this worker."""
        return {'type': type(self).__name__,
                'id': self.id,
                'scheduler': self.scheduler.address,
                'ncores': self.ncores,
                'memory_limit': self.memory_limit}
    @gen.coroutine
    def _close(self, report=True, timeout=10):
        """Shut down: stop listening, unregister, and clean up local state/disk.

        Idempotent: returns immediately if already closing or closed.
        """
        if self.status in ('closed', 'closing'):
            return
        logger.info("Stopping worker at %s", self.address)
        self.status = 'closing'
        self.stop()
        self.heartbeat_callback.stop()
        with ignoring(EnvironmentError):
            if report:
                # Best-effort unregister; don't hang on a dead scheduler.
                yield gen.with_timeout(timedelta(seconds=timeout),
                                       self.scheduler.unregister(address=self.address),
                                       io_loop=self.loop)
        self.scheduler.close_rpc()
        self.executor.shutdown()
        if os.path.exists(self.local_dir):
            shutil.rmtree(self.local_dir)
        for k, v in self.services.items():
            v.stop()
        self.rpc.close()
        self.status = 'closed'
        self._closed.set()
    @gen.coroutine
    def terminate(self, comm, report=True):
        """Handler: close the worker, optionally reporting to the scheduler."""
        yield self._close(report=report)
        raise Return('OK')
    @gen.coroutine
    def wait_until_closed(self):
        """Block (coroutine) until the worker has fully closed."""
        yield self._closed.wait()
        assert self.status == 'closed'
    def _deserialize(self, function=None, args=None, kwargs=None, task=None):
        """ Deserialize task inputs and regularize to func, args, kwargs """
        if function is not None:
            function = loads(function)
        if args:
            args = loads(args)
        if kwargs:
            kwargs = loads(kwargs)
        if task is not None:
            # A raw task spec is exclusive of function/args/kwargs; wrap it so
            # the executor evaluates it via execute_task.
            assert not function and not args and not kwargs
            function = execute_task
            args = (task,)
        # Normalize missing args/kwargs to empty containers.
        return function, args or (), kwargs or {}
    @gen.coroutine
    def executor_submit(self, key, function, *args, **kwargs):
        """ Safely run function in thread pool executor

        We've run into issues running concurrent.future futures within
        tornado. Apparently it's advantageous to use timeouts and periodic
        callbacks to ensure things run smoothly. This can get tricky, so we
        pull it off into an separate method.
        """
        job_counter[0] += 1
        # logger.info("%s:%d Starts job %d, %s", self.ip, self.port, i, key)
        future = self.executor.submit(function, *args, **kwargs)
        # Log the future's internal state every second while it runs.
        pc = PeriodicCallback(lambda: logger.debug("future state: %s - %s",
                                                   key, future._state), 1000, io_loop=self.loop); pc.start()
        try:
            yield future
        finally:
            pc.stop()
        result = future.result()
        # logger.info("Finish job %d, %s", i, key)
        raise gen.Return(result)
def run(self, comm, function, args=(), kwargs={}):
return run(self, comm, function=function, args=args, kwargs=kwargs)
def run_coroutine(self, comm, function, args=(), kwargs={}, wait=True):
return run(self, comm, function=function, args=args, kwargs=kwargs,
is_coro=True, wait=wait)
    def update_data(self, comm=None, data=None, report=True):
        """Handler: accept scattered key/value data into local storage.

        Returns {'nbytes': {key: size}, 'status': 'OK'}.
        """
        for key, value in data.items():
            if key in self.task_state:
                self.transition(key, 'memory', value=value)
            else:
                # Unknown task: store the value and fabricate minimal task
                # bookkeeping entries for it.
                self.put_key_in_memory(key, value)
                self.task_state[key] = 'memory'
                self.tasks[key] = None
                self.priorities[key] = None
                self.durations[key] = None
                self.dependencies[key] = set()
            if key in self.dep_state:
                self.transition_dep(key, 'memory', value=value)
            self.log.append((key, 'receive-from-scatter'))
        if report:
            # Tell the scheduler these keys now live here.
            self.batched_stream.send({'op': 'add-keys',
                                      'keys': list(data)})
        info = {'nbytes': {k: sizeof(v) for k, v in data.items()},
                'status': 'OK'}
        return info
    @gen.coroutine
    def delete_data(self, comm=None, keys=None, report=True):
        """Handler: drop the given keys from local storage.

        Optionally reports the loss of those keys back to the scheduler.
        """
        if keys:
            for key in list(keys):
                self.log.append((key, 'delete'))
                if key in self.task_state:
                    self.release_key(key)
                if key in self.dep_state:
                    self.release_dep(key)
            logger.debug("Deleted %d keys", len(keys))
            if report:
                logger.debug("Reporting loss of keys to scheduler")
                yield self.scheduler.remove_keys(address=self.address,
                                                 keys=list(keys))
        raise Return('OK')
    @gen.coroutine
    def get_data(self, comm, keys=None, who=None):
        """Handler: send locally-stored values for ``keys`` over ``comm``.

        Records load/send durations and a transfer-log entry for diagnostics.
        Replies directly on the comm, so returns the 'dont-reply' sentinel.
        """
        start = time()
        # Only keys actually present locally are served.
        msg = {k: to_serialize(self.data[k]) for k in keys if k in self.data}
        nbytes = {k: self.nbytes.get(k) for k in keys if k in self.data}
        stop = time()
        if self.digests is not None:
            self.digests['get-data-load-duration'].add(stop - start)
        start = time()
        try:
            compressed = yield comm.write(msg)
        except EnvironmentError:
            logger.exception('failed during get data', exc_info=True)
            comm.abort()
            raise
        stop = time()
        if self.digests is not None:
            self.digests['get-data-send-duration'].add(stop - start)
        total_bytes = sum(filter(None, nbytes.values()))
        self.outgoing_count += 1
        duration = (stop - start) or 0.5  # windows clock can report 0 elapsed
        self.outgoing_transfer_log.append({
            'start': start,
            'stop': stop,
            'middle': (start + stop) / 2,
            'duration': duration,
            'who': who,
            'keys': nbytes,
            'total': total_bytes,
            'compressed': compressed,
            'bandwidth': total_bytes / duration
        })
        raise gen.Return('dont-reply')
    @gen.coroutine
    def set_resources(self, **resources):
        """Handler: update this worker's abstract resources and inform the scheduler."""
        for r, quantity in resources.items():
            if r in self.total_resources:
                # Adjust availability by the delta from the previous total.
                self.available_resources[r] += quantity - self.total_resources[r]
            else:
                self.available_resources[r] = quantity
            self.total_resources[r] = quantity
        yield self.scheduler.set_resources(resources=self.total_resources,
                                           worker=self.address)
    def start_ipython(self, comm):
        """Start an IPython kernel

        Returns Jupyter connection info dictionary.
        """
        from ._ipython_utils import start_ipython
        # Lazily start a single kernel per worker and reuse it.
        if self._ipython_kernel is None:
            self._ipython_kernel = start_ipython(
                ip=self.ip,
                ns={'worker': self},
                log=logger,
            )
        return self._ipython_kernel.get_connection_info()
def upload_file(self, comm, filename=None, data=None, load=True):
out_filename = os.path.join(self.local_dir, filename)
if isinstance(data, unicode):
data = data.encode()
with open(out_filename, 'wb') as f:
f.write(data)
f.flush()
if load:
try:
name, ext = os.path.splitext(filename)
names_to_import = []
if ext in ('.py', '.pyc'):
names_to_import.append(name)
# Ensures that no pyc file will be reused
cache_file = cache_from_source(out_filename)
if os.path.exists(cache_file):
os.remove(cache_file)
if ext in ('.egg', '.zip'):
if out_filename not in sys.path:
sys.path.insert(0, out_filename)
if ext == '.egg':
import pkg_resources
pkgs = pkg_resources.find_distributions(out_filename)
for | |
<reponame>Mattlau04/Splatnet2-Rich-Presence
import json
import math
import os
import time
import pypresence
import click
import nso_functions
from pypresence import Presence
from datetime import datetime, timedelta
from config.logger import logger
# this acts as the core controller which allows us to interface with the Splatnet API
nso = nso_functions.NSOInterface()
# config
client_id = '488433036665552897' # client id for discord rich presence
# this is the interval at which your rich presence updates. default is 3 seconds
time_interval = 3
# time it takes for the bot to declare you not playing splatoon in minutes
# NOTE(review): 300000 minutes is roughly 208 days, so the timeout branch in
# main() effectively never fires -- confirm whether this is intentional
timeout_minutes = 300000
show_weapon = True # change to False to remove weapon name from discord details
def get_minutes_since():
    """Return (minutes elapsed since the last match ended, that match's JSON).

    Salmon Run results carry a 'job_result' key and report 'play_time'
    directly; Turf/Ranked/League/Private battles report 'start_time' plus
    (usually) 'elapsed_time'.
    """
    latest = nso.load_results()[0]
    # When Previous Match was Salmon Run
    if latest.get('job_result') is not None:
        end_ts = int(latest["play_time"])
    # When Previous Match was Turf, Ranked, League or Private
    else:
        try:
            # adds the seconds of the match to the unix time of the match starting
            end_ts = int(latest["start_time"] + latest["elapsed_time"])
        except KeyError:
            # the API sometimes omits the duration; assume a 3 minute match
            end_ts = int(latest["start_time"] + 180)
    # Difference between now and the match end (both as naive UTC datetimes)
    elapsed = datetime.utcnow() - datetime.utcfromtimestamp(end_ts)
    return elapsed.total_seconds() / 60, latest
# Returns the plural suffix for a count: "" for exactly one, "s" otherwise.
def plural_logic(nbr):
    """Return "" when *nbr* == 1, else the plural suffix "s"."""
    if nbr == 1:
        return ""
    return "s"
@click.command()
def main():
logger.info("Checking for updates...")
os.system("git pull")
logger.info(
"If updates were done, restart this script by using CTRL-C to terminate it, and re run it.")
# Make connection to Discord
try:
RPC = Presence(client_id) # Initialize the Presence class
RPC.connect() # Start the handshake loop
except pypresence.exceptions.InvalidPipe:
logger.error(
"Could not connect to the discord pipe. Please ensure it's running.")
exit(1)
except FileNotFoundError:
logger.error(
"Could not connect to the discord pipe. Please ensure it's running.")
exit(1)
# Load our current config
config = nso_functions.get_config_file()
logger.info("Check discord!")
# Get friend code from config, and add config option if does not exist
try:
friend_code = config['friend_code']
except KeyError:
config['friend_code'] = 'Unset'
config_f = open("config.txt", "w")
config_f.write(json.dumps(config, sort_keys=True, indent=4, ))
config_f.close()
friend_code = config['friend_code']
while True: # The presence will stay on as long as the program is running
for i in range(0, 5):
minutes_since, last_match = get_minutes_since()
# Calculating the secs/hours/days since Last Match/Run
seconds_since = int(minutes_since * 60)
hours_since = int(minutes_since / 60)
days_since = int(minutes_since / 1440)
# When Previous Match was Salmon Run
# job_result is only present in salmon run JSON
if last_match.get('job_result') is not None:
# Sets Gamemode Key in order to change the Picture
gamemode_key = "salmon_run"
# Decides if last Run is shown in days, hours, minutes or seconds
# In Days
if minutes_since >= 1440:
details = "Last Run: {} day{} ago".format(days_since, plural_logic(days_since))
# In Hours
elif minutes_since >= 60:
details = "Last Run: {} h{} ago".format(hours_since, plural_logic(hours_since))
# In Minutes
elif minutes_since > 1:
details = "Last Run: {} min{} ago".format(math.floor(minutes_since), plural_logic(math.floor(minutes_since)))
# In Seconds
else:
details = "Last Run: {} sec{} ago".format(seconds_since, plural_logic(seconds_since))
# Deciding the Result
if last_match['job_result']['is_clear']:
outcome = "WON"
else:
outcome = "LOST"
### Checks how many waves were played on last Run
# If all 3 Waves were played
if last_match["wave_details"][2]:
goldEgg = last_match["wave_details"][0]["golden_ikura_num"] + \
last_match["wave_details"][1]["golden_ikura_num"] + \
last_match["wave_details"][2]["golden_ikura_num"]
powEgg = last_match["wave_details"][0]["ikura_num"] + \
last_match["wave_details"][1]["ikura_num"] + \
last_match["wave_details"][2]["ikura_num"]
# If only 2 Waves were played
elif not last_match["wave_details"][2] and last_match["wave_details"][1]:
goldEgg = last_match["wave_details"][0]["golden_ikura_num"] + last_match["wave_details"][1]["golden_ikura_num"]
powEgg = last_match["wave_details"][0]["ikura_num"] + last_match["wave_details"][1]["ikura_num"]
# If only 1 Wave was played
else:
goldEgg = last_match["wave_details"][0]["golden_ikura_num"]
powEgg = last_match["wave_details"][0]["ikura_num"]
# When hovering on the Picture
large_text = "Last match was Salmon Run on {}".format(
last_match['schedule']['stage']['name']
)
# IGN and Salmon Run Rank
if i == 0:
details = "IGN: {}".format(
last_match["my_result"]["name"]
)
state = "{} {}".format(
(last_match["grade"])["long_name"],
last_match["grade_point"]
)
# Friend code
elif i == 1:
if not friend_code:
state = "FC: Not Given"
else:
state = "FC: {}".format(friend_code)
# Hazard Level
elif i == 2:
state = "Hazard Level: {}".format(
str(last_match["danger_rate"]) + "%"
)
# Result and Total Collected Golden Eggs / Power Eggs
elif i == 3:
details = "GoldEgg/PowEgg ({})".format(
outcome
)
state = "{} / {}".format(
goldEgg,
powEgg
)
# Save / Death Ratio
elif i == 4:
state = "Save/Death Ratio: {}/{}".format(
last_match["my_result"]["help_count"],
last_match["my_result"]["dead_count"]
)
if minutes_since < timeout_minutes:
RPC.update(
details=details,
state=state,
large_image=gamemode_key,
small_image="default",
large_text=large_text
)
else:
RPC.clear()
logger.debug("RPC cleared, not in game long enough")
time.sleep(time_interval)
# When Previous Match was Turf, Ranked, League or Private
else:
# Decides if last Match is shown in days, hours, minutes or seconds
# In Days
if minutes_since >= 1440:
details = "Last Match: {} day{} ago".format(days_since, plural_logic(days_since))
# In Hours
elif minutes_since >= 60:
details = "Last Match: {} h{} ago".format(hours_since, plural_logic(hours_since))
# In Minutes
elif minutes_since > 1:
details = "Last Match: {} min{} ago".format(math.floor(minutes_since), plural_logic(math.floor(minutes_since)))
# In Seconds
else:
details = "Last Match: {} sec{} ago".format(seconds_since, plural_logic(seconds_since))
# When hovering on the Picture
large_text = "Last match was {}, {} on {}".format(
last_match["game_mode"]["name"],
last_match["rule"]["name"],
last_match["stage"]["name"]
)
# Gets Gamemode Key in order to change the Picture
gamemode_key = last_match["rule"]["key"]
# Gets Lobby Key
lobby_key = last_match["game_mode"]["key"]
# IGN and Level (+ Rank)
if i == 0:
details = "IGN: {}".format(
last_match["player_result"]["player"]["nickname"]
)
# Checks if player has a Level Star
# If player has no Level Star (yet XP)
if not last_match["star_rank"]:
# If last match was in a Regular Lobby (Turf War) or Private Lobby
if lobby_key == "regular" or lobby_key == "private":
state = "Level: {}".format(
last_match["player_result"]["player"]["player_rank"],
)
# If last match was in a Ranked Solo Lobby
elif lobby_key == "gachi":
# If last match was Splat Zones
if gamemode_key == "splat_zones":
# If player has S+ Rank
if last_match["udemae"]["name"] == "S+":
state = "Lvl: {}/R(SZ): {}{}".format(
last_match["player_result"]["player"]["player_rank"],
last_match["udemae"]["name"],
last_match["udemae"]["s_plus_number"]
)
# If player has X Rank
elif last_match["udemae"]["name"] == "X":
# Checks if Player has any X Power
# If Player has no X Power (yet XP)
if not last_match["x_power"]:
state = "Lvl: {}/R(SZ): X(TBD)".format(
last_match["player_result"]["player"]["player_rank"],
)
# If Player has X Power
else:
state = "Lvl: {}/R(SZ): X({})".format(
last_match["player_result"]["player"]["player_rank"],
last_match["x_power"]
)
# If player has other Ranks
else:
state = "Lvl: {}/R(SZ): {}".format(
last_match["player_result"]["player"]["player_rank"],
last_match["udemae"]["name"]
)
# If last match was Tower Control
elif gamemode_key == "tower_control":
# If player has S+ Rank
if last_match["udemae"]["name"] == "S+":
state = "Lvl: {}/R(TC): {}{}".format(
last_match["player_result"]["player"]["player_rank"],
last_match["udemae"]["name"],
last_match["udemae"]["s_plus_number"]
)
# If player has X Rank
elif last_match["udemae"]["name"] == "X":
# Checks if Player has any X Power
# If Player has no X Power (yet XP)
if not last_match["x_power"]:
state = "Lvl: {}/R(TC): X(TBD)".format(
last_match["player_result"]["player"]["player_rank"],
)
# If Player has X Power
else:
state = "Lvl: {}/R(TC): X({})".format(
last_match["player_result"]["player"]["player_rank"],
last_match["x_power"]
)
# If player has other Ranks
else:
state = "Lvl: {}/R(TC): {}".format(
last_match["player_result"]["player"]["player_rank"],
last_match["udemae"]["name"]
)
# If last match was Rainmaker
elif gamemode_key == "rainmaker":
# If player has S+ Rank
if last_match["udemae"]["name"] == "S+":
state = "Lvl: {}/R(RM): {}{}".format(
last_match["player_result"]["player"]["player_rank"],
last_match["udemae"]["name"],
last_match["udemae"]["s_plus_number"]
)
# If player has X Rank
elif last_match["udemae"]["name"] == "X":
# Checks if Player has any X Power
# If Player has no X Power (yet XP)
if not last_match["x_power"]:
state = "Lvl: {}/R(RM): X(TBD)".format(
last_match["player_result"]["player"]["player_rank"],
)
# If Player has X Power
else:
state = "Lvl: {}/R(RM): X({})".format(
last_match["player_result"]["player"]["player_rank"],
last_match["x_power"]
)
# If player has other Ranks
else:
state = "Lvl: {}/R(RM): {}".format(
last_match["player_result"]["player"]["player_rank"],
last_match["udemae"]["name"]
)
                    # If last match was Clam Blitz (remaining ranked mode; state below uses "R(CB)")
else:
# If player has S+ Rank
if last_match["udemae"]["name"] == "S+":
state = "Lvl: {}/R(CB): {}{}".format(
last_match["player_result"]["player"]["player_rank"],
last_match["udemae"]["name"],
last_match["udemae"]["s_plus_number"]
)
# If player has X Rank
elif last_match["udemae"]["name"] == "X":
# Checks if Player has | |
"""
interface_wrapper.py - EXCALIBUR high level API for the ODIN server.
DLS (Diamond Light Source)
"""
import sys
import traceback
import logging
import json
from datetime import datetime
import time
import threading
import getpass
is_py2 = sys.version[0] == '2'
if is_py2:
import Queue as queue
else:
import queue as queue
from excalibur.detector import ExcaliburDetector, ExcaliburDetectorError
from excalibur.detector_sim import ExcaliburSimulator
from excalibur.calibration_files import DetectorCalibration
from excalibur.definitions import ExcaliburDefinitions
from excalibur.efuse_id_parser import ExcaliburEfuseIDParser
from excalibur.fem import ExcaliburFem
from odin.adapters.parameter_tree import ParameterAccessor, ParameterTree
from enum import Enum
from collections import OrderedDict
class ExcaliburParameter(OrderedDict):
    """A write parameter (param, value, fem, chip) for the EXCALIBUR detector.

    Stored as an OrderedDict so it serialises naturally, while get()
    offers tuple-style unpacking of the four fields.
    """
    def __init__(self, param, value,
                 fem=ExcaliburDefinitions.ALL_FEMS, chip=ExcaliburDefinitions.ALL_CHIPS):
        super(ExcaliburParameter, self).__init__()
        self['param'] = param
        self['value'] = value
        self['fem'] = fem
        self['chip'] = chip

    def get(self):
        """Return (param, value, fem, chip).

        Bug fix: the fields live in the mapping, not as instance
        attributes, so they must be read with item access -- the previous
        attribute access (self.param, ...) raised AttributeError.
        """
        return self['param'], self['value'], self['fem'], self['chip']
class ExcaliburReadParameter(OrderedDict):
    """A read parameter (param, fem, chip) for the EXCALIBUR detector."""
    def __init__(self, param, fem=ExcaliburDefinitions.ALL_FEMS, chip=ExcaliburDefinitions.ALL_CHIPS):
        super(ExcaliburReadParameter, self).__init__()
        self['param'] = param
        self['fem'] = fem
        self['chip'] = chip

    def get(self):
        """Return (param, fem, chip).

        Bug fix: read the fields with item access -- OrderedDict does not
        expose its keys as attributes, so the previous self.param form
        raised AttributeError.
        """
        return self['param'], self['fem'], self['chip']
class HLExcaliburDetector(ExcaliburDetector):
    """Wraps the detector class to provide a high level interface.
    """
    # When True, init() skips the initial slow status read (set by unit tests)
    test_mode = False
    # High level detector state values, held in self._state and reported
    # through the status/state parameter
    STATE_IDLE = 0
    STATE_ACQUIRE = 1
    STATE_CALIBRATING = 2
    # Calibration stages, each tracked per-FEM in self._calibration_status
    CALIBRATION_AREAS = [
        'dac',
        'discl',
        'disch',
        'mask',
        'thresh'
    ]
    # Power card monitoring/control parameter names, grouped into small
    # batches (presumably read one batch per poll cycle -- confirm against
    # the status-polling code, which is outside this view)
    POWERCARD_PARAMS = [['fe_lv_enable',
                         'fe_hv_enable',
                         'pwr_p5va_vmon'],
                        ['pwr_p5vb_vmon',
                         'pwr_p5v_fem00_imon',
                         'pwr_p5v_fem01_imon'],
                        ['pwr_p5v_fem02_imon',
                         'pwr_p5v_fem03_imon',
                         'pwr_p5v_fem04_imon'],
                        ['pwr_p5v_fem05_imon',
                         'pwr_p48v_vmon',
                         'pwr_p48v_imon'],
                        ['pwr_p5vsup_vmon',
                         'pwr_p5vsup_imon',
                         'pwr_humidity_mon'],
                        ['pwr_air_temp_mon',
                         'pwr_coolant_temp_mon',
                         'pwr_coolant_flow_mon'],
                        ['pwr_p3v3_imon',
                         'pwr_p1v8_imonA',
                         'pwr_bias_imon'],
                        ['pwr_p3v3_vmon',
                         'pwr_p1v8_vmon',
                         'pwr_bias_vmon'],
                        ['pwr_p1v8_imonB',
                         'pwr_p1v8_vmonB',
                         'pwr_coolant_temp_status'],
                        ['pwr_humidity_status',
                         'pwr_coolant_flow_status',
                         'pwr_air_temp_status',
                         'pwr_fan_fault']
                        ]
    # Per-chip efuse ID parameters plus the aggregate match flag
    EFUSE_PARAMS = [
        'efuseid_c0',
        'efuseid_c1',
        'efuseid_c2',
        'efuseid_c3',
        'efuseid_c4',
        'efuseid_c5',
        'efuseid_c6',
        'efuseid_c7',
        'efuse_match'
    ]
    # FEM temperature monitoring parameters
    FEM_PARAMS = [
        'fem_local_temp',
        'fem_remote_temp'
    ]
    # 'moly' sensor parameters (temperature / humidity)
    MOLY_PARAMS = [
        'moly_temp',
        'moly_humidity'
    ]
    # Sensor supply-rail monitoring parameters
    SUPPLY_PARAMS = [
        'supply_p1v5_avdd1',
        'supply_p1v5_avdd2',
        'supply_p1v5_avdd3',
        'supply_p1v5_avdd4',
        'supply_p1v5_vdd1',
        'supply_p2v5_dvdd1'
    ]
    # String keys used to assemble the status branch of the ParameterTree
    # built in __init__
    STR_STATUS = 'status'
    STR_STATUS_POLL_ACTIVE = 'poll_active'
    STR_STATUS_SENSOR = 'sensor'
    STR_STATUS_SENSOR_WIDTH = 'width'
    STR_STATUS_SENSOR_HEIGHT = 'height'
    STR_STATUS_SENSOR_BYTES = 'bytes'
    STR_STATUS_MANUFACTURER = 'manufacturer'
    STR_STATUS_MODEL = 'model'
    STR_STATUS_ERROR = 'error'
    STR_STATUS_STATE = 'state'
    STR_STATUS_FEM_STATE = 'fem_state'
    STR_STATUS_FEM_FRAMES = 'fem_frames'
    STR_STATUS_FRAMES_ACQUIRED = 'frames_acquired'
    STR_STATUS_FRAME_RATE = 'frame_rate'
    STR_STATUS_ACQUISITION_COMPLETE = 'acquisition_complete'
    STR_STATUS_LV_ENABLED = 'lv_enabled'
    STR_STATUS_CALIBRATING = 'calibrating'
    STR_STATUS_CALIBRATION = 'calibration'
    STR_STATUS_POWERCARD = 'powercard'
    STR_STATUS_POWERCARD_HV_ENABLED = 'hv_enabled'
    STR_STATUS_EFUSE = 'efuse'
    STR_STATUS_FEM = 'fems'
    STR_STATUS_SUPPLY = 'supply'
    STR_STATUS_DACS = 'dacs'
    # String keys used to assemble the config branch of the ParameterTree
    STR_CONFIG = 'config'
    STR_CONFIG_NUM_IMAGES = 'num_images'
    STR_CONFIG_EXPOSURE_TIME = 'exposure_time'
    STR_CONFIG_NUM_TEST_PULSES = 'num_test_pulses'
    STR_CONFIG_SCAN_DAC_NUM = 'scan_dac_num'
    STR_CONFIG_SCAN_DAC_START = 'scan_dac_start'
    STR_CONFIG_SCAN_DAC_STOP = 'scan_dac_stop'
    STR_CONFIG_SCAN_DAC_STEP = 'scan_dac_step'
    STR_CONFIG_TEST_PULSE_ENABLE = 'test_pulse_enable'
    STR_CONFIG_IMAGE_MODE = 'image_mode'
    STR_CONFIG_OPERATION_MODE = 'operation_mode'
    STR_CONFIG_LFSR_BYPASS = 'lfsr_bypass'
    STR_CONFIG_READ_WRITE_MODE = 'read_write_mode'
    STR_CONFIG_DISC_CSM_SPM = 'disc_csm_spm'
    STR_CONFIG_EQUALIZATION_MODE = 'equalization_mode'
    STR_CONFIG_TRIGGER_MODE = 'trigger_mode'
    STR_CONFIG_TRIGGER_POLARITY = 'trigger_polarity'
    STR_CONFIG_CSM_SPM_MODE = 'csm_spm_mode'
    STR_CONFIG_COLOUR_MODE = 'colour_mode'
    STR_CONFIG_GAIN_MODE = 'gain_mode'
    STR_CONFIG_COUNTER_SELECT = 'counter_select'
    STR_CONFIG_COUNTER_DEPTH = 'counter_depth'
    STR_CONFIG_CAL_FILE_ROOT = 'cal_file_root'
    STR_CONFIG_ENERGY_THRESHOLD_0 = 'energy_threshold_0'
    STR_CONFIG_ENERGY_THRESHOLD_1 = 'energy_threshold_1'
    STR_CONFIG_ENERGY_DELTA = 'energy_delta'
    STR_CONFIG_UDP_FILE = 'udp_file'
    STR_CONFIG_HV_BIAS = 'hv_bias'
    STR_CONFIG_LV_ENABLE = 'lv_enable'
    STR_CONFIG_HV_ENABLE = 'hv_enable'
    STR_CONFIG_TEST_DAC_FILE = 'test_dac_file'
    STR_CONFIG_TEST_MASK_FILE = 'test_mask_file'
    def __init__(self, fem_connections, simulated=False):
        """Initialise the high level detector wrapper.

        :param fem_connections: FEM connection descriptors, passed through
            to the underlying ExcaliburDetector
        :param simulated: when True, route the FEM API through the stub
            implementation and attach an ExcaliburSimulator

        Sets up default acquisition/configuration state, calibration
        bookkeeping, and the ParameterTree exposing status and config to
        the ODIN server.
        """
        self._simulated = simulated
        if self._simulated:
            # Route low-level FEM calls through the stub API and drive them
            # from the simulator instead of real hardware
            ExcaliburFem.use_stub_api = True
            self._simulator = ExcaliburSimulator(fem_connections)
        else:
            self._simulator = None
        super(HLExcaliburDetector, self).__init__(fem_connections)
        self._startup_time = datetime.now()
        self._username = getpass.getuser()
        # FEM IDs are 1-based
        self._fems = range(1, len(fem_connections)+1)
        logging.debug("Fem conection IDs: %s", self._fems)
        self._default_status = []
        for fem in self._fems:
            self._default_status.append(None)
        # Initialise sensor dimensions
        self._sensor_width = ExcaliburDefinitions.X_PIXELS_PER_CHIP * ExcaliburDefinitions.X_CHIPS_PER_FEM
        self._sensor_height = ExcaliburDefinitions.Y_PIXELS_PER_CHIP * ExcaliburDefinitions.Y_CHIPS_PER_FEM * len(self._fems)
        self._sensor_bytes = 0
        # Initialise state
        self._state = HLExcaliburDetector.STATE_IDLE
        # Initialise dual 12 bit status
        self._dual_12bit_valid = False
        # Initialise error message
        self._error = ''
        # Initialise acquisition status
        self._fem_state = None
        self._fem_frames = 0
        self._frames_acquired = None
        self._frame_rate = None
        self._acquisition_complete = None
        # Initialise polling variables
        self._poll_active = True
        self._poll_timeout = datetime.now()
        # Initialise hv and lv enabled status
        self._lv_enabled = 0
        self._lv_check_counter = 2
        # Create the calibration object and associated status dict
        self._calibrating = 0
        self._calibration_bitmask = [0] * len(self._fems)
        self._cb = DetectorCalibration()
        self._calibration_status = {}
        for cb in self.CALIBRATION_AREAS:
            # One per-FEM completion flag list for each calibration area
            self._calibration_status[cb] = [0] * len(self._fems)
        # Create the Excalibur parameters (defaults generally take the first
        # entry of the corresponding ExcaliburDefinitions name list)
        self._num_images = 1
        self._exposure_time = 1.0
        self._num_test_pulses = 0
        self._scan_dac_num = 0
        self._scan_dac_start = 0
        self._scan_dac_stop = 0
        self._scan_dac_step = 0
        self._test_pulse_enable = ExcaliburDefinitions.FEM_TEST_PULSE_NAMES[0]
        self._image_mode = ExcaliburDefinitions.FEM_IMAGEMODE_NAMES[0]
        self._operation_mode = ExcaliburDefinitions.FEM_OPERATION_MODE_NAMES[0]
        self._lfsr_bypass = ExcaliburDefinitions.FEM_LFSR_BYPASS_MODE_NAMES[0]
        self._read_write_mode = ExcaliburDefinitions.FEM_READOUT_MODE_NAMES[0]
        self._disc_csm_spm = ExcaliburDefinitions.FEM_DISCCSMSPM_NAMES[0]
        self._equalization_mode = ExcaliburDefinitions.FEM_EQUALIZATION_MODE_NAMES[0]
        self._trigger_mode = ExcaliburDefinitions.FEM_TRIGMODE_NAMES[0]
        # NOTE: trigger polarity defaults to index [1], unlike the other modes
        self._trigger_polarity = ExcaliburDefinitions.FEM_TRIGPOLARITY_NAMES[1]
        self._csm_spm_mode = ExcaliburDefinitions.FEM_CSMSPM_MODE_NAMES[0]
        self._colour_mode = ExcaliburDefinitions.FEM_COLOUR_MODE_NAMES[0]
        self._gain_mode = ExcaliburDefinitions.FEM_GAIN_MODE_NAMES[0]
        self._counter_select = 0
        self._counter_depth = '12'
        self._cal_file_root = ''
        self._energy_threshold_0 = 0.0
        self._energy_threshold_1 = 0.0
        self._energy_delta = 0.0
        self._udp_file = ''
        self._hv_bias = 0.0
        self._lv_enable = 0
        self._hv_enable = 0
        self._test_dac_file = ''
        self._test_mask_file = ''
        self._dacs = {}
        # Initialise the powercard
        self._powercard_status = None
        powercard_tree = self.init_powercard()
        # Initialise the efuse structure
        self._efuse_status = None
        efuse_tree = self.init_efuse_ids()
        # Initialise the supply structure
        self._supply_status = None
        supply_tree = self.init_supply()
        # Initialise the fem structure
        self._fem_status = None
        fem_tree = self.init_fems()
        # Initialise the parameter tree from the general status, powercard status and efuse status
        tree = {
            'api': (lambda: 0.1, {
                # Meta data here
            }),
            'username': (lambda: self._username, {}),
            'start_time': (lambda: self._startup_time.strftime("%B %d, %Y %H:%M:%S"), {}),
            'up_time': (lambda: str(datetime.now() - self._startup_time), {}),
            self.STR_STATUS: {
                self.STR_STATUS_SENSOR: {
                    self.STR_STATUS_SENSOR_WIDTH: (self.get_sensor_width, {
                        # Meta data here
                    }),
                    self.STR_STATUS_SENSOR_HEIGHT: (self.get_sensor_height, {
                        # Meta data here
                    }),
                    self.STR_STATUS_SENSOR_BYTES: (self.get_sensor_bytes, {
                        # Meta data here
                    })
                },
                self.STR_STATUS_MANUFACTURER: (lambda: 'DLS/STFC', {
                    # Meta data here
                }),
                self.STR_STATUS_MODEL: (lambda: 'Odin [Excalibur2]', {
                    # Meta data here
                }),
                self.STR_STATUS_ERROR: (self.get_error, {
                    # Meta data here
                }),
                self.STR_STATUS_STATE: (self.get_state, {
                    # Meta data here
                }),
                self.STR_STATUS_POLL_ACTIVE: (lambda: self._poll_active, {
                    # Meta data here
                }),
                self.STR_STATUS_FEM_STATE: (self.get_fem_state, {
                    # Meta data here
                }),
                self.STR_STATUS_FEM_FRAMES: (self.get_fem_frames, {
                    # Meta data here
                }),
                self.STR_STATUS_FRAMES_ACQUIRED: (self.get_frames_acquired, {
                    # Meta data here
                }),
                self.STR_STATUS_FRAME_RATE: (self.get_frame_rate, {
                    # Meta data here
                }),
                self.STR_STATUS_ACQUISITION_COMPLETE: (self.get_acquisition_complete, {
                    # Meta data here
                }),
                self.STR_STATUS_LV_ENABLED: (self.get_lv_enabled, {
                    # Meta data here
                }),
                self.STR_STATUS_CALIBRATING: (self.get_calibrating_status, {
                    # Meta data here
                }),
                self.STR_STATUS_CALIBRATION: (self.get_calibration_bitmask, {
                    # Meta data here
                }),
                self.STR_STATUS_POWERCARD: powercard_tree,
                self.STR_STATUS_EFUSE: efuse_tree,
                self.STR_STATUS_SUPPLY: supply_tree,
                self.STR_STATUS_FEM: fem_tree,
                self.STR_STATUS_DACS: (self.get_dacs, None)
            },
            self.STR_CONFIG: {
                self.STR_CONFIG_NUM_IMAGES: (self.get_num_images, self.set_num_images, {
                    # Meta data here
                }),
                self.STR_CONFIG_EXPOSURE_TIME: (self.get_exposure_time, self.set_exposure_time, {
                    # Meta data here
                }),
                self.STR_CONFIG_NUM_TEST_PULSES: (self.get_num_test_pulses, self.set_num_test_pulses, {
                    # Meta data here
                }),
                self.STR_CONFIG_SCAN_DAC_NUM: (self.get_scan_dac_num, self.set_scan_dac_num, {
                    # Meta data here
                }),
                self.STR_CONFIG_SCAN_DAC_START: (self.get_scan_dac_start, self.set_scan_dac_start, {
                    # Meta data here
                }),
                self.STR_CONFIG_SCAN_DAC_STOP: (self.get_scan_dac_stop, self.set_scan_dac_stop, {
                    # Meta data here
                }),
                self.STR_CONFIG_SCAN_DAC_STEP: (self.get_scan_dac_step, self.set_scan_dac_step, {
                    # Meta data here
                }),
                self.STR_CONFIG_TEST_PULSE_ENABLE: (self.get_test_pulse_enable, self.set_test_pulse_enable, {
                    'allowed_values': ExcaliburDefinitions.FEM_TEST_PULSE_NAMES
                }),
                self.STR_CONFIG_IMAGE_MODE: (self.get_image_mode, self.set_image_mode, {
                    'allowed_values': ExcaliburDefinitions.FEM_IMAGEMODE_NAMES
                }),
                self.STR_CONFIG_OPERATION_MODE: (self.get_operation_mode, self.set_operation_mode, {
                    'allowed_values': ExcaliburDefinitions.FEM_OPERATION_MODE_NAMES
                }),
                self.STR_CONFIG_LFSR_BYPASS: (self.get_lfsr_bypass, self.set_lfsr_bypass, {
                    'allowed_values': ExcaliburDefinitions.FEM_LFSR_BYPASS_MODE_NAMES
                }),
                self.STR_CONFIG_READ_WRITE_MODE: (self.get_read_write_mode, self.set_read_write_mode, {
                    'allowed_values': ExcaliburDefinitions.FEM_READOUT_MODE_NAMES
                }),
                self.STR_CONFIG_DISC_CSM_SPM: (self.get_disc_csm_spm, self.set_disc_csm_spm, {
                    'allowed_values': ExcaliburDefinitions.FEM_DISCCSMSPM_NAMES
                }),
                self.STR_CONFIG_EQUALIZATION_MODE: (self.get_equalization_mode, self.set_equalization_mode, {
                    'allowed_values': ExcaliburDefinitions.FEM_EQUALIZATION_MODE_NAMES
                }),
                self.STR_CONFIG_TRIGGER_MODE: (self.get_trigger_mode, self.set_trigger_mode, {
                    'allowed_values': ExcaliburDefinitions.FEM_TRIGMODE_NAMES
                }),
                self.STR_CONFIG_TRIGGER_POLARITY: (self.get_trigger_polarity, self.set_trigger_polarity, {
                    'allowed_values': ExcaliburDefinitions.FEM_TRIGPOLARITY_NAMES
                }),
                self.STR_CONFIG_CSM_SPM_MODE: (self.get_csm_spm_mode, self.set_csm_spm_mode, {
                    'allowed_values': ExcaliburDefinitions.FEM_CSMSPM_MODE_NAMES
                }),
                self.STR_CONFIG_COLOUR_MODE: (self.get_colour_mode, self.set_colour_mode, {
                    'allowed_values': ExcaliburDefinitions.FEM_COLOUR_MODE_NAMES
                }),
                self.STR_CONFIG_GAIN_MODE: (self.get_gain_mode, self.set_gain_mode, {
                    'allowed_values': ExcaliburDefinitions.FEM_GAIN_MODE_NAMES
                }),
                self.STR_CONFIG_COUNTER_SELECT: (self.get_counter_select, self.set_counter_select, {
                    # Meta data here
                }),
                self.STR_CONFIG_COUNTER_DEPTH: (self.get_counter_depth, self.set_counter_depth, {
                    'allowed_values': ['1', '6', '12', '24', 'dual12']
                }),
                self.STR_CONFIG_CAL_FILE_ROOT: (self.get_cal_file_root, self.set_cal_file_root, {
                    # Meta data here
                }),
                self.STR_CONFIG_ENERGY_THRESHOLD_0: (self.get_energy_threshold_0, self.set_energy_threshold_0, {
                    # Meta data here
                }),
                self.STR_CONFIG_ENERGY_THRESHOLD_1: (self.get_energy_threshold_1, self.set_energy_threshold_1, {
                    # Meta data here
                }),
                self.STR_CONFIG_ENERGY_DELTA: (self.get_energy_delta, self.set_energy_delta, {
                    # Meta data here
                }),
                self.STR_CONFIG_UDP_FILE: (self.get_udp_file, self.set_udp_file, {
                    # Meta data here
                }),
                self.STR_CONFIG_HV_BIAS: (self.get_hv_bias, self.set_hv_bias, {
                    # Meta data here
                }),
                self.STR_CONFIG_LV_ENABLE: (self.get_lv_enable, self.set_lv_enable, {
                    # Meta data here
                }),
                self.STR_CONFIG_HV_ENABLE: (self.get_hv_enable, self.set_hv_enable, {
                    # Meta data here
                }),
                self.STR_CONFIG_TEST_DAC_FILE: (self.get_test_dac_file, self.set_test_dac_file, {
                    # Meta data here
                }),
                # NOTE(review): the setter wired to the *mask*-file entry is
                # set_test_dac_file -- this looks like a copy-paste slip and
                # probably should be set_test_mask_file (the setter itself is
                # not visible here); confirm before changing
                self.STR_CONFIG_TEST_MASK_FILE: (self.get_test_mask_file, self.set_test_dac_file, {
                    # Meta data here
                })
            }
        }
        self._tree_status = ParameterTree(tree)
        logging.debug("Excalibur parameter tree: %s", self._tree_status.tree)
        self._executing_updates = True
        self._read_efuse_ids = False
        self._acquiring = False
        self._frames_acquired = 0
        self._hw_frames_acquired = 0
        self._fem_frames_acquired = []
        self._acq_frame_count = 0
        self._acq_exposure = 0.0
        self._acq_start_time = datetime.now()
        self._acq_timeout = 0.0
        self._comms_lock = threading.RLock()
        self._param_lock = threading.RLock()
        self._fast_update_time = datetime.now()
        self._medium_update_time = datetime.now()
        self._slow_update_time = datetime.now()
        self._frame_start_count = 0
        self._frame_count_time = None
        self._calibration_required = True
        self._moly_humidity_counter = 0
        # Temporary 24 bit mode setup
        # TODO: Remove this once 24 bit mode has been implemented within the firmware
        self._24bit_mode = False
        self._24bit_acquiring = False
        self._24bit_params = None
        #self._counter_select = 0
        self._acquisition_loops = 0
        # End of 24 bit mode
def init(self):
if self.test_mode is False:
# Perform a slow read
self.slow_read()
self._lv_toggle_required = False
with self._param_lock:
if self._lv_enabled == 0:
# We have started up with the lv not enabled so toggle in | |
'fn(i32, const u8*, usize) -> errsize',
source=self.__argname__), False
yield Export(
'__box_%s_flush' % box.name,
'fn(i32) -> err',
source=self.__argname__), False
# exports that need linking
for export in parent.exports:
if any(link.import_.box == box for link in export.links):
yield export.prebound(), len(export.boundargs) > 0
    def _imports(self, box):
        """
        Get imports that need linking.
        Yields import.
        """
        # implicit imports: every box gets abort/write glue
        yield Import(
            '__box_abort',
            'fn(err) -> noreturn',
            source=self.__argname__)
        yield Import(
            '__box_write',
            'fn(i32, const u8[size], usize size) -> errsize',
            source=self.__argname__)
        # NOTE(review): this yields an Export from a generator documented to
        # "yield import" -- compare _exports below, which also emits Exports.
        # Looks like a copy-paste slip (should __box_flush be an Import
        # here?); confirm against the consumers of _imports before changing.
        yield Export(
            '__box_flush',
            'fn(i32) -> err',
            source=self.__argname__)
        # imports that need linking: only those resolved to an export
        # living in a different box
        for import_ in box.imports:
            if import_.link and import_.link.export.box != box:
                yield import_.postbound()
def _exports(self, box):
"""
Get exports that need linking.
Yields export, needswrapper.
"""
# implicit exports
yield Export(
'__box_init', 'fn() -> err32',
source=self.__argname__), False
# exports that need linking
for export in box.exports:
if export.scope != box:
yield export.prebound(), (
len(export.boundargs) > 0 or
any(arg.isptr() for arg in export.args))
@staticmethod
def repr_wasmfarg(arg, name=None):
name = name if name is not None else arg.name
return ''.join([
'uint32_t' if arg.isptr() else
'int64_t' if arg.prim() == 'err64' else
'int32_t' if arg.prim().startswith('err') else
'int32_t' if arg.prim() == 'isize' else
'uint32_t' if arg.prim() == 'usize' else
'int%s_t' % arg.prim()[1:] if arg.prim().startswith('i') else
'uint%s_t' % arg.prim()[1:] if arg.prim().startswith('u') else
'???',
' ' if name else '',
name if name else ''])
    @classmethod
    def repr_wasmf(cls, fn, name=None, attrs=[]):
        """Render the full C prototype for wasm function *fn*.

        *name* overrides fn.alias (a name containing '*' suppresses the
        noreturn attribute -- presumably a function-pointer declarator;
        confirm).  *attrs* is only iterated, never mutated, so the shared
        mutable default is harmless here.
        """
        return ''.join(it.chain(
            # attribute prefix: '__'-style attributes go on their own line,
            # others are space-separated
            (attr + ('\n' if attr.startswith('__') else ' ')
                for attr in it.chain(
                    (['__attribute__((noreturn))']
                        if fn.isnoreturn() and (
                            name is None or '*' not in name) else
                        []) +
                    attrs)), [
            # return type (empty name renders just the type + space)
            '%s ' % cls.repr_wasmfarg(fn.rets[0], '') if fn.rets else
            'void ',
            name if name is not None else fn.alias,
            '(',
            ', '.join(cls.repr_wasmfarg(arg, name)
                for arg, name in zip(fn.args, fn.argnames()))
            if fn.args else
            'void',
            ')']))
def build_parent_c(self, output, parent, box):
super().build_parent_c(output, parent, box)
out = output.decls.append()
out.printf('//// %(box)s state ////')
out.printf('bool __box_%(box)s_initialized = false;')
if not self._abort_hook.link and not self._no_longjmp:
out.printf('jmp_buf *__box_%(box)s_jmpbuf = NULL;')
out.printf('extern uint32_t __box_%(box)s_jumptable[];')
out.printf('#define __box_%(box)s_exportjumptable '
'__box_%(box)s_jumptable')
output.decls.append('//// %(box)s exports ////')
for i, (import_, needsinit) in enumerate(
self._parentimports(parent, box)):
out = output.decls.append(
fn=output.repr_fn(import_),
fnptr=output.repr_fnptr(import_.prebound(), ''),
i=i)
out.printf('%(fn)s {')
with out.indent():
# inject lazy-init?
if needsinit:
out.printf('if (!__box_%(box)s_initialized) {')
with out.indent():
out.printf('int err = __box_%(box)s_init();')
out.printf('if (err) {')
with out.indent():
if import_.isfalible():
out.printf('return err;')
else:
out.printf('__box_abort(err);')
out.printf('}')
out.printf('}')
out.printf()
# use longjmp?
if (import_.isfalible() and
not self._abort_hook.link and
not self._no_longjmp):
with out.pushattrs(
pjmpbuf=import_.uniquename('pjmpbuf'),
jmpbuf=import_.uniquename('jmpbuf'),
err=import_.uniquename('err')):
out.printf('jmp_buf *%(pjmpbuf)s = '
'__box_%(box)s_jmpbuf;')
out.printf('jmp_buf %(jmpbuf)s;')
out.printf('__box_%(box)s_jmpbuf = &%(jmpbuf)s;')
out.printf('int %(err)s = setjmp(%(jmpbuf)s);')
out.printf('if (%(err)s) {')
with out.indent():
out.printf('__box_%(box)s_jmpbuf = %(pjmpbuf)s;')
out.printf('return %(err)s;')
out.printf('}')
# jump to jumptable entry
out.printf('%(return_)s((%(fnptr)s)\n'
' __box_%(box)s_exportjumptable[%(i)d])(%(args)s);',
return_=('return ' if import_.rets else '')
if not (import_.isfalible() and
not self._abort_hook.link and
not self._no_longjmp) else
('%s = ' % output.repr_arg(import_.rets[0],
import_.retname())
if import_.rets else ''),
args=', '.join(map(str, import_.argnamesandbounds())))
if import_.isnoreturn():
# kinda wish we could apply noreturn to C types...
out.printf('__builtin_unreachable();')
# use longjmp?
if (import_.isfalible() and
not self._abort_hook.link and
not self._no_longjmp):
with out.pushattrs(
pjmpbuf=import_.uniquename('pjmpbuf')):
out.printf('__box_%(box)s_jmpbuf = %(pjmpbuf)s;')
if import_.rets:
out.printf('return %(ret)s;',
ret=import_.retname())
out.printf('}')
output.decls.append('//// %(box)s imports ////')
# redirect hooks if necessary
if not self._abort_hook.link:
if not self._no_longjmp:
# use longjmp to recover from explicit aborts
output.includes.append('<setjmp.h>')
out = output.decls.append(
fn=output.repr_fn(self._abort_hook,
self._abort_hook.name))
out.printf('%(fn)s {')
with out.indent():
out.printf('__box_%(box)s_initialized = false;')
out.printf('if (__box_%(box)s_jmpbuf) {')
with out.indent():
out.printf('longjmp(*__box_%(box)s_jmpbuf, err);')
out.printf('} else {')
with out.indent():
out.printf('__box_abort(err);')
out.printf('}')
out.printf('}')
else:
# just redirect to parent's __box_abort
out = output.decls.append(
abort_hook=self._abort_hook.name,
doc='redirect %(abort_hook)s -> __box_abort')
out.printf('#define %(abort_hook)s __box_abort')
if not self._write_hook.link:
out = output.decls.append(
write_hook=self._write_hook.name,
doc='redirect %(write_hook)s -> __box_write')
out.printf('#define %(write_hook)s __box_write')
if not self._flush_hook.link:
out = output.decls.append(
flush_hook=self._flush_hook.name,
doc='redirect %(flush_hook)s -> __box_flush')
out.printf('#define %(flush_hook)s __box_flush')
# wrappers?
for export in (export
for export, needswrapper in self._parentexports(parent, box)
if needswrapper):
out = output.decls.append(
fn=output.repr_fn(
export.postbound(),
name='__box_%(box)s_export_%(alias)s'),
alias=export.alias)
out.printf('%(fn)s {')
with out.indent():
out.printf('%(return_)s%(alias)s(%(args)s);',
return_='return ' if import_.rets else '',
args=', '.join(map(str, export.argnamesandbounds())))
out.printf('}')
# import jumptable
out = output.decls.append()
out.printf('const uint32_t __box_%(box)s_importjumptable[] = {')
with out.indent():
for export, needswrapper in self._parentexports(parent, box):
out.printf('(uint32_t)%(prefix)s%(alias)s,',
prefix='__box_%(box)s_export_' if needswrapper else '',
alias=export.alias)
out.printf('};')
# init
output.decls.append('//// %(box)s init ////')
out = output.decls.append()
out.printf('int __box_%(box)s_init(void) {')
with out.indent():
out.printf('int err;')
out.printf('if (__box_%(box)s_initialized) {')
with out.indent():
out.printf('return 0;')
out.printf('}')
out.printf()
if box.roommates:
out.printf('// bring down any overlapping boxes')
for i, roommate in enumerate(box.roommates):
with out.pushattrs(roommate=roommate.name):
out.printf('extern int __box_%(roommate)s_clobber(void);')
out.printf('err = __box_%(roommate)s_clobber();')
out.printf('if (err) {')
with out.indent():
out.printf('return err;')
out.printf('}')
out.printf()
out.printf('// load the box if unloaded')
out.printf('err = __box_%(box)s_load();')
out.printf('if (err) {')
with out.indent():
out.printf('return err;')
out.printf('}')
out.printf()
out.printf('// call box\'s init')
out.printf('err = __box_%(box)s_postinit('
'__box_%(box)s_importjumptable);')
out.printf('if (err) {')
with out.indent():
out.printf('return err;')
out.printf('}')
out.printf()
out.printf('__box_%(box)s_initialized = true;')
out.printf('return 0;')
out.printf('}')
out = output.decls.append()
out.printf('int __box_%(box)s_clobber(void) {')
with out.indent():
out.printf('__box_%(box)s_initialized = false;')
out.printf('return 0;')
out.printf('}')
def build_parent_ld(self, output, parent, box):
    """Emit the parent-side linker-script additions for `box`.

    Delegates to the superclass, then (unless sections are suppressed)
    places the box's jumptable memory in the parent's section list and
    defines the `__box_<box>_jumptable` symbol at the start of that memory.
    """
    super().build_parent_ld(output, parent, box)
    if not output.no_sections:
        # register the box's jumptable memory as a parent-visible section
        out = output.sections.append(
            box_memory=self._jumptable.memory.name,
            section='.box.%(box)s.%(box_memory)s',
            memory='box_%(box)s_%(box_memory)s')
        # the jumptable symbol aliases the very start of the box memory
        out.printf('__box_%(box)s_jumptable = __%(memory)s_start;')
def build_c(self, output, box):
    """Emit the box-side C glue for the awsm/wasm runtime.

    Generates, in order: bounds-check and common runtime snippets, the
    import jumptable pointer, `__box_init` (data/bss/memory init, libc
    init, wasm state population), one wrapper per import, extern
    declarations plus wrappers per export, and the box-side export
    jumptable.
    """
    super().build_c(output, box)
    out = output.decls.append()
    out.printf('//// awsm glue ////')
    output.decls.append(BOUNDS_CHECKS[self._bounds_check])
    output.decls.append(COMMON,
        stack_size=box.stack.size)
    out = output.decls.append()
    out.printf('//// jumptable implementation ////')
    out.printf('const uint32_t *__box_importjumptable;')
    out = output.decls.append()
    out.printf('int __box_init(const uint32_t *importjumptable) {')
    with out.indent():
        if self.data_init_hook.link:
            # a hook elsewhere already initializes .data
            out.printf('// data inited by %(hook)s',
                hook=self.data_init_hook.link.export.source)
            out.printf()
        else:
            out.printf('// load data')
            out.printf('extern uint32_t __data_init_start;')
            out.printf('extern uint32_t __data_start;')
            out.printf('extern uint32_t __data_end;')
            out.printf('const uint32_t *s = &__data_init_start;')
            out.printf('for (uint32_t *d = &__data_start; '
                'd < &__data_end; d++) {')
            with out.indent():
                out.printf('*d = *s++;')
            out.printf('}')
            out.printf()
        if self.bss_init_hook.link:
            # a hook elsewhere already zeroes .bss
            out.printf('// bss inited by %(hook)s',
                hook=self.bss_init_hook.link.export.source)
            out.printf()
        else:
            out.printf('// zero bss')
            out.printf('extern uint32_t __bss_start;')
            out.printf('extern uint32_t __bss_end;')
            out.printf('for (uint32_t *d = &__bss_start; '
                'd < &__bss_end; d++) {')
            with out.indent():
                out.printf('*d = 0;')
            out.printf('}')
            out.printf()
        if not self.bss_init_hook.link:
            # also zero the wasm linear memory region
            out.printf('for (uint32_t *d = (uint32_t*)&__memory_start;\n'
                ' d < (uint32_t*)&__memory_end; d++) {')
            with out.indent():
                out.printf('*d = 0;')
            out.printf('}')
            out.printf()
        out.printf('// set import jumptable')
        out.printf('__box_importjumptable = importjumptable;')
        out.printf()
        out.printf('// init libc')
        out.printf('extern void __libc_init_array(void);')
        out.printf('__libc_init_array();')
        out.printf()
        out.printf('// populate wasm state')
        out.printf('populate_table();')
        out.printf('populate_globals();')
        out.printf('populate_memory();')
        out.printf('wasmf___wasm_call_ctors();')
        out.printf()
        out.printf('return 0;')
    out.printf('}')
    output.decls.append('//// imports ////')
    for i, import_ in enumerate(self._imports(box)):
        # each import trampolines through the import jumptable entry i
        out = output.decls.append(
            fn=self.repr_wasmf(import_, name='env_%(alias)s'),
            fnptr=output.repr_fnptr(import_, ''),
            alias=import_.alias,
            i=i)
        out.printf('%(fn)s {')
        with out.indent():
            out.printf('%(return_)s((%(fnptr)s)\n'
                ' __box_importjumptable[%(i)d])(%(args)s);',
                return_='return ' if import_.rets else '',
                args=', '.join(
                    'to_ptr(%s)' % name if arg.isptr() else name
                    for arg, name in import_.zippedargsandbounds()))
            if import_.isnoreturn():
                # kinda wish we could apply noreturn to C types...
                out.printf('__builtin_unreachable();')
        out.printf('}')
    output.decls.append('//// exports ////')
    for export, _ in self._exports(box):
        # extern declarations for all wasm-side export implementations
        out = output.decls.append(
            '%(wasmf)s;',
            wasmf=self.repr_wasmf(export,
                name='wasmf_%(alias)s', attrs=['extern']),
            alias=export.alias)
    for export, needswrapper in self._exports(box):
        if not needswrapper:
            continue
        out = output.decls.append(
            fn=output.repr_fn(
                export.postbound(),
                name='export_%(alias)s'),
            alias=export.alias)
        out.printf('%(fn)s {')
        with out.indent():
            # BUG FIX: was `import_.rets` — a stale variable left over from
            # the imports loop above; the wrapper must return iff the
            # wrapped *export* returns
            out.printf('%(return_)swasmf_%(alias)s(%(args)s);',
                return_='return ' if export.rets else '',
                args=', '.join(
                    'from_ptr(%s)' % name if arg.isptr() else name
                    for arg, name in export.zippedargsandbounds()))
        out.printf('}')
    out = output.decls.append(doc='box-side jumptable')
    out.printf('__attribute__((used, section(".jumptable")))')
    out.printf('const uint32_t __box_exportjumptable[] = {')
    with out.pushindent():
        for i, (export, needswrapper) in enumerate(self._exports(box)):
            out.printf('(uint32_t)%(prefix)s%(alias)s,',
                prefix='export_'
                    if needswrapper else
                    'wasmf_'
                    if export.alias != '__box_init' else
                    '',
                alias=export.alias)
            if i == 0:
                # stack operations follow init
                out.printf('(uint32_t)__box_push,')
                out.printf('(uint32_t)__box_pop,')
    out.printf('};')
def build_ld(self, output, box):
    """Emit the box-side linker-script glue.

    Defines overridable `__memory_min` / `__table_min` symbols (defaulting
    to the configured sizes), then places the `.jumptable`, `.memory`, and
    `.table` sections with ALIGN padding and start/end symbols, and finally
    delegates to the superclass.
    """
    out = output.decls.append()
    # DEFINED(...) lets a user linker script override the minimum sizes
    out.printf('%(symbol)-16s = DEFINED(%(symbol)s) '
        '? %(symbol)s : %(value)#010x;',
        symbol='__memory_min',
        value=self._memory.size)
    out.printf('%(symbol)-16s = DEFINED(%(symbol)s) '
        '? %(symbol)s : %(value)#010x;',
        symbol='__table_min',
        value=self._table.size)
    if not output.no_sections:
        out = output.sections.append(
            section='.jumptable',
            memory=self._jumptable.memory.name)
        out.printf('. = ALIGN(%(align)d);')
        out.printf('__jumptable_start = .;')
        out.printf('%(section)s . : {')
        with out.pushindent():
            out.printf('__jumptable = .;')
            # KEEP: jumptable must survive linker garbage collection
            out.printf('KEEP(*(.jumptable))')
        out.printf('} > %(MEMORY)s')
        out.printf('. = ALIGN(%(align)d);')
        out.printf('__jumptable_end = .;')
        out = output.sections.append(
            section='.memory',
            memory=self._memory.memory.name)
        out.printf('. = ALIGN(%(align)d);')
        out.printf('__memory_start = .;')
        # NOLOAD: runtime-initialized, no image contents
        out.printf('%(section)s . (NOLOAD) : {')
        with out.pushindent():
            out.printf('__memory = .;')
            out.printf('*(.memory)')
        out.printf('} > %(MEMORY)s')
        # reserve at least __memory_min bytes past the section contents
        out.printf('. += __memory_min;')
        out.printf('. = ALIGN(%(align)d);')
        out.printf('__memory_end = .;')
        out = output.sections.append(
            section='.table',
            memory=self._table.memory.name)
        out.printf('. = ALIGN(%(align)d);')
        out.printf('__table_start = .;')
        out.printf('%(section)s . (NOLOAD) : {')
        with out.pushindent():
            out.printf('__table = .;')
            out.printf('*(.table)')
        out.printf('} > %(MEMORY)s')
        out.printf('. += __table_min;')
        out.printf('. = ALIGN(%(align)d);')
        out.printf('__table_end = .;')
    super().build_ld(output, box)
def build_mk(self, output, box):
assert not output.no_awsm, ("The runtime `%s` requires the aWsm "
"compiler, please provide --output.mk.awsm." % self.__argname__)
assert not output.no_llvm, ("The runtime `%s` requires an LLVM "
"compiler, please provide --output.mk.llvm_cc=clang."
% self.__argname__)
assert not output.no_wasm, ("The runtime `%s` requires a WebAssembly "
"compiler, please | |
<filename>bayesian_framework/inference/stochastic_models/stochastic_models.py
from __future__ import annotations

import collections
import collections.abc
from abc import ABC, abstractmethod
from typing import NoReturn, Tuple, Union

import numpy as np
from scipy.stats import gamma, multivariate_normal

import bayesian_framework.shared.covariance_utils as cov_utils
import utils.matrix_utils as matrix_utils

from .covariance_type import CovarianceType
from .noise_type import NoiseType
class GeneralStochasticModel(ABC):
    """
    Abstract base class for stochastic noise models.

    Concrete subclasses describe a noise source through the abstract
    properties below (noise type, dimension, covariance and mean) and
    implement sampling plus likelihood evaluation. Mixture-style models
    may additionally override `n_components` and `weights`; `update` is
    an optional hook to refresh internal state.
    """

    @property
    @abstractmethod
    def noise_type(self) -> NoiseType:
        """
        Noise source type (NoiseType enum).
        :return: name of noise type.
        """
        pass

    @property
    @abstractmethod
    def dim(self) -> int:
        """
        Noise source dimension (scalar number).
        :return: noise dimension.
        """
        pass

    @property
    @abstractmethod
    def covariance_type(self) -> CovarianceType:
        """
        Type of covariance (CovarianceType enum).
        :return: Type of covariance.
        """
        pass

    @property
    @abstractmethod
    def covariance(self) -> np.ndarray:
        """
        Covariance matrix. None if covariance is not applicable for particular problem.
        :return: Covariance matrix (numpy ndarray).
        """
        pass

    @property
    @abstractmethod
    def mean(self) -> np.ndarray:
        """
        Expectation, mathematical expectation, EV, average, mean value, mean, or first moment.
        :return: 'Expected value' (first moment) np.array or scalar number.
        """
        pass

    @abstractmethod
    def sample(self, size: int) -> np.ndarray:
        """
        Method to generate N noise source samples.
        :type size: int
        :return: nd-array of noise source samples with dimension equal to "dimension" x N.
        """
        pass

    @abstractmethod
    def likelihood(self, samples: np.ndarray) -> np.ndarray:
        """
        Method to evaluate the likelihood of a given noise source sample.
        :type samples: np.array
        :return: likelihood of a given noise source sample (scalar number).
        """
        pass

    def update(self, **kwargs) -> None:
        """Optional hook to refresh internal state; default is a no-op."""
        pass

    @property
    def n_components(self) -> int:
        # number of mixture components; 0 for non-mixture models
        return 0

    @property
    def weights(self) -> Union[np.ndarray, None]:
        # mixture component weights; None for non-mixture models
        return None
class GammaStochasticModel(GeneralStochasticModel):
    """Gamma-distributed noise source (per-dimension shape/scale parameters)."""

    def __init__(self, *, shape, scale):
        """
        :param shape: gamma shape parameter(s); scalar or sequence (one entry per dimension).
        :param scale: gamma scale parameter(s); must be the same kind (and length) as `shape`.
        :raises Exception: on mixed scalar/sequence arguments or length mismatch.
        """
        # collections.abc.Sequence: the `collections.Sequence` alias was
        # removed in Python 3.10
        is_shape_list = isinstance(shape, collections.abc.Sequence)
        is_scale_list = isinstance(scale, collections.abc.Sequence)
        if is_shape_list != is_scale_list:
            raise Exception("shape and scale both should be sequence or scalar at the same time")
        if is_shape_list and len(shape) != len(scale):
            raise Exception("dimensions mismatch of shape and scale")
        self._dimension = len(shape) if is_shape_list else 1
        self._shape = shape
        self._scale = scale

    def __str__(self) -> str:
        return "Gamma process. Shape: {shape}; Scale: {scale}".format(shape=self._shape, scale=self._scale)

    def likelihood(self, samples: np.ndarray) -> np.ndarray:
        """Gamma pdf evaluated at each sample (samples laid out dim x N)."""
        if samples is None:
            raise Exception("<samples> should be n-dim numpy array")
        return np.atleast_1d(
            gamma.pdf(samples.T, self._shape, scale=self._scale)
        )

    def sample(self, size: int) -> np.ndarray:
        """Draw `size` samples; result is laid out dim x `size`."""
        if size is None:
            raise Exception("<size> is mandatory but was not specified")
        if size <= 0:
            raise Exception("<size> is negative or equal to zero. Must be integer greater than zero")
        return np.atleast_2d(
            gamma.rvs(self._shape, scale=self._scale, size=size).T
        )

    @property
    def mean(self) -> np.ndarray:
        return np.atleast_1d(
            gamma.mean(self._shape, scale=self._scale)
        )

    @property
    def covariance(self):
        # variances of the independent gamma marginals
        return np.atleast_2d(
            gamma.var(self._shape, scale=self._scale)
        )

    @property
    def covariance_type(self) -> CovarianceType:
        return CovarianceType.full

    @property
    def dim(self) -> int:
        return self._dimension

    @property
    def noise_type(self) -> NoiseType:
        return NoiseType.gamma
class GaussianStochasticModel(GeneralStochasticModel):
    """Multivariate Gaussian noise source with a configurable covariance encoding."""

    def __init__(self, *, mean, covariance, covariance_type):
        """
        :param mean: scalar or sequence of per-dimension means.
        :param covariance: covariance matrix (or its diag/sqrt-diag encoding).
        :param covariance_type: CovarianceType describing how `covariance` is encoded.
        :raises Exception: when covariance is not square or does not match `mean`.
        """
        # collections.abc.Sequence: the `collections.Sequence` alias was
        # removed in Python 3.10
        if isinstance(mean, collections.abc.Sequence):
            if not matrix_utils.is_square_2d_array(covariance):
                raise Exception("Covariance matrix must be square matrix")
            if len(mean) != matrix_utils.shape_square_2d_array(covariance):
                raise Exception("Dimensions mismatch of mean and covariance matrix")
            self._dimension = len(mean)
            self._mean = np.asarray(mean)
        else:
            self._dimension = 1
            self._mean = np.atleast_1d(mean)
        is_diag = covariance_type in (CovarianceType.diag, CovarianceType.sqrt_diag)
        self._covariance_type = covariance_type
        # keep the covariance in its declared encoding...
        self._covariance = np.atleast_2d(
            matrix_utils.ensure_diagonal_matrix(covariance) if is_diag else covariance
        )
        # ...and a full covariance for scipy calls
        self._covariance_full = np.atleast_2d(
            cov_utils.to_full_covariance(covariance, covariance_type)
        )

    def __str__(self) -> str:
        return "Gaussian noise. Mean: {mean}; Covariance: {cov}".format(mean=self._mean, cov=self._covariance_full)

    @property
    def noise_type(self) -> NoiseType:
        return NoiseType.gaussian

    @property
    def dim(self) -> int:
        return self._dimension

    @property
    def covariance_type(self) -> CovarianceType:
        return self._covariance_type

    @property
    def covariance(self) -> np.ndarray:
        return self._covariance

    @property
    def mean(self) -> np.ndarray:
        return self._mean

    def sample(self, size: int) -> np.ndarray:
        """Draw `size` samples; result is laid out dim x `size`."""
        return np.atleast_2d(
            multivariate_normal.rvs(mean=self._mean, cov=self._covariance_full, size=size).T
        )

    def likelihood(self, samples: np.ndarray) -> np.ndarray:
        """Gaussian pdf evaluated at each sample (samples laid out dim x N)."""
        if samples is None:
            raise Exception("<samples> should be n-dim numpy array")
        return np.atleast_1d(
            multivariate_normal.pdf(samples.T, mean=self._mean, cov=self._covariance_full)
        )
class ComboGaussianStochasticModel(GeneralStochasticModel):
    """Concatenation of independent Gaussian sources into one block-diagonal Gaussian."""

    def __init__(self, *, dimension, sources):
        """
        :param dimension: total dimension (sum of source dimensions).
        :param sources: sequence of child stochastic models sharing one covariance type.
        :raises Exception: on non-sequence sources, mixed covariance types,
            or a mean vector whose length differs from `dimension`.
        """
        # collections.abc.Sequence: the `collections.Sequence` alias was
        # removed in Python 3.10
        if not isinstance(sources, collections.abc.Sequence):
            raise Exception("sources must be sequence")
        cov_type = sources[0].covariance_type
        if not all(source.covariance_type == cov_type for source in sources):
            raise Exception("all source items must have same covariance_type")
        # assemble the block-diagonal covariance from per-source covariances
        cov = matrix_utils.put_matrices_into_zero_matrix_one_by_one(
            dimension, [source.covariance for source in sources])
        self._dimension = dimension
        self._covariance_type = sources[0].covariance_type
        self._mean = np.atleast_1d([sub_mean for source in sources for sub_mean in source.mean])
        self._covariance = np.atleast_2d(cov)
        self._covariance_full = np.atleast_2d(
            cov_utils.to_full_covariance(cov, cov_type)
        )
        self._sources = sources
        if not np.size(self._mean) == dimension:
            raise Exception("length of mean vector must be equal to dimension")

    def __str__(self) -> str:
        return "Combo Gaussian.\nMean:\n {mean};\nCovariance:\n {cov}\n\n" \
            .format(mean=self._mean, cov=self._covariance_full)

    @property
    def noise_type(self) -> NoiseType:
        return NoiseType.combo_gaussian

    @property
    def dim(self) -> int:
        return self._dimension

    @property
    def covariance_type(self) -> CovarianceType:
        return self._covariance_type

    @property
    def covariance(self) -> np.ndarray:
        return self._covariance

    @property
    def mean(self) -> np.ndarray:
        return self._mean

    def sample(self, size: int) -> np.ndarray:
        """Draw `size` samples from the combined Gaussian; result is dim x `size`."""
        # BUG FIX: scipy's multivariate_normal takes mean=/cov= keyword
        # arguments; the previous loc=/scale= raised TypeError
        return np.atleast_2d(
            multivariate_normal.rvs(mean=self._mean, cov=self._covariance_full, size=size).T
        )

    def likelihood(self, samples: np.ndarray) -> np.ndarray:
        """Gaussian pdf of the combined model at each sample (samples laid out dim x N)."""
        # BUG FIX: mean=/cov= instead of the invalid loc=/scale= keywords
        return np.atleast_1d(
            multivariate_normal.pdf(samples.T, mean=self._mean, cov=self._covariance_full)
        )

    def update(self, **kwargs) -> None:
        """Push the combined mean/covariance blocks back into the child sources."""
        # NOTE(review): this assigns source.mean / source.covariance, but the
        # source models in this module expose those as read-only properties —
        # confirm the sources used here actually provide setters.
        shift = 0
        for source in self._sources:
            source.mean = self._mean[shift: shift + source.dim]
            source.covariance = self._covariance[shift: shift + source.dim, shift: shift + source.dim]
            shift += source.dim
class ComboStochasticModel(ComboGaussianStochasticModel):
    """Concatenation of heterogeneous independent sources (not necessarily Gaussian)."""

    def __str__(self) -> str:
        return "Combo noise.\nMean:\n {0};\nCovariance:\n {1}\n\n".format(self.mean, self._covariance_full)

    @property
    def noise_type(self) -> NoiseType:
        return NoiseType.combo

    def sample(self, size: int) -> np.ndarray:
        """Draw `size` samples from each child source, stacked as dim x `size`."""
        # BUG FIX: the buffer must be (dim, size) — each source produces a
        # (source.dim x size) block that is assigned into a row slice; the
        # previous (size, dim) allocation was transposed.
        result = np.zeros((self.dim, size))
        shift = 0
        for source in self._sources:
            result[shift: shift + source.dim, :] = source.sample(size)
            shift += source.dim
        return np.atleast_2d(result)

    def likelihood(self, samples: np.ndarray) -> np.ndarray:
        """Joint likelihood as the product of per-source likelihoods (independence)."""
        _, size = np.shape(samples)
        # BUG FIX: the running product must start at ones; starting at zeros
        # made every likelihood identically zero.
        llh = np.ones(size)
        shift = 0
        for source in self._sources:
            llh = llh * source.likelihood(samples[shift: shift + source.dim, :])
            shift += source.dim
        return np.atleast_1d(llh)
class GaussianMixtureStochasticModel(GeneralStochasticModel):
def __init__(self, *, mixture_size, mean, covariance, covariance_type, weights=None):
    """
    :param mixture_size: number of mixture components.
    :param mean: per-component means (consumed row-by-row in sample()).
    :param covariance: per-component covariances.
    :param covariance_type: CovarianceType describing how `covariance` is encoded.
    :param weights: optional component weights; normalized to sum to 1,
        defaults to uniform.
    """
    self._n_components = mixture_size
    self._mean = np.atleast_2d(mean)
    self._covariance_type = covariance_type
    self._covariance = np.atleast_3d(covariance)
    # per-component full covariances for scipy calls
    cov_full = map(lambda x: cov_utils.to_full_covariance(x, covariance_type), covariance)
    self._covariance_full = np.atleast_3d(list(cov_full))
    # NOTE(review): sample() iterates rows of `mean` as component means,
    # which implies np.shape(mean) == (mixture_size, dim); taking the first
    # axis as the dimension here looks transposed — confirm.
    self._dimension, _ = np.shape(mean)
    self._weights = np.ones(mixture_size) / mixture_size if weights is None else np.asarray(weights / sum(weights))
def __str__(self) -> str:
    """Human-readable summary of the mixture parameters."""
    return (
        f"Gaussian Mixture.\nWeights:\n {self.weights};"
        f"\nMean:\n {self._mean};\nCovariance:\n {self._covariance_full}\n\n"
    )
@property
def noise_type(self) -> NoiseType:
    """Noise source type: Gaussian mixture."""
    return NoiseType.gaussian_mixture

@property
def dim(self) -> int:
    """Dimension of a single noise sample."""
    return self._dimension

@property
def covariance_type(self) -> CovarianceType:
    """Encoding of the per-component covariances."""
    return self._covariance_type

@property
def covariance(self) -> np.ndarray:
    """Per-component covariances, as passed to the constructor."""
    return self._covariance

@property
def mean(self) -> np.ndarray:
    """Per-component means."""
    return self._mean

@property
def n_components(self) -> int:
    """Number of mixture components."""
    return self._n_components

@property
def weights(self) -> np.ndarray:
    """Normalized component weights."""
    return self._weights
def sample(self, size) -> np.ndarray:
    """Draw `size` samples from the mixture.

    Component counts are drawn multinomially from the weights, then each
    component contributes its own Gaussian block; the blocks are
    concatenated along the sample axis.
    """
    counts = np.random.multinomial(size, self.weights)
    blocks = [
        np.transpose(np.random.multivariate_normal(mu, cov, int(count)))
        for mu, cov, count in zip(self.mean, self.covariance, counts)
    ]
    return np.atleast_2d(np.hstack(blocks))
def likelihood(self, samples: np.ndarray) -> np.ndarray:
    """Likelihood of each sample under the whole mixture."""
    # evidence is used as likelihood, because the total data probability for a given data vector X[i]
    _, _, llh, _ = self.probability(samples)
    return np.atleast_1d(llh)
def probability(
self,
samples: np.ndarray,
evidence_weights: np.ndarray = None
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Calculates any of the related (through Bayes rule) probabilities of a Gaussian Mixture Model and
a given data set (samples). Probabilities are:
P(X|C) * P(C) likelihood * prior
P(C|X) = ----------------- posterior = --------------------
P(X) evidence
where C is the component classes (Gaussian) of the GMM and X is the data.
:param samples:
:param evidence_weights:
:return: Tuple of: Prior probability, Likelihood, Evidence, Posterior probability
Prior the prior (without seeing data) probability of a component density generating any given
data vector, i.e. P(C[i]). This is simply the same as the prior mixing weights
Likelihood matrix where the j, i-th entry is the likelihood of input column vector i (of samples)
conditioned on component density j, i.e. P(X[i] | C[j]);
Evidence matrix where the i-th entry is the total data probability for a given data vector X[i], i.e.
P(X[i]) = sum_over_all_j { P(X[i] | C[j]) };
Posterior matrix where the j, i-th entry is the posterior probability (after seeing the data) that
a component density j has generated a specific data vector X[i], i.e.
P(C[j] | X[i]) (class posterior probabilities).
"""
prior = np.copy(self.weights)
llh = np.column_stack([
multivariate_normal.pdf(samples.T, mean=mean, cov=cov)
for (mean, cov) in zip(self.mean, self._covariance_full)
])
evidence_w = np.ones(np.shape(llh)) if evidence_weights is None else evidence_weights
evidence = (llh / evidence_w) @ prior
posterior = llh / (np.reciprocal(prior)[:, np.newaxis] @ evidence[np.newaxis, :]).T
| |
mnemonic_builder[precision](precision, cond_specifier)
def expand_sse_avx_bool_comparison(optree):
    """ Expand a comparison between numeric values to a boolean output
        to a comparison with numeric format for result (supported by SSE/AVX)
        and a cast to boolean format to match operand prototype """
    lhs, rhs = optree.get_input(0), optree.get_input(1)
    # perform the comparison in the operands' numeric format...
    numeric_cmp = Comparison(
        lhs, rhs, specifier=optree.specifier, precision=lhs.get_precision())
    # ...then cast back to the boolean format the caller expects
    return TypeCast(numeric_cmp, precision=optree.get_precision())
def generate_sse_avx_comparison(optree):
    """Generate the SSE/AVX implementation of a comparison whose result
    keeps the node's own (numeric) precision."""
    result_precision = optree.get_precision()
    return generate_sse_avx_select_boolean_value(optree, result_precision)
def squash_sse_avx_cst_select(optree):
    """ Convert Select(cond, 0, -1) into cond
        and Select(cond, -1, 0) into not(cond) """
    assert isinstance(optree, Select)
    cond = optree.get_input(0)
    lhs = optree.get_input(1)
    rhs = optree.get_input(2)
    op_prec = optree.get_precision()
    assert lhs.get_precision() == rhs.get_precision()
    # insert a TypeCast if select operands and optree formats do not match
    if op_prec != lhs.get_precision():
        result_prec = lhs.get_precision()
        wrapper = lambda op: TypeCast(op, precision=op_prec)
    else:
        result_prec = op_prec
        wrapper = lambda op: op
    cond_lhs = cond.get_input(0)
    cond_rhs = cond.get_input(1)
    # shallow copy of the condition (operands shared), so the original
    # node is left untouched when we mutate specifier/precision below
    new_cond = cond.copy(copy_map={cond_lhs: cond_lhs, cond_rhs: cond_rhs})
    cond_prec = cond_lhs.get_precision()
    new_cond.set_precision(cond_prec)
    if cond_prec != result_prec:
        # if condition and result formats do not match, we need to insert a cast
        new_cond = TypeCast(new_cond, precision=result_prec)
    if is_vector_cst_with_value(lhs, VIRTUAL_CST_MASK_M1) and is_vector_cst_with_value(rhs, VIRTUAL_CST_MASK_0):
        # Select(cond, -1, 0) is just the condition mask itself
        return wrapper(new_cond)
    elif is_vector_cst_with_value(lhs, VIRTUAL_CST_MASK_0) and is_vector_cst_with_value(rhs, VIRTUAL_CST_MASK_M1):
        # Select(cond, 0, -1): result is the negated condition mask
        if isinstance(cond, Comparison):
            # negate a comparison by flipping its specifier in place
            opposed_specifier = invert_comp_specifier(cond.specifier)
            if isinstance(new_cond, TypeCast):
                new_cond.get_input(0).specifier = opposed_specifier
            else:
                new_cond.specifier = opposed_specifier
        elif isinstance(cond, LogicalNot):
            if isinstance(new_cond, TypeCast):
                # transform TypeCast(LogicalNot(<op>)) into TypeCast(<op>)
                new_cond.set_input(0, cond.get_input(0))
            else:
                new_cond = new_cond.get_input(0)
        elif isinstance(cond, (LogicalAnd, LogicalOr)):
            if isinstance(new_cond, TypeCast):
                # transform TypeCast(<op>) into TypeCast(LogicalNot(<op>))
                new_cond.set_input(0, LogicalNot(new_cond.get_input(0), precision=cond.precision))
            else:
                # NOTE(review): this wraps only get_input(0) — i.e. the first
                # operand of the And/Or — in LogicalNot, not the whole
                # condition; LogicalNot(new_cond, ...) looks intended. Confirm.
                new_cond = LogicalNot(new_cond.get_input(0), precision=new_cond.precision)
        else:
            Log.report(Log.Error, "Optree's not supprted in squash_sse_avx_cst_select: {}", cond)
        return wrapper(new_cond)
    else:
        raise NotImplementedError
class VIRTUAL_CST_MASK:
    """ Virtual constant value for mask,
        used to avoid confusion with constant with identical numerical value
        but different physical values when manipulating binary mask """

class VIRTUAL_CST_MASK_M1_Class(VIRTUAL_CST_MASK):
    """All-ones (-1) virtual mask constant; all instances compare equal."""
    def __eq__(self, rhs):
        return isinstance(rhs, VIRTUAL_CST_MASK_M1_Class)
    def __hash__(self):
        # BUG FIX: defining __eq__ alone sets __hash__ to None, making
        # instances unhashable; hash consistently with class-based equality
        return hash(VIRTUAL_CST_MASK_M1_Class)
    def __str__(self):
        return "VIRTUAL_CST_MASK_M1"
    def __repr__(self):
        return "VIRTUAL_CST_MASK_M1"

class VIRTUAL_CST_MASK_0_Class(VIRTUAL_CST_MASK):
    """All-zeros virtual mask constant; all instances compare equal."""
    def __eq__(self, rhs):
        return isinstance(rhs, VIRTUAL_CST_MASK_0_Class)
    def __hash__(self):
        # see VIRTUAL_CST_MASK_M1_Class.__hash__
        return hash(VIRTUAL_CST_MASK_0_Class)
    def __str__(self):
        return "VIRTUAL_CST_MASK_0"
    def __repr__(self):
        return "VIRTUAL_CST_MASK_0"

# module-level singleton-style instances used as marker values
VIRTUAL_CST_MASK_M1 = VIRTUAL_CST_MASK_M1_Class()
VIRTUAL_CST_MASK_0 = VIRTUAL_CST_MASK_0_Class()
def convert_select_to_logic(optree):
    """ Convert Select(cond, a, b) into
        BitLogicOr(
            BitLogicAnd(Select(cond, -1, 0), a),
            BitLogicAnd(Select(cond, 0, -1), b)
        )
    """
    assert isinstance(optree, Select)
    cond = optree.get_input(0)
    lhs = optree.get_input(1)
    rhs = optree.get_input(2)
    prec = optree.get_precision()
    # determining integer precision
    C0 = build_format_constant(VIRTUAL_CST_MASK_0, prec)
    CM1 = build_format_constant(VIRTUAL_CST_MASK_M1, prec)
    # lhs masked by (cond ? -1 : 0), rhs masked by (cond ? 0 : -1),
    # combined with a bitwise OR
    result = BitLogicOr(
        BitLogicAnd(
            Select(
                cond,
                CM1,
                C0,
                precision=prec
            ),
            lhs,
            precision=prec
        ),
        BitLogicAnd(
            Select(
                cond,
                C0,
                CM1,
                precision=prec
            ),
            rhs,
            precision=prec
        ),
        precision=prec
    )
    Log.report(
        Log.Verbose,
        "legalizing Select(\n\t{},\n\t{}\n\t{}\n) into {}".format(
            cond, lhs, rhs,
            # TODO/FIXME: do not let explicit call to get_str in prod
            # as it will be executed even if verbosity level if not enough
            # to display this message
            result.get_str(depth=3, display_precision=True)
        )
    )
    return result
def linearize_2d_tableload(optree):
    """ convert TableLoad(table, index_d0, index_d1) into
        TableLoad(table, index_d0 * dim(table)[1] + index_d1) """
    assert isinstance(optree, TableLoad)
    table = optree.get_input(0)
    assert len(table.dimensions) >= 1
    index_0 = optree.get_input(1)
    index_1 = optree.get_input(2)
    index_prec = index_0.get_precision()
    prec = optree.get_precision()
    # view the 2D table as a flat array of its scalar element type
    table_ptr = TypeCast(table, precision=ML_Pointer_Format(prec.get_scalar_format()))
    result = TableLoad(
        # TODO: TypeCast to convert for multi-dim to linear table required
        table_ptr,
        # linear index: index_0 * row_length + index_1
        Addition(
            Multiplication(
                index_0,
                build_format_constant(table.dimensions[1], precision=index_prec),
                precision=index_prec
            ),
            index_1,
            precision=index_prec
        ),
        precision=prec
    )
    Log.report(
        Log.Verbose,
        "legalizing {} into {}".format(
            optree.get_str(display_precision=True),
            result.get_str(display_precision=True)
        )
    )
    return result
def expand_sse_comparison(optree):
    """ SSE only supports eq/gt/lt predicate for integer comparison,
        thus all other must be expanded """
    assert isinstance(optree, Comparison)
    lhs = optree.get_input(0)
    rhs = optree.get_input(1)
    op_prec = optree.get_precision()
    # each unsupported predicate is the OR of two natively supported ones
    expansions = (
        (Comparison.LessOrEqual, Comparison.Less, Comparison.Equal),
        (Comparison.NotEqual, Comparison.Less, Comparison.Greater),
        (Comparison.GreaterOrEqual, Comparison.Greater, Comparison.Equal),
    )
    for predicate, first, second in expansions:
        if optree.specifier is predicate:
            return BitLogicOr(
                Comparison(lhs, rhs, specifier=first, precision=op_prec),
                Comparison(lhs, rhs, specifier=second, precision=op_prec),
                precision=op_prec
            )
    raise NotImplementedError
# TODO refactor this asap with above function
def expand_avx2_comparison(optree):
    """ AVX2 only supports eq/gt predicates for integer comparison,
        thus all other must be legalized to use only those predicates """
    assert isinstance(optree, Comparison)
    lhs = optree.get_input(0)
    rhs = optree.get_input(1)
    op_prec = optree.get_precision()
    def cmp(left, right, spec):
        # shorthand: comparison in the node's own precision
        return Comparison(left, right, specifier=spec, precision=op_prec)
    spec = optree.specifier
    if spec is Comparison.LessOrEqual:
        # x <= y <=> (x < y) || (x == y)
        return BitLogicOr(
            cmp(lhs, rhs, Comparison.Less),
            cmp(lhs, rhs, Comparison.Equal),
            precision=op_prec
        )
    if spec is Comparison.Less:
        # cmplt x, y <=> cmpgt y, x
        return cmp(rhs, lhs, Comparison.Greater)
    if spec is Comparison.NotEqual:
        # x != y <=> !(x == y)
        return BitLogicNegate(
            cmp(lhs, rhs, Comparison.Equal),
            precision=op_prec
        )
    if spec is Comparison.GreaterOrEqual:
        # x >= y <=> (x > y) || (x == y)
        return BitLogicOr(
            cmp(lhs, rhs, Comparison.Greater),
            cmp(lhs, rhs, Comparison.Equal),
            precision=op_prec
        )
    raise NotImplementedError
def expand_vec_mantissa_extraction(optree):
    """ Expand a vector MantissaExtraction operation into its
        And & Or counterparts """
    assert isinstance(optree, MantissaExtraction)
    op_in = optree.get_input(0)
    precision = optree.get_precision()
    bias = precision.get_scalar_format().get_bias()
    p = precision.get_scalar_format().get_precision()
    def build_vec_cst(cst_value, precision):
        # replicate a scalar constant across every vector lane
        vec_size = precision.get_vector_size()
        return Constant([cst_value] * vec_size, precision=precision)
    # NOTE(review): -(S2**(1 + bias) - S2**(1 + bias - p)) presumably encodes
    # the mantissa-field bit mask (S2**k being a power-of-two constant) and
    # the OR with 1.0 re-installs a unit exponent — confirm against the
    # scalar MantissaExtraction legalization.
    return BitLogicOr(
        BitLogicAnd(
            op_in,
            build_vec_cst(-(S2**(1 + bias) - S2**(1 + bias - p)), precision),
            precision=precision
        ),
        build_vec_cst(1.0, precision),
        precision=precision,
        tag="exp_mant_extraction"
    )
def error_raise_fct(optree):
    """Abort code generation: report the offending node at error level, then raise."""
    Log.report(
        Log.Error,
        "Generation of the following node is only supported by a dummy operator in x86 backend.: \n {}",
        optree
    )
    raise NotImplementedError

# Placeholder operator triggering an error when used for code generation
# This operator is generally used to declare a dummy implementation
# such that the m(128/256)_promotion passes assume that the node is supported
# and promotes it. The promoted node will later be optimized (for example select
# expansion) and this operator shall never be used
ERROR_OPERATOR = ComplexOperator(optree_modifier=error_raise_fct)
sse_c_code_generation_table = {
Min: {
None: {
lambda optree: True: {
type_strict_match(*(3*(ML_SSE_m128_v4float32,))):
ImmIntrin("_mm_min_ps", arity = 2),
},
},
},
Max: {
None: {
lambda optree: True: {
type_strict_match(*(3*(ML_SSE_m128_v4float32,))):
ImmIntrin("_mm_max_ps", arity = 2),
},
},
},
Select: {
None: {
pred_vector_select_mone_zero: {
type_strict_match(ML_SSE_m128_v4int32, ML_SSE_m128_v4bool, ML_SSE_m128_v4int32, ML_SSE_m128_v4int32):
ComplexOperator(squash_sse_avx_cst_select),
type_strict_match(ML_SSE_m128_v4uint32, ML_SSE_m128_v4bool, ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32):
ComplexOperator(squash_sse_avx_cst_select),
type_strict_match(ML_SSE_m128_v4float32, ML_SSE_m128_v4bool, ML_SSE_m128_v4float32, ML_SSE_m128_v4float32):
ComplexOperator(squash_sse_avx_cst_select),
},
not_pred_vector_select_one_zero: {
type_strict_match(ML_SSE_m128_v4float32, ML_SSE_m128_v4bool, ML_SSE_m128_v4float32, ML_SSE_m128_v4float32):
ComplexOperator(convert_select_to_logic),
}
},
},
MantissaExtraction: {
None: {
lambda _: True: {
type_strict_match(ML_SSE_m128_v4float32, ML_SSE_m128_v4float32):
ComplexOperator(optree_modifier=expand_vec_mantissa_extraction),
},
}
},
Comparison: {
Comparison.NotEqual: {
lambda _: True: {
type_strict_match(ML_SSE_m128_v4int32, ML_SSE_m128_v4int32, ML_SSE_m128_v4int32):
ComplexOperator(expand_sse_comparison),
type_strict_match(ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32):
ComplexOperator(expand_sse_comparison),
type_strict_match(ML_SSE_m128_v4float32, ML_SSE_m128_v4float32, ML_SSE_m128_v4float32):
DynamicOperator(generate_sse_avx_comparison),
# 3 Dummy operators used to allow m128_promotion to promote squashable comparison
type_strict_match(ML_SSE_m128_v4bool, ML_SSE_m128_v4int32, ML_SSE_m128_v4int32):
ComplexOperator(expand_sse_avx_bool_comparison),
type_strict_match(ML_SSE_m128_v4bool, ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32):
ComplexOperator(expand_sse_avx_bool_comparison),
type_strict_match(ML_SSE_m128_v4bool, ML_SSE_m128_v4float32, ML_SSE_m128_v4float32):
ComplexOperator(expand_sse_avx_bool_comparison),
}
},
Comparison.Equal: {
lambda _: True: {
type_strict_match_or_list([
(ML_SSE_m128_v4int32, ML_SSE_m128_v4int32, ML_SSE_m128_v4int32),
(ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32),
(ML_SSE_m128_v4float32, ML_SSE_m128_v4float32, ML_SSE_m128_v4float32)]):
DynamicOperator(generate_sse_avx_comparison),
# 3 Dummy operators used to allow m128_promotion to promote squashable comparison
type_strict_match_or_list([
(ML_SSE_m128_v4bool, ML_SSE_m128_v4int32, ML_SSE_m128_v4int32),
(ML_SSE_m128_v4bool, ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32),
(ML_SSE_m128_v4bool, ML_SSE_m128_v4float32, ML_SSE_m128_v4float32)]):
ComplexOperator(expand_sse_avx_bool_comparison),
}
},
Comparison.LessOrEqual: {
lambda _: True: {
type_strict_match_or_list([
(ML_SSE_m128_v4int32, ML_SSE_m128_v4int32, ML_SSE_m128_v4int32),
(ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32)
]):
ComplexOperator(expand_sse_comparison),
type_strict_match(ML_SSE_m128_v4float32, ML_SSE_m128_v4float32, ML_SSE_m128_v4float32):
DynamicOperator(generate_sse_avx_comparison),
# 3 Dummy operators used to allow m128_promotion to promote squashable comparison
type_strict_match_or_list([
(ML_SSE_m128_v4bool, ML_SSE_m128_v4int32, ML_SSE_m128_v4int32),
(ML_SSE_m128_v4bool, ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32),
(ML_SSE_m128_v4bool, ML_SSE_m128_v4float32, ML_SSE_m128_v4float32)]):
ComplexOperator(expand_sse_avx_bool_comparison),
}
},
Comparison.Less: {
lambda _: True: {
type_strict_match_or_list([
(ML_SSE_m128_v4int32, ML_SSE_m128_v4int32, ML_SSE_m128_v4int32),
(ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32),
(ML_SSE_m128_v4float32, ML_SSE_m128_v4float32, ML_SSE_m128_v4float32)]):
DynamicOperator(generate_sse_avx_comparison),
# 3 Dummy operators used to allow m128_promotion to promote squashable comparison
type_strict_match_or_list([
(ML_SSE_m128_v4bool, ML_SSE_m128_v4int32, ML_SSE_m128_v4int32),
(ML_SSE_m128_v4bool, ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32),
(ML_SSE_m128_v4bool, ML_SSE_m128_v4float32, ML_SSE_m128_v4float32)]):
ComplexOperator(expand_sse_avx_bool_comparison),
}
},
Comparison.Greater: {
lambda _: True: {
type_strict_match_or_list([
(ML_SSE_m128_v4int32, ML_SSE_m128_v4int32, ML_SSE_m128_v4int32),
(ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32),
(ML_SSE_m128_v4float32, ML_SSE_m128_v4float32, ML_SSE_m128_v4float32)]):
DynamicOperator(generate_sse_avx_comparison),
# 3 Dummy operators used to allow m128_promotion to promote squashable comparison
type_strict_match_or_list([
(ML_SSE_m128_v4bool, ML_SSE_m128_v4int32, ML_SSE_m128_v4int32),
(ML_SSE_m128_v4bool, ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32),
(ML_SSE_m128_v4bool, ML_SSE_m128_v4float32, ML_SSE_m128_v4float32)]):
ComplexOperator(expand_sse_avx_bool_comparison),
}
},
Comparison.GreaterOrEqual: {
lambda _: True: {
type_strict_match_or_list([
(ML_SSE_m128_v4int32, ML_SSE_m128_v4int32, ML_SSE_m128_v4int32),
(ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32)
]):
ComplexOperator(expand_sse_comparison),
type_strict_match(ML_SSE_m128_v4float32, ML_SSE_m128_v4float32, ML_SSE_m128_v4float32):
DynamicOperator(generate_sse_avx_comparison),
# 3 Dummy operators used to allow m128_promotion to promote squashable comparison
type_strict_match_or_list([
(ML_SSE_m128_v4bool, ML_SSE_m128_v4int32, ML_SSE_m128_v4int32),
(ML_SSE_m128_v4bool, ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32),
(ML_SSE_m128_v4bool, ML_SSE_m128_v4float32, ML_SSE_m128_v4float32)]):
ComplexOperator(expand_sse_avx_bool_comparison),
}
},
},
Addition: {
None: {
lambda _: True: {
type_strict_match(*(3*(ML_SSE_m128_v1float32,))):
_mm_add_ss(FO_Arg(0), FO_Arg(1)),
type_strict_match(ML_Binary32, ML_Binary32, ML_Binary32):
_mm_cvtss_f32(_mm_add_ss(_mm_set_ss(FO_Arg(0)),
_mm_set_ss(FO_Arg(1)))),
# vector addition
type_strict_match(*(3*(ML_SSE_m128_v4float32,))):
XmmIntrin("_mm_add_ps", arity = 2),
},
},
},
BitLogicAnd: {
None: {
lambda optree: True: {
type_strict_match(*(3*(ML_SSE_m128_v4float32,))):
XmmIntrin("_mm_and_ps", arity = 2,
output_precision = ML_SSE_m128_v4float32),
},
},
},
BitLogicOr: {
None: {
lambda optree: True: {
type_strict_match(*(3*(ML_SSE_m128_v4float32,))):
XmmIntrin("_mm_or_ps", arity = 2,
output_precision = | |
<reponame>yuanyuan-deng/RDM-osf.io
# -*- coding: utf-8 -*-
# mAP core Group / Member syncronization
import time
import datetime
import logging
import os
import sys
import requests
import urllib
import re
from operator import attrgetter
from pprint import pformat as pp
from urlparse import urlparse
from django.utils import timezone
from django.db import transaction
from django.core.exceptions import ObjectDoesNotExist
logger = logging.getLogger(__name__)
# initialize for standalone exec
if __name__ == '__main__':
logger.setLevel(level=logging.DEBUG)
os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings'
from website.app import init_app
init_app(routes=False, set_backends=False)
from osf.models.user import OSFUser
from osf.models.node import Node
from osf.models.mapcore import MAPSync, MAPProfile
from osf.models.nodelog import NodeLog
from framework.auth import Auth
from website.util import web_url_for
from website.settings import (MAPCORE_HOSTNAME,
MAPCORE_AUTHCODE_PATH,
MAPCORE_TOKEN_PATH,
MAPCORE_CLIENTID,
MAPCORE_SECRET,
MAPCORE_AUTHCODE_MAGIC,
DOMAIN)
from nii.mapcore_api import (MAPCore, MAPCoreException, VERIFY,
mapcore_logger,
mapcore_api_disable_log,
mapcore_group_member_is_private)
logger = mapcore_logger(logger)
def mapcore_disable_log(level=logging.CRITICAL):
    """Raise the log threshold on both this module's logger and the mAP Core API logger."""
    mapcore_api_disable_log(level=level)
    logger.setLevel(level=level)
### Node.{title,description} : unicode
### from MAPCore methods : utf-8
# unicode to utf-8
def utf8(u):
    """Encode a Python 2 ``unicode`` value as a UTF-8 byte string; pass any other value through unchanged."""
    if not isinstance(u, unicode):
        return u
    return u.encode('utf-8')
# utf-8 to unicode
def utf8dec(s):
    """Decode a UTF-8 byte string (Python 2 ``str``) into ``unicode``; pass any other value through unchanged."""
    return s.decode('utf-8') if isinstance(s, str) else s
### Do not import from scripts.populate_institutions
def encode_uri_component(val):
    """URL-quote *val* like JavaScript's ``encodeURIComponent`` (keeps the RFC 3986 unreserved set)."""
    safe_chars = '~()*!.\''
    return urllib.quote(val, safe=safe_chars)
def add_log(action, node, user, exc, save=False):
    """Append a NodeLog entry for *action*, unless it would duplicate the latest one."""
    logs = node.logs
    if logs.count() >= 1:
        latest = logs.latest()
        # Avoid stacking identical consecutive entries by the same user.
        if latest.user == user and latest.action == action:
            return
    node.add_log(
        action=action,
        params={'node': node._primary_key},
        auth=Auth(user=user),
        save=save,
    )
def mapcore_sync_is_enabled():
    """Return True only when a mAP client ID is configured and the MAPSync switch is on."""
    if not MAPCORE_CLIENTID:
        return False
    return MAPSync.is_enabled()
def mapcore_sync_set_enabled():
    # Persist the global "mAP sync on" flag (stored via the MAPSync model).
    MAPSync.set_enabled(True)
def mapcore_sync_set_disabled():
    # Persist the global "mAP sync off" flag (stored via the MAPSync model).
    MAPSync.set_enabled(False)
def mapcore_sync_upload_all(verbose=True):
    """Upload every live GRDM project to mAP.

    Returns a tuple ``(count_all, error_nodes)``: the number of nodes
    visited and the list of nodes whose upload failed.

    Bug fix: when sync was disabled this used to ``return`` bare ``None``
    while every other path returned a tuple, so callers that unpack the
    result crashed; it now returns ``(0, [])`` consistently.
    """
    count_all = 0
    error_nodes = []
    if not mapcore_sync_is_enabled():
        # Keep the return shape consistent with the success path.
        return (count_all, error_nodes)
    for node in Node.objects.filter(is_deleted=False):
        count_all += 1
        if verbose:
            print(u'*** Node: guid={}. title={}'.format(node._id, node.title))
        mapcore_set_standby_to_upload(node, log=False)
        try:
            admin_user = get_one_admin(node)
            mapcore_sync_rdm_project_or_map_group(admin_user, node,
                                                  use_raise=True)
        except Exception:
            # Record the failure and keep going; the caller inspects error_nodes.
            error_nodes.append(node)
    return (count_all, error_nodes)
# True or Exception
def mapcore_api_is_available0(user):
    """Probe the mAP Core API with *user*'s token; returns True, or lets the API error propagate."""
    api = MAPCore(user)
    api.get_api_version()
    logger.debug(
        'mapcore_api_is_available::Access Token (for {}) is up-to-date'.format(user.username))
    return True
def mapcore_api_is_available(user):
    # Thin wrapper: True on success, otherwise the underlying exception propagates.
    return mapcore_api_is_available0(user)
def mapcore_log_error(msg):
    # Convenience hook so other modules can log errors through this module's logger.
    logger.error(msg)
#
# lock node or user
#
class MAPCoreLocker():
    """Cooperative locks guarding mAP API access, per user and per node.

    The lock is a boolean column (``mapcore_api_locked``) on the OSFUser /
    Node row.  Acquisition polls once a second, re-checking the flag inside
    a short ``select_for_update`` transaction so only one worker can flip it.
    NOTE(review): there is no timeout — a crashed holder requires
    mapcore_unlock_all() to recover.
    """
    def lock_user(self, user):
        # Busy-wait until this user's flag can be taken.
        while True:
            #print('before transaction.atomic')
            with transaction.atomic():
                #print('transaction.atomic start')
                u = OSFUser.objects.select_for_update().get(username=user.username)
                if not u.mapcore_api_locked:
                    #print('before lock')
                    #time.sleep(5) # for debug
                    u.mapcore_api_locked = True
                    u.save()
                    logger.debug('OSFUser(' + u.username + ').mapcore_api_locked=True')
                    return
            #print('cannot get lock, sleep 1')
            time.sleep(1)
    def unlock_user(self, user):
        # Release the user's flag unconditionally.
        with transaction.atomic():
            u = OSFUser.objects.select_for_update().get(username=user.username)
            u.mapcore_api_locked = False
            u.save()
            logger.debug('OSFUser(' + u.username + ').mapcore_api_locked=False')
    def lock_node(self, node):
        # Same polling scheme as lock_user, keyed by the node's guid.
        while True:
            with transaction.atomic():
                #print('transaction.atomic start')
                n = Node.objects.select_for_update().get(guids___id=node._id)
                if not n.mapcore_api_locked:
                    n.mapcore_api_locked = True
                    n.save()
                    logger.debug('Node(' + n._id + ').mapcore_api_locked=True')
                    return
            #print('cannot get lock, sleep 1')
            time.sleep(1)
    def unlock_node(self, node):
        # Release the node's flag unconditionally.
        with transaction.atomic():
            n = Node.objects.select_for_update().get(guids___id=node._id)
            #print('n.mapcore_api_locked={}'.format(n.mapcore_api_locked))
            n.mapcore_api_locked = False
            n.save()
            logger.debug('Node(' + n._id + ').mapcore_api_locked=False')
# Module-wide singleton used by the sync entry points below.
locker = MAPCoreLocker()
def mapcore_unlock_all():
    """Forcibly clear every user/node ``mapcore_api_locked`` flag (recovery tool)."""
    logger.info('mapcore_unlock_all() start')
    with transaction.atomic():
        for u in OSFUser.objects.all():
            if not u.mapcore_api_locked:
                continue
            u.mapcore_api_locked = False
            u.save()
            logger.info('mapcore_unlock_all(): unlocked: User={}'.format(u.username))
        for n in Node.objects.all():
            if not n.mapcore_api_locked:
                continue
            n.mapcore_api_locked = False
            n.save()
            logger.info('mapcore_unlock_all(): unlocked: Node={}'.format(n._id))
    logger.info('mapcore_unlock_all() done')
def mapcore_request_authcode(user, params):
    '''
    Build the URL that starts the mAP OAuth authorization-code flow.
    :param user: OSFUser requesting authorization
    :param params: dict of GET parameters in the request
    :return: URL string to redirect the browser to
    '''
    logger.debug('mapcore_request_authcode get params:\n')
    logger.debug(pp(params))
    # Carry the post-login destination through the OAuth "state" parameter.
    next_url = params.get('next_url')
    state_str = MAPCORE_AUTHCODE_MAGIC
    if next_url is not None:
        state_str = (MAPCORE_AUTHCODE_MAGIC + next_url).encode('utf-8').encode('base64')
    redirect_uri = DOMAIN + web_url_for('mapcore_oauth_complete')[1:]
    logger.info('mapcore_request_authcode: redirect_uri is [' + redirect_uri + ']')
    target = (MAPCORE_HOSTNAME + MAPCORE_AUTHCODE_PATH + '?' +
              urllib.urlencode({'response_type': 'code',
                                'redirect_uri': redirect_uri,
                                'client_id': MAPCORE_CLIENTID,
                                'state': state_str}))
    entity_ids = user.get_idp_entity_ids()
    if len(entity_ids) == 1:
        # Exactly one known IdP: jump straight through the Shibboleth discovery service.
        query = '{}/Shibboleth.sso/DS?entityID={}&target={}'.format(
            MAPCORE_HOSTNAME,
            encode_uri_component(entity_ids[0]), encode_uri_component(target))
    else:
        query = target
    logger.info('redirect to AuthCode request: ' + query)
    return query
def mapcore_receive_authcode(user, params):
    '''
    Starting point of user registration for mAP: receive the OAuth redirect,
    exchange the authorization code for tokens, and store them on the user.
    :param user OSFUser object of current user
    :param params dict of url parameters in request
    :return URL to redirect the browser to next
    '''
    logger.debug('get an oatuh response:')
    s = ''
    for k, v in params.items():
        s += '(' + k + ',' + v + ') '
    logger.debug('oauth returned parameters: ' + s)
    # authorization code check
    if 'code' not in params or 'state' not in params:
        raise ValueError('invalid response from oauth provider')
    # exchange autorization code to access token
    authcode = params['code']
    # authcode = 'AUTHORIZATIONCODESAMPLE'
    # eppn = '<EMAIL>'
    redirect_uri = DOMAIN + web_url_for('mapcore_oauth_complete')[1:]
    (access_token, refresh_token) = mapcore_get_accesstoken(authcode, redirect_uri)
    # set mAP attribute into current user
    # Row-locked via select_for_update; the MAPProfile is created lazily on first login.
    with transaction.atomic():
        u = OSFUser.objects.select_for_update().get(username=user.username)
        if u.map_profile is None:
            map_profile = MAPProfile.objects.create()
            u.map_profile = map_profile
            u.save()
            user.reload()
        else:
            map_profile = u.map_profile
        map_profile.oauth_access_token = access_token
        map_profile.oauth_refresh_token = refresh_token
        map_profile.oauth_refresh_time = timezone.now()
        map_profile.save()
        logger.debug('User [' + u.eppn + '] get access_token [' + access_token + '] -> saved')
    # DEBUG: read record and print
    """
    logger.info('In database:')
    me = OSFUser.objects.get(eppn=user.eppn)
    logger.info('name: ' + me.fullname)
    logger.info('eppn: ' + me.eppn)
    if me.map_profile:
        logger.info('access_token: ' + me.map_profile.oauth_access_token)
        logger.info('refresh_token: ' + me.map_profile.oauth_refresh_token)
    """
    # 'state' carries the base64-encoded next_url when it is not the bare magic value.
    if params['state'] != MAPCORE_AUTHCODE_MAGIC:
        s = params['state'].decode('base64').decode('utf-8')
        return re.sub('^' + MAPCORE_AUTHCODE_MAGIC, '', s)  # next_url
    return DOMAIN  # redirect to home -> will redirect to dashboard
def mapcore_get_accesstoken(authcode, redirect):
    '''
    Exchange an authorization code for (access_token, refresh_token).
    Raises requests.HTTPError when the token endpoint rejects the call.
    '''
    logger.info('mapcore_get_accesstoken started.')
    body = urllib.urlencode({
        'grant_type': 'authorization_code',
        'redirect_uri': redirect,
        'code': authcode
    })
    res = requests.post(
        MAPCORE_HOSTNAME + MAPCORE_TOKEN_PATH,
        data=body,
        headers={'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'},
        auth=(MAPCORE_CLIENTID, MAPCORE_SECRET),
        verify=VERIFY)
    res.raise_for_status()  # error check
    logger.info('mapcore_get_accesstoken response: ' + res.text)
    json = res.json()
    return (json['access_token'], json['refresh_token'])
# def mapcore_refresh_accesstoken(user, force=False):
# '''
# refresh access token with refresh token
# :param user OSFUser
# :param force falg to avoid availablity check
# :return result 0..success, 1..must be login again, -1..any error
# '''
#
# logger.info('refuresh token for [' + user.eppn + '].')
# url = MAPCORE_HOSTNAME + MAPCORE_TOKEN_PATH
#
# # access token availability check
# if not force:
# param = {'access_token': user.map_profile.oauth_access_token}
# headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'}
# res = requests.post(url, data=param, headers=headers, verify=VERIFY)
# if res.status_code == 200 and 'success' in res.json():
# return 0 # notihng to do
#
# # do refresh
# basic_auth = (MAPCORE_CLIENTID, MAPCORE_SECRET)
# param = {
# 'grant_type': 'refresh_token',
# 'refresh_token': user.map_profile.oauth_refresh_token
# }
# param = urllib.urlencode(param)
# headers = {
# 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
# }
# res = requests.post(url, data=param, headers=headers, auth=basic_auth, verify=VERIFY)
# json = res.json()
# if res.status_code != 200 or 'access_token' not in json:
# return -1
# logger.info('User [' + user.eppn + '] refresh access_token by [' + json['access_token'])
#
# # update database
# user.map_profile.oauth_access_token = json['access_token']
# user.map_profile.oauth_refresh_token = json['refresh_token']
# user.map_profile.oauth_refresh_time = timezone.now()
# user.map_profile.save()
# user.save()
#
# return 0
def mapcore_remove_token(user):
    """Blank out the stored mAP OAuth tokens for *user* (no-op when there is no profile)."""
    profile = user.map_profile
    if profile is None:
        return
    profile.oauth_access_token = ''
    profile.oauth_refresh_token = ''
    profile.oauth_refresh_time = timezone.now()
    profile.save()
###
### sync functions
###
# ignore user.eppn=None
def query_contributors(node):
    # Only contributors with an ePPN (i.e. identities known to mAP) are relevant.
    return node.contributors.exclude(eppn=None)
def query_admin_contributors(node):
    """Return the node's admin contributors as a plain list.

    Idiom fix: materialize the iterable from ``get_admin_contributors()``
    with ``list()`` instead of a manual append loop.
    """
    return list(node.get_admin_contributors(node.contributors))
def get_one_admin(node):
    """Pick one enabled admin contributor, preferring the creator; None when the node has no admins."""
    admins = query_admin_contributors(node)
    if not admins:
        return None
    creator = node.creator
    # Prefer the project creator when it is still an enabled admin.
    if creator.is_disabled is False and creator in admins:
        return creator
    for candidate in admins:
        if candidate.is_disabled is False:
            return candidate
    raise MAPCoreException(None, 'GRDM project[{}]: No admin contributor exists. (unexpected)'.format(node._id))
def remove_node(node):
    """Try to delete *node* as each admin in turn; log an error if every attempt fails.

    Bug fix: the error log referenced ``e``, which is unbound outside the
    ``except`` clause on Python 3 (and only happened to work on Python 2);
    it now uses the captured ``last_e``.
    """
    last_e = None
    for admin in node.get_admin_contributors(node.contributors):
        try:
            node.remove_node(Auth(user=admin))
            break  # first admin that succeeds wins
        except Exception as e:
            last_e = e
    # NOTE(review): this logs even when a later admin succeeded after an
    # earlier failure — preserved as-is; confirm whether that is intended.
    if last_e:
        logger.error('GRDM project[{}] cannot be deleted: {}'.format(node._id, utf8(str(last_e))))
# OSFuser essential feild keeper for comparing member
class RDMmember(object):
    """Snapshot of the OSFUser fields needed to compare RDM members with mAP members."""
    def __init__(self, node, user):
        self.node = node
        self.user = user
        self.eppn = user.eppn      # ePPN
        self.user_id = user._id    # RDM internal user_id
        # Admin flag for this node only (check_parent=False: no inheritance
        # from a parent project).
        # Bug fix: the former ``is_admin(self)`` method was shadowed by this
        # instance attribute and therefore uncallable dead code; it has been
        # removed and the attribute is now assigned directly.
        self.is_admin = bool(node.has_permission(user, 'admin', check_parent=False))
        # self.access_token = user.map_profile.oauth_access_token
        # self.refresh_token = user.map_profile.oauth_refresh_token
# compare member lists and apply actions
def compare_members(rdm_members1, map_members1, to_map):
rdm_members = sorted(rdm_members1, key=attrgetter('eppn'))
map_members = sorted(map_members1, key=lambda x: x['eppn'])
rdm_index = 0
map_index = 0
add = []
delete = []
upg = []
downg = []
while rdm_index < len(rdm_members) and map_index < len(map_members):
logger.debug('compare_members: start: rdm_index={}, len(rdm_members)={}, map_index={}, len(map_members)={}'.format(rdm_index, len(rdm_members), map_index, len(map_members)))
if map_members[map_index]['eppn'] == rdm_members[rdm_index].eppn:
# exist in both -> check admin
if rdm_members[rdm_index].is_admin:
if map_members[map_index]['admin'] == 1 or map_members[map_index]['admin'] == 2:
# admin @ both
pass
else:
# admin @ rdm only
if to_map:
upg.append(map_members[map_index])
else:
downg.append(rdm_members[rdm_index])
else:
if map_members[map_index]['admin'] == 1 or map_members[map_index]['admin'] == 2:
# admin in map only
if to_map:
downg.append(map_members[map_index])
else:
upg.append(rdm_members[rdm_index])
else:
# regular @ both
pass
rdm_index += 1
map_index += 1
elif map_members[map_index]['eppn'] < rdm_members[rdm_index].eppn:
# exist in map only
if to_map:
delete.append(map_members[map_index])
else:
add.append(map_members[map_index])
map_index += 1
else:
# exist in rdm only
if to_map:
add.append(rdm_members[rdm_index])
else:
delete.append(rdm_members[rdm_index])
rdm_index += 1
if to_map:
while rdm_index < len(rdm_members):
add.append(rdm_members[rdm_index])
rdm_index += 1
while map_index < len(map_members):
delete.append(map_members[map_index])
map_index += 1
else:
while map_index < len(map_members):
add.append(map_members[map_index])
| |
keep it
# in the `endpoint` command we use the standard
if ip_as_string and isinstance(ip, list):
ip = ip[0]
os_type = convert_os_to_standard(single_endpoint.get('os_type', ''))
endpoint = Common.Endpoint(
id=single_endpoint.get('endpoint_id'),
hostname=hostname,
ip_address=ip,
os=os_type,
status=status,
is_isolated=is_isolated,
mac_address=single_endpoint.get('mac_address'),
domain=single_endpoint.get('domain'),
vendor=INTEGRATION_NAME)
standard_endpoints.append(endpoint)
return standard_endpoints
def endpoint_command(client, args):
    """Generic ``endpoint`` command: look up endpoints by id / ip / hostname."""
    ids = argToList(args.get('id'))
    ips = argToList(args.get('ip'))
    hostnames = argToList(args.get('hostname'))
    endpoints = client.get_endpoints(
        endpoint_id_list=ids,
        ip_list=ips,
        hostname=hostnames,
    )
    # ip_as_string=True: the generic endpoint command expects a single IP string.
    standard_endpoints = generate_endpoint_by_contex_standard(endpoints, True)
    if not standard_endpoints:
        return [CommandResults(
            readable_output="No endpoints were found",
            raw_response=endpoints,
        )]
    command_results = []
    for ep in standard_endpoints:
        ep_context = ep.to_context().get(Common.Endpoint.CONTEXT_PATH)
        command_results.append(CommandResults(
            readable_output=tableToMarkdown('Cortex XDR Endpoint', ep_context),
            raw_response=endpoints,
            indicator=ep,
        ))
    return command_results
def create_parsed_alert(product, vendor, local_ip, local_port, remote_ip, remote_port, event_timestamp, severity,
                        alert_name, alert_description):
    """Assemble the raw-alert dict in the shape expected by the XDR insert-parsed-alerts API."""
    return dict(
        product=product,
        vendor=vendor,
        local_ip=local_ip,
        local_port=local_port,
        remote_ip=remote_ip,
        remote_port=remote_port,
        event_timestamp=event_timestamp,
        severity=severity,
        alert_name=alert_name,
        alert_description=alert_description,
    )
def insert_parsed_alert_command(client, args):
    """Upload one parsed (mapped-format) alert to XDR."""
    local_port = arg_to_int(arg=args.get('local_port'), arg_name='local_port')
    remote_port = arg_to_int(arg=args.get('remote_port'), arg_name='remote_port')
    raw_timestamp = args.get('event_timestamp')
    # Default to "now" in epoch milliseconds when the caller gave no timestamp.
    if raw_timestamp is None:
        event_timestamp = int(round(time.time() * 1000))
    else:
        event_timestamp = int(raw_timestamp)
    alert = create_parsed_alert(
        product=args.get('product'),
        vendor=args.get('vendor'),
        local_ip=args.get('local_ip'),
        local_port=local_port,
        remote_ip=args.get('remote_ip'),
        remote_port=remote_port,
        event_timestamp=event_timestamp,
        severity=args.get('severity'),
        alert_name=args.get('alert_name'),
        alert_description=args.get('alert_description', ''),
    )
    client.insert_alerts([alert])
    return (
        'Alert inserted successfully',
        None,
        None
    )
def insert_cef_alerts_command(client, args):
    """Upload one or more CEF alerts to XDR.

    ``cef_alerts`` is parsed manually (not via ``argToList``) because a CEF
    alert may itself contain commas.  Bug fix: an empty string used to raise
    ``IndexError`` from ``alerts[0]``; use startswith/endswith instead, so an
    empty string is treated as a single (empty) alert.
    """
    alerts = args.get('cef_alerts')
    if isinstance(alerts, list):
        pass
    elif isinstance(alerts, str):
        if alerts.startswith('[') and alerts.endswith(']'):
            # if the string contains [] it means it is a list and must be parsed
            alerts = json.loads(alerts)
        else:
            # otherwise it is a single alert
            alerts = [alerts]
    else:
        raise ValueError('Invalid argument "cef_alerts". It should be either list of strings (cef alerts), '
                         'or single string')
    client.insert_cef_alerts(alerts)
    return (
        'Alerts inserted successfully',
        None,
        None
    )
def isolate_endpoint_command(client, args):
    """xdr-isolate-endpoint: request network isolation of a single endpoint.

    Validates the endpoint's current isolation/agent state first and either
    short-circuits with a message, raises ValueError, or submits the
    isolation request.  Returns the legacy (readable, context, raw) tuple.
    """
    endpoint_id = args.get('endpoint_id')
    # suppress_disconnected_endpoint_error=True downgrades the DISCONNECTED error to a warning.
    disconnected_should_return_error = not argToBoolean(args.get('suppress_disconnected_endpoint_error', False))
    incident_id = arg_to_number(args.get('incident_id'))
    endpoint = client.get_endpoints(endpoint_id_list=[endpoint_id])
    if len(endpoint) == 0:
        raise ValueError(f'Error: Endpoint {endpoint_id} was not found')
    endpoint = endpoint[0]
    endpoint_status = endpoint.get('endpoint_status')
    is_isolated = endpoint.get('is_isolated')
    # Already isolated or isolation in flight: nothing to do.
    if is_isolated == 'AGENT_ISOLATED':
        return (
            f'Endpoint {endpoint_id} already isolated.',
            None,
            None
        )
    if is_isolated == 'AGENT_PENDING_ISOLATION':
        return (
            f'Endpoint {endpoint_id} pending isolation.',
            None,
            None
        )
    if endpoint_status == 'UNINSTALLED':
        raise ValueError(f'Error: Endpoint {endpoint_id}\'s Agent is uninstalled and therefore can not be isolated.')
    if endpoint_status == 'DISCONNECTED':
        if disconnected_should_return_error:
            raise ValueError(f'Error: Endpoint {endpoint_id} is disconnected and therefore can not be isolated.')
        else:
            # Best effort: the isolation stays pending until the agent reconnects.
            return (
                f'Warning: isolation action is pending for the following disconnected endpoint: {endpoint_id}.',
                {f'{INTEGRATION_CONTEXT_BRAND}.Isolation.endpoint_id(val.endpoint_id == obj.endpoint_id)': endpoint_id},
                None)
    if is_isolated == 'AGENT_PENDING_ISOLATION_CANCELLATION':
        raise ValueError(
            f'Error: Endpoint {endpoint_id} is pending isolation cancellation and therefore can not be isolated.'
        )
    client.isolate_endpoint(endpoint_id=endpoint_id, incident_id=incident_id)
    return (
        f'The isolation request has been submitted successfully on Endpoint {endpoint_id}.\n'
        f'To check the endpoint isolation status please run: !xdr-get-endpoints endpoint_id_list={endpoint_id}'
        f' and look at the [is_isolated] field.',
        {f'{INTEGRATION_CONTEXT_BRAND}.Isolation.endpoint_id(val.endpoint_id == obj.endpoint_id)': endpoint_id},
        None
    )
def unisolate_endpoint_command(client, args):
    """xdr-unisolate-endpoint: cancel network isolation of a single endpoint.

    Mirror image of isolate_endpoint_command: validates the endpoint's
    isolation/agent state, then short-circuits, raises, or submits the
    un-isolation request.  Returns the legacy (readable, context, raw) tuple.
    """
    endpoint_id = args.get('endpoint_id')
    incident_id = arg_to_number(args.get('incident_id'))
    # suppress_disconnected_endpoint_error=True downgrades the DISCONNECTED error to a warning.
    disconnected_should_return_error = not argToBoolean(args.get('suppress_disconnected_endpoint_error', False))
    endpoint = client.get_endpoints(endpoint_id_list=[endpoint_id])
    if len(endpoint) == 0:
        raise ValueError(f'Error: Endpoint {endpoint_id} was not found')
    endpoint = endpoint[0]
    endpoint_status = endpoint.get('endpoint_status')
    is_isolated = endpoint.get('is_isolated')
    # Already un-isolated or cancellation in flight: nothing to do.
    if is_isolated == 'AGENT_UNISOLATED':
        return (
            f'Endpoint {endpoint_id} already unisolated.',
            None,
            None
        )
    if is_isolated == 'AGENT_PENDING_ISOLATION_CANCELLATION':
        return (
            f'Endpoint {endpoint_id} pending isolation cancellation.',
            None,
            None
        )
    if endpoint_status == 'UNINSTALLED':
        raise ValueError(f'Error: Endpoint {endpoint_id}\'s Agent is uninstalled and therefore can not be un-isolated.')
    if endpoint_status == 'DISCONNECTED':
        if disconnected_should_return_error:
            raise ValueError(f'Error: Endpoint {endpoint_id} is disconnected and therefore can not be un-isolated.')
        else:
            # Best effort: the un-isolation stays pending until the agent reconnects.
            return (
                f'Warning: un-isolation action is pending for the following disconnected endpoint: {endpoint_id}.',
                {
                    f'{INTEGRATION_CONTEXT_BRAND}.UnIsolation.endpoint_id(val.endpoint_id == obj.endpoint_id)'
                    f'': endpoint_id}, None)
    if is_isolated == 'AGENT_PENDING_ISOLATION':
        raise ValueError(
            f'Error: Endpoint {endpoint_id} is pending isolation and therefore can not be un-isolated.'
        )
    client.unisolate_endpoint(endpoint_id=endpoint_id, incident_id=incident_id)
    return (
        f'The un-isolation request has been submitted successfully on Endpoint {endpoint_id}.\n'
        f'To check the endpoint isolation status please run: !xdr-get-endpoints endpoint_id_list={endpoint_id}'
        f' and look at the [is_isolated] field.',
        {f'{INTEGRATION_CONTEXT_BRAND}.UnIsolation.endpoint_id(val.endpoint_id == obj.endpoint_id)': endpoint_id},
        None
    )
def arg_to_timestamp(arg, arg_name: str, required: bool = False):
    """Normalize *arg* into an epoch-milliseconds timestamp.

    Accepts a digit string (already a timestamp), a free-form date string
    parsed by ``dateparser`` (converted to epoch ms), or an int/float that
    is returned unchanged.  Returns None when *arg* is None and not
    required.  Raises ValueError on a missing required value, an
    unparsable date, or an unsupported type (previously the function fell
    through and silently returned None for unsupported types).
    """
    if arg is None:
        if required is True:
            raise ValueError(f'Missing "{arg_name}"')
        return None
    if isinstance(arg, str) and arg.isdigit():
        # timestamp that str - we just convert it to int
        return int(arg)
    if isinstance(arg, str):
        # if the arg is string of date format 2019-10-23T00:00:00 or "3 days", etc
        date = dateparser.parse(arg, settings={'TIMEZONE': 'UTC'})
        if date is None:
            # if date is None it means dateparser failed to parse it
            raise ValueError(f'Invalid date: {arg_name}')
        return int(date.timestamp() * 1000)
    if isinstance(arg, (int, float)):
        return arg
    raise ValueError(f'Invalid date: {arg_name}')
def get_audit_management_logs_command(client, args):
    """xdr-get-audit-management-logs: query management audit events with paging and sorting."""
    page_number = arg_to_int(arg=args.get('page', 0),
                             arg_name='Failed to parse "page". Must be a number.',
                             required=True)
    limit = arg_to_int(arg=args.get('limit', 20),
                       arg_name='Failed to parse "limit". Must be a number.',
                       required=True)
    # Translate page/limit into the API's absolute search window.
    search_from = page_number * limit
    audit_logs = client.audit_management_logs(
        email=argToList(args.get('email')),
        result=argToList(args.get('result')),
        _type=argToList(args.get('type')),
        sub_type=argToList(args.get('sub_type')),
        timestamp_gte=arg_to_timestamp(arg=args.get('timestamp_gte'), arg_name='timestamp_gte'),
        timestamp_lte=arg_to_timestamp(arg=args.get('timestamp_lte'), arg_name='timestamp_lte'),
        search_from=search_from,
        search_to=search_from + limit,
        sort_by=args.get('sort_by'),
        sort_order=args.get('sort_order', 'asc')
    )
    table_headers = [
        'AUDIT_ID',
        'AUDIT_RESULT',
        'AUDIT_DESCRIPTION',
        'AUDIT_OWNER_NAME',
        'AUDIT_OWNER_EMAIL',
        'AUDIT_ASSET_JSON',
        'AUDIT_ASSET_NAMES',
        'AUDIT_HOSTNAME',
        'AUDIT_REASON',
        'AUDIT_ENTITY',
        'AUDIT_ENTITY_SUBTYPE',
        'AUDIT_SESSION_ID',
        'AUDIT_CASE_ID',
        'AUDIT_INSERT_TIME'
    ]
    return (
        tableToMarkdown('Audit Management Logs', audit_logs, table_headers),
        {f'{INTEGRATION_CONTEXT_BRAND}.AuditManagementLogs(val.AUDIT_ID == obj.AUDIT_ID)': audit_logs},
        audit_logs
    )
def get_audit_agent_reports_command(client, args):
    """xdr-get-audit-agent-reports: query agent report audit events with paging and sorting."""
    page_number = arg_to_int(arg=args.get('page', 0),
                             arg_name='Failed to parse "page". Must be a number.',
                             required=True)
    limit = arg_to_int(arg=args.get('limit', 20),
                       arg_name='Failed to parse "limit". Must be a number.',
                       required=True)
    # Translate page/limit into the API's absolute search window.
    search_from = page_number * limit
    audit_logs = client.get_audit_agent_reports(
        endpoint_ids=argToList(args.get('endpoint_ids')),
        endpoint_names=argToList(args.get('endpoint_names')),
        result=argToList(args.get('result')),
        _type=argToList(args.get('type')),
        sub_type=argToList(args.get('sub_type')),
        timestamp_gte=arg_to_timestamp(arg=args.get('timestamp_gte'), arg_name='timestamp_gte'),
        timestamp_lte=arg_to_timestamp(arg=args.get('timestamp_lte'), arg_name='timestamp_lte'),
        search_from=search_from,
        search_to=search_from + limit,
        sort_by=args.get('sort_by'),
        sort_order=args.get('sort_order', 'asc')
    )
    integration_context = {f'{INTEGRATION_CONTEXT_BRAND}.AuditAgentReports': audit_logs}
    # Agent reports double as generic Endpoint context entries.
    endpoint_context = create_endpoint_context(audit_logs)
    if endpoint_context:
        integration_context[Common.Endpoint.CONTEXT_PATH] = endpoint_context
    return (
        tableToMarkdown('Audit Agent Reports', audit_logs),
        integration_context,
        audit_logs
    )
def get_distribution_url_command(client, args):
    """xdr-get-distribution-url: fetch the download URL for an installer distribution."""
    distribution_id = args.get('distribution_id')
    package_type = args.get('package_type')
    url = client.get_distribution_url(distribution_id, package_type)
    context = {
        'PaloAltoNetworksXDR.Distribution(val.id == obj.id)': {
            'id': distribution_id,
            'url': url
        }
    }
    return (f'[Distribution URL]({url})', context, url)
def get_distribution_status_command(client, args):
    """xdr-get-distribution-status: fetch the status of each requested distribution.

    Idiom fix: build the result with a comprehension instead of a manual
    append loop (one API call per distribution id, order preserved).
    """
    distribution_ids = argToList(args.get('distribution_ids'))
    distribution_list = [
        {'id': distribution_id, 'status': client.get_distribution_status(distribution_id)}
        for distribution_id in distribution_ids
    ]
    return (
        tableToMarkdown('Distribution Status', distribution_list, ['id', 'status']),
        {f'{INTEGRATION_CONTEXT_BRAND}.Distribution(val.id == obj.id)': distribution_list},
        distribution_list
    )
def get_distribution_versions_command(client):
    """xdr-get-distribution-versions: list available agent versions per OS.

    Idiom fix: iterate ``versions.items()`` instead of ``keys()`` plus a
    per-key lookup, and build the tables with a comprehension.
    """
    versions = client.get_distribution_versions()
    readable_output = [
        tableToMarkdown(operation_system, os_versions or [], ['versions'])
        for operation_system, os_versions in versions.items()
    ]
    return (
        '\n\n'.join(readable_output),
        {f'{INTEGRATION_CONTEXT_BRAND}.DistributionVersions': versions},
        versions
    )
def create_distribution_command(client, args):
    """xdr-create-distribution: create an agent installer distribution package."""
    name = args.get('name')
    platform = args.get('platform')
    package_type = args.get('package_type')
    description = args.get('description')
    agent_version = args.get('agent_version')
    # agent_version must be provided for all the platforms except android
    if platform != 'android' and not agent_version:
        raise ValueError(f'Missing argument "agent_version" for platform "{platform}"')
    distribution_id = client.create_distribution(
        name=name,
        platform=platform,
        package_type=package_type,
        agent_version=agent_version,
        description=description
    )
    distribution = {
        'id': distribution_id,
        'name': name,
        'platform': platform,
        'package_type': package_type,
        'agent_version': agent_version,
        'description': description
    }
    context = {f'{INTEGRATION_CONTEXT_BRAND}.Distribution(val.id == obj.id)': distribution}
    return (f'Distribution {distribution_id} created successfully', context, distribution)
def blacklist_files_command(client, args):
    """xdr-blacklist-files: add file hashes to the XDR block list."""
    hash_list = argToList(args.get('hash_list'))
    res = client.blacklist_files(hash_list=hash_list,
                                 comment=args.get('comment'),
                                 incident_id=arg_to_number(args.get('incident_id')))
    # The API reports "already added" via err_extra; any other dict response is an error.
    already_added = "All hashes have already been added to the allow or block list"
    if isinstance(res, dict) and res.get('err_extra') != already_added:
        raise ValueError(res)
    markdown_data = [{'fileHash': file_hash} for file_hash in hash_list]
    return (
        tableToMarkdown('Blacklist Files', markdown_data, headers=['fileHash'], headerTransform=pascalToSpace),
        {f'{INTEGRATION_CONTEXT_BRAND}.blackList.fileHash(val.fileHash == obj.fileHash)': hash_list},
        argToList(hash_list)
    )
def whitelist_files_command(client, args):
    """xdr-whitelist-files: add file hashes to the XDR allow list."""
    hash_list = argToList(args.get('hash_list'))
    client.whitelist_files(hash_list=hash_list,
                           comment=args.get('comment'),
                           incident_id=arg_to_number(args.get('incident_id')))
    markdown_data = [{'fileHash': file_hash} for file_hash in hash_list]
    return (
        tableToMarkdown('Whitelist Files', markdown_data, ['fileHash'], headerTransform=pascalToSpace),
        {f'{INTEGRATION_CONTEXT_BRAND}.whiteList.fileHash(val.fileHash == obj.fileHash)': hash_list},
        argToList(hash_list)
    )
def quarantine_files_command(client, args):
endpoint_id_list = argToList(args.get("endpoint_id_list"))
file_path = args.get("file_path")
file_hash = args.get("file_hash")
incident_id = arg_to_number(args.get('incident_id'))
reply = client.quarantine_files(
endpoint_id_list=endpoint_id_list,
file_path=file_path,
file_hash=file_hash,
incident_id=incident_id
)
output = {
'endpointIdList': endpoint_id_list,
'filePath': file_path,
'fileHash': | |
This creates a new copy of the message in the destination folder and
removes the original message.
### Parameters
----
message_id : str
The ID of the message you wish to move.
destination_id : str
The name of the folder you want to move it to.
### Returns
----
Dict
If successful, this method returns 201 Created response
code and `Message` object in the response body.
"""
content = self.graph_session.make_request(
method='post',
endpoint='/me/messages/{message_id}/move'.format(
message_id=message_id
),
json={"destinationId": destination_id}
)
return content
def move_user_message(self, user_id: str, message_id: str, destination_id: str) -> Dict:
"""Move a message to another folder within the specified user's mailbox.
### Overview:
----
This creates a new copy of the message in the destination folder and
removes the original message.
### Parameters
----
user_id : str
The user ID of the mailbox that you want to move a
message for.
message_id : str
The ID of the message you wish to move.
destination_id : str
The name of the folder you want to move it to.
### Returns
----
Dict
If successful, this method returns 201 Created response
code and `Message` object in the response body.
"""
content = self.graph_session.make_request(
method='post',
endpoint='/users/{user_id}/messages/{message_id}/move'.format(
user_id=user_id,
message_id=message_id
),
json={"destinationId": destination_id}
)
return content
def create_reply_my_message(self, message_id: str) -> dict:
"""Create a draft of the reply to the specified message. For
the default user.
### Overview:
----
You can then update the draft to add reply content to the
body or change other message properties, or, simply send
the draft.
### Parameters
----
message_id : str
The message ID for which you wish to use
as a reply message.
### Returns
----
Dict
If successful, this method returns 201 Created response
code and `Message` object in the response body.
"""
content = self.graph_session.make_request(
method='post',
endpoint='/me/messages/{message_id}/createReply'.format(
message_id=message_id
)
)
return content
def create_reply_user_message(self, user_id: str, message_id: str) -> Dict:
"""Create a draft of the reply to the specified message.
### Overview:
----
You can then update the draft to add reply content to the
body or change other message properties, or, simply send
the draft.
### Parameters
----
user_id : str
The user ID of the mailbox that you want to create a
reply message for.
message_id : str
The message ID for which you wish to use
as a reply message.
### Returns
----
Dict
If successful, this method returns 201 Created response
code and `Message` object in the response body.
"""
content = self.graph_session.make_request(
method='post',
endpoint='/users/{user_id}/messages/{message_id}/createReply'.format(
user_id=user_id,
message_id=message_id
)
)
return content
def reply_to_my_message(self, message_id: str, message: dict) -> dict:
"""Reply to the sender of a message, add a comment or modify any updateable properties
all in one reply call. The message is then saved in the Sent Items folder. For the
default user.
### Parameters
----
message_id : str
The message ID for which you wish to reply to.
messgae : dict
The message you want to reply with.
### Returns
----
Dict
If successful, this method returns 202 Accepted response
code. It does not return anything in the response body.
"""
content = self.graph_session.make_request(
method='post',
endpoint='/me/messages/{message_id}/reply'.format(
message_id=message_id
),
json=message
)
return content
def reply_to_user_message(self, user_id: str, message_id: str, message: dict) -> Dict:
    """Reply to the sender of a message in the given user's mailbox,
    adding a comment or modifying any updateable properties in the same
    call. The sent message is saved in the Sent Items folder.

    ### Parameters
    ----
    user_id : str
        The user ID of the mailbox that contains the message
        you want to reply to.
    message_id : str
        The ID of the message to reply to.
    message : dict
        The message payload to reply with.

    ### Returns
    ----
    Dict
        If successful, this method returns 202 Accepted response
        code. It does not return anything in the response body.
    """
    endpoint = f'/users/{user_id}/messages/{message_id}/reply'
    return self.graph_session.make_request(method='post', endpoint=endpoint, json=message)
def create_reply_all_my_message(self, message_id: str) -> dict:
    """Create a draft reply to the sender and all recipients of the
    specified message, for the default user.

    ### Overview:
    ----
    The returned draft can then be updated to add reply content to the
    body or change other message properties, or simply sent.

    ### Parameters
    ----
    message_id : str
        The ID of the message being replied to.

    ### Returns
    ----
    Dict
        If successful, this method returns 201 Created response
        code and `Message` object in the response body.
    """
    endpoint = f'/me/messages/{message_id}/createReplyAll'
    return self.graph_session.make_request(method='post', endpoint=endpoint)
def create_reply_all_user_message(self, user_id: str, message_id: str) -> Dict:
    """Create a draft reply to the sender and all recipients of the
    specified message in the given user's mailbox.

    ### Overview:
    ----
    The returned draft can then be updated to add reply content to the
    body or change other message properties, or simply sent.

    ### Parameters
    ----
    user_id : str
        The user ID of the mailbox that contains the message.
    message_id : str
        The ID of the message being replied to.

    ### Returns
    ----
    Dict
        If successful, this method returns 201 Created response
        code and `Message` object in the response body.
    """
    endpoint = f'/users/{user_id}/messages/{message_id}/createReplyAll'
    return self.graph_session.make_request(method='post', endpoint=endpoint)
def reply_all_my_message(self, message_id: str, message: dict) -> dict:
    """Reply to the sender and all recipients of the specified message,
    for the default user.

    ### Overview:
    ----
    Unlike `create_reply_all_my_message`, this sends the reply
    immediately; the message is then saved in the Sent Items folder.

    ### Parameters
    ----
    message_id : str
        The ID of the message to reply-all to.
    message : dict
        The message payload to reply with.

    ### Returns
    ----
    dict
        If successful, this method returns a 202 Accepted response
        code. It does not return anything in the response body.
    """
    content = self.graph_session.make_request(
        method='post',
        endpoint='/me/messages/{message_id}/replyAll'.format(
            message_id=message_id
        ),
        json=message
    )
    return content
def reply_all_user_message(self, user_id: str, message_id: str, message: dict) -> Dict:
    """Reply to the sender and all recipients of the specified message
    in the given user's mailbox.

    ### Overview:
    ----
    Unlike `create_reply_all_user_message`, this sends the reply
    immediately; the message is then saved in the Sent Items folder.

    ### Parameters
    ----
    user_id : str
        The user ID of the mailbox that contains the message.
    message_id : str
        The ID of the message to reply-all to.
    message : dict
        The message payload to reply with.

    ### Returns
    ----
    Dict
        If successful, this method returns a 202 Accepted response
        code. It does not return anything in the response body.
    """
    content = self.graph_session.make_request(
        method='post',
        endpoint='/users/{user_id}/messages/{message_id}/replyAll'.format(
            user_id=user_id,
            message_id=message_id
        ),
        json=message
    )
    return content
def create_forward_my_message(self, message_id: str) -> dict:
    """Create a draft to forward the specified message, for the default user.

    ### Overview:
    ----
    The returned draft can then be updated to add content to the body
    or change other message properties, or simply sent.

    ### Parameters
    ----
    message_id : str
        The ID of the message to forward.

    ### Returns
    ----
    Dict
        If successful, this method returns 201 Created response
        code and `Message` object in the response body.
    """
    endpoint = f'/me/messages/{message_id}/createForward'
    return self.graph_session.make_request(method='post', endpoint=endpoint)
def create_forward_user_message(self, user_id: str, message_id: str) -> Dict:
"""Create a draft to forward the specified message.
### Overview:
----
You can then update the draft to add content to the body
or change other message properties, or, simply send the
draft.
### Parameters
----
message_id : str
The message ID for which you wish to forward.
user_id : dict
The User ID you want to create a new forward
message for.
### Returns
----
Dict
If successful, this method returns 201 Created response
code and Message object in the response body.
"""
content = self.graph_session.make_request(
method='post',
endpoint='/users/{user_id}/messages/{message_id}/createForward'.format(
user_id=user_id,
message_id=message_id
| |
import os
import numpy as np
import argparse
import time
import torch
import torchvision
import cv2
def yolo_forward_dynamic(output, num_classes, anchors, num_anchors, scale_x_y):
    """Decode a raw YOLO head output into boxes and per-class confidences.

    Fix: removed leftover debug ``print`` calls that spammed stdout on
    every forward pass; the numeric computation is unchanged.

    Parameters
    ----------
    output : torch.Tensor
        Raw head output, shape [batch, num_anchors * (5 + num_classes), H, W].
    num_classes : int
        Number of object classes.
    anchors : sequence of float
        Flattened (w, h) anchor pairs in feature-map units
        (2 * num_anchors values).
    num_anchors : int
        Number of anchors decoded by this head.
    scale_x_y : float
        Center-coordinate scaling factor (the YOLOv4 ``scale_x_y`` trick).

    Returns
    -------
    boxes : torch.Tensor
        [batch, num_anchors * H * W, 1, 4] boxes as (x1, y1, x2, y2),
        normalized to [0, 1] by the feature-map size.
    confs : torch.Tensor
        [batch, num_anchors * H * W, num_classes] = cls_conf * det_conf.
    """
    # Slice the channel dimension into per-anchor groups:
    # [xy(2), wh(2), objectness(1), class scores(num_classes)] per anchor.
    bxy_list = []
    bwh_list = []
    det_confs_list = []
    cls_confs_list = []
    for i in range(num_anchors):
        begin = i * (5 + num_classes)
        end = (i + 1) * (5 + num_classes)
        bxy_list.append(output[:, begin: begin + 2])
        bwh_list.append(output[:, begin + 2: begin + 4])
        det_confs_list.append(output[:, begin + 4: begin + 5])
        cls_confs_list.append(output[:, begin + 5: end])
    # Shape: [batch, num_anchors * 2, H, W]
    bxy = torch.cat(bxy_list, dim=1)
    # Shape: [batch, num_anchors * 2, H, W]
    bwh = torch.cat(bwh_list, dim=1)
    # Shape: [batch, num_anchors, H, W]
    det_confs = torch.cat(det_confs_list, dim=1)
    # Shape: [batch, num_anchors * H * W]
    det_confs = det_confs.view(output.size(0), num_anchors * output.size(2) * output.size(3))
    # Shape: [batch, num_anchors * num_classes, H, W]
    cls_confs = torch.cat(cls_confs_list, dim=1)
    # Shape: [batch, num_anchors, num_classes, H * W]
    cls_confs = cls_confs.view(output.size(0), num_anchors, num_classes, output.size(2) * output.size(3))
    # Shape: [batch, num_anchors, num_classes, H * W] --> [batch, num_anchors * H * W, num_classes]
    cls_confs = cls_confs.permute(0, 1, 3, 2).reshape(output.size(0), num_anchors * output.size(2) * output.size(3),
                                                      num_classes)
    # Activations: sigmoid for centers/confidences, exp for sizes
    # (class scores use sigmoid, not softmax).
    bxy = torch.sigmoid(bxy) * scale_x_y - 0.5 * (scale_x_y - 1)
    bwh = torch.exp(bwh)
    det_confs = torch.sigmoid(det_confs)
    cls_confs = torch.sigmoid(cls_confs)
    # Prepare cell-offset grids C-x, C-y and anchor sizes P-w, P-h
    # (built in NumPy, converted to torch tensors below).
    grid_x = np.expand_dims(np.expand_dims(
        np.expand_dims(np.linspace(0, output.size(3) - 1, output.size(3)), axis=0).repeat(output.size(2), 0), axis=0),
        axis=0)
    grid_y = np.expand_dims(np.expand_dims(
        np.expand_dims(np.linspace(0, output.size(2) - 1, output.size(2)), axis=1).repeat(output.size(3), 1), axis=0),
        axis=0)
    anchor_w = []
    anchor_h = []
    for i in range(num_anchors):
        anchor_w.append(anchors[i * 2])
        anchor_h.append(anchors[i * 2 + 1])
    # Keep the grid tensors on the same device as the network output.
    device = None
    cuda_check = output.is_cuda
    if cuda_check:
        device = output.get_device()
    bx_list = []
    by_list = []
    bw_list = []
    bh_list = []
    # Apply C-x, C-y, P-w, P-h per anchor.
    for i in range(num_anchors):
        ii = i * 2
        # Shape: [batch, 1, H, W]
        bx = bxy[:, ii: ii + 1] + torch.tensor(grid_x, device=device,
                                               dtype=torch.float32)
        # Shape: [batch, 1, H, W]
        by = bxy[:, ii + 1: ii + 2] + torch.tensor(grid_y, device=device,
                                                   dtype=torch.float32)
        # Shape: [batch, 1, H, W]
        bw = bwh[:, ii: ii + 1] * anchor_w[i]
        # Shape: [batch, 1, H, W]
        bh = bwh[:, ii + 1: ii + 2] * anchor_h[i]
        bx_list.append(bx)
        by_list.append(by)
        bw_list.append(bw)
        bh_list.append(bh)
    ########################################
    #   Figure out bboxes from slices     #
    ########################################
    # Shape: [batch, num_anchors, H, W]
    bx = torch.cat(bx_list, dim=1)
    by = torch.cat(by_list, dim=1)
    bw = torch.cat(bw_list, dim=1)
    bh = torch.cat(bh_list, dim=1)
    # Shape: [batch, 2 * num_anchors, H, W]
    bx_bw = torch.cat((bx, bw), dim=1)
    by_bh = torch.cat((by, bh), dim=1)
    # normalize coordinates to [0, 1]
    bx_bw /= output.size(3)
    by_bh /= output.size(2)
    # Shape: [batch, num_anchors * H * W, 1]
    bx = bx_bw[:, :num_anchors].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
    by = by_bh[:, :num_anchors].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
    bw = bx_bw[:, num_anchors:].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
    bh = by_bh[:, num_anchors:].view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
    # Convert (center, size) to corner coordinates.
    bx1 = bx - bw * 0.5
    by1 = by - bh * 0.5
    bx2 = bx1 + bw
    by2 = by1 + bh
    # Shape: [batch, num_anchors * H * W, 4] -> [batch, num_anchors * H * W, 1, 4]
    boxes = torch.cat((bx1, by1, bx2, by2), dim=2).view(output.size(0), num_anchors * output.size(2) * output.size(3),
                                                        1, 4)
    # det_confs: [batch, num_anchors * H * W, 1]
    det_confs = det_confs.view(output.size(0), num_anchors * output.size(2) * output.size(3), 1)
    # confs: [batch, num_anchors * H * W, num_classes]
    confs = cls_confs * det_confs
    return boxes, confs
class YoloLayer(object):
    ''' Yolo layer

    Decodes one detection head via yolo_forward_dynamic().

    model_out: while inference, is post-processing inside or outside the model
        true: outside

    Fixes: the mutable default arguments (``anchor_mask=[]``, ``anchors=[]``)
    are replaced with ``None`` sentinels (same behavior, no shared-list
    pitfall), and the leftover debug ``print`` in ``forward`` is removed.
    '''

    def __init__(self, anchor_mask=None, num_classes=0, anchors=None, num_anchors=1, stride=32, model_out=False):
        # None-defaults avoid the shared mutable-default-argument pitfall;
        # passing explicit lists behaves exactly as before.
        self.anchor_mask = [] if anchor_mask is None else anchor_mask
        self.num_classes = num_classes
        self.anchors = [] if anchors is None else anchors
        self.num_anchors = num_anchors
        # number of values stored per anchor in self.anchors (w, h pairs -> 2)
        self.anchor_step = len(self.anchors) // num_anchors
        # NOTE: the scale/thresh attributes below are not used by forward();
        # presumably loss-related -- kept for interface compatibility.
        self.coord_scale = 1
        self.noobject_scale = 1
        self.object_scale = 5
        self.class_scale = 1
        self.thresh = 0.6
        self.stride = stride
        self.seen = 0
        self.scale_x_y = 1
        self.model_out = model_out

    def forward(self, output):
        # Select this head's anchors and rescale them to feature-map units.
        masked_anchors = []
        for m in self.anchor_mask:
            masked_anchors += self.anchors[m * self.anchor_step:(m + 1) * self.anchor_step]
        masked_anchors = [anchor / self.stride for anchor in masked_anchors]
        return yolo_forward_dynamic(output, self.num_classes, masked_anchors,
                                    len(self.anchor_mask), scale_x_y=self.scale_x_y)
def get_region_boxes(boxes_and_confs):
    """Merge the (boxes, confs) pairs produced by every YOLO head.

    Each item is (boxes [batch, Ni, 1, 4], confs [batch, Ni, num_classes]);
    both are concatenated along the detection dimension (dim 1).

    Returns [boxes, confs] with shapes
    [batch, sum(Ni), 1, 4] and [batch, sum(Ni), num_classes].
    """
    all_boxes = [item[0] for item in boxes_and_confs]
    all_confs = [item[1] for item in boxes_and_confs]
    return [torch.cat(all_boxes, dim=1), torch.cat(all_confs, dim=1)]
def nms_cpu(boxes, confs, nms_thresh=0.5, min_mode=False):
    """Greedy non-maximum suppression on CPU (NumPy).

    boxes : [N, 4] array of (x1, y1, x2, y2) corners.
    confs : [N] confidence scores.
    Boxes whose overlap with an already-kept, higher-scoring box exceeds
    nms_thresh are suppressed. With min_mode=True the overlap measure is
    inter / min(area) instead of IoU.

    Returns the indices of the kept boxes, highest score first.
    """
    left, top = boxes[:, 0], boxes[:, 1]
    right, bottom = boxes[:, 2], boxes[:, 3]
    areas = (right - left) * (bottom - top)
    remaining = confs.argsort()[::-1]
    kept = []
    while remaining.size > 0:
        best = remaining[0]
        rest = remaining[1:]
        kept.append(best)
        # Intersection of the best box with every remaining box.
        overlap_w = np.maximum(0.0, np.minimum(right[best], right[rest]) - np.maximum(left[best], left[rest]))
        overlap_h = np.maximum(0.0, np.minimum(bottom[best], bottom[rest]) - np.maximum(top[best], top[rest]))
        inter = overlap_w * overlap_h
        if min_mode:
            over = inter / np.minimum(areas[best], areas[rest])
        else:
            over = inter / (areas[best] + areas[rest] - inter)
        # Keep only the boxes that do not overlap the chosen one too much.
        remaining = rest[np.where(over <= nms_thresh)[0]]
    return np.array(kept)
def nms(conf_thresh, nms_thresh, output):
    """Confidence threshold + per-class NMS over a batch of detections.

    output : [boxes, confs] as produced by get_region_boxes
        (boxes [batch, num, 1, 4], confs [batch, num, num_classes]).
    Returns, per batch image, a list of
    [x1, y1, x2, y2, confidence, class_id] detections.
    """
    box_array, confs = output[0], output[1]
    # Move to NumPy if the inputs are still torch tensors.
    if type(box_array).__name__ != 'ndarray':
        box_array = box_array.cpu().detach().numpy()
        confs = confs.cpu().detach().numpy()
    num_classes = confs.shape[2]
    # [batch, num, 1, 4] -> [batch, num, 4]
    box_array = box_array[:, :, 0]
    # Best class and its score for every candidate box.
    max_conf = np.max(confs, axis=2)
    max_id = np.argmax(confs, axis=2)
    bboxes_batch = []
    for i in range(box_array.shape[0]):
        conf_mask = max_conf[i] > conf_thresh
        cand_boxes = box_array[i, conf_mask, :]
        cand_conf = max_conf[i, conf_mask]
        cand_id = max_id[i, conf_mask]
        bboxes = []
        # Run NMS independently for each class.
        for j in range(num_classes):
            cls_mask = cand_id == j
            cls_boxes = cand_boxes[cls_mask, :]
            cls_conf = cand_conf[cls_mask]
            cls_id = cand_id[cls_mask]
            keep = nms_cpu(cls_boxes, cls_conf, nms_thresh)
            if keep.size > 0:
                cls_boxes = cls_boxes[keep, :]
                cls_conf = cls_conf[keep]
                cls_id = cls_id[keep]
                for k in range(cls_boxes.shape[0]):
                    bboxes.append(
                        [cls_boxes[k, 0], cls_boxes[k, 1], cls_boxes[k, 2], cls_boxes[k, 3],
                         cls_conf[k], cls_id[k]])
        bboxes_batch.append(bboxes)
    return bboxes_batch
def post_process(flags):
names = np.loadtxt(flags.coco_class_names, dtype='str', delimiter='\n')
    # Read the .bin files to generate the prediction results
bin_path = flags.bin_data_path
ori_path = flags.origin_jpg_path
anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401]
num_classes = 80
det_results_path = flags.det_results_path
os.makedirs(det_results_path, exist_ok=True)
total_img = set([name[:name.rfind('_')] for name in os.listdir(bin_path) if "bin" in name])
yolo1 = YoloLayer(anchor_mask=[0, 1, 2], num_classes=num_classes, | |
<filename>coral_model_v0/RunTimeD3D.py
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 13 10:37:17 2020
@author: hendrick
"""
# =============================================================================
# # # # import packages
# =============================================================================
import numpy as np
import pandas as pd
import matplotlib.tri as tri
import matplotlib.pyplot as plt
import bmi.wrapper
import os
from tqdm import tqdm
import datetime
from netCDF4 import Dataset
import faulthandler
faulthandler.enable()
# =============================================================================
# # # # specify directories of ddl- and input-files
# =============================================================================
# Root of the Delft3D installation (network drive) and the two engine DLLs:
# D-Flow FM computational core and the DIMR run controller.
D3D_HOME = os.path.join('p:\\11202744-008-vegetation-modelling', 'code_1709',
                        'windows', 'oss_artifacts_x64_63721', 'x64')
dflow_dir = os.path.join(D3D_HOME, 'dflowfm', 'bin', 'dflowfm.dll')
dimr_path = os.path.join(D3D_HOME, 'dimr', 'bin', 'dimr_dll.dll')
# work directory
model_folder = 'rt003'
workdir = os.path.join('p:\\11202744-008-vegetation-modelling', 'students',
                       'GijsHendrickx', 'models', 'RunTimeD3D', model_folder)
inputdir = os.path.join(workdir, 'timeseries')
# input files (Delft3D)
config_file = os.path.join(workdir, 'dimr_config.xml')
mdufile = os.path.join(workdir, 'fm', 'FlowFM.mdu')
# print directories and input-files as check
print('Delft3D home : {0}'.format(D3D_HOME))
print('DIMR-directory : {0}'.format(dimr_path))
print('Configuration file : {0}'.format(config_file))
# =============================================================================
# # # # prepare locations
# =============================================================================
# # print directories of input- and output-files
print('\nTime-series dir. : {0}'.format(inputdir))
# # intermediate figures
figfolder = os.path.join(workdir, 'figures')
# check existence and create if necessary
if not os.path.exists(figfolder):
    os.mkdir(figfolder)
    print('New folder created : {0}'.format(figfolder))
print('Figure directory : {0}'.format(figfolder))
# # output files
outputfolder = os.path.join(workdir, 'output')
# check existence and create if necessary
if not os.path.exists(outputfolder):
    os.mkdir(outputfolder)
    print('New folder created : {0}'.format(outputfolder))
print('Output directory : {0}'.format(outputfolder))
# =============================================================================
# # # # create correct environment
# =============================================================================
# NOTE(review): this REPLACES the existing PATH with only the Delft3D binary
# and script folders (it does not prepend) -- confirm this is intended.
os.environ['PATH'] = (os.path.join(D3D_HOME, 'share', 'bin') + ';' +
                      os.path.join(D3D_HOME, 'dflowfm', 'bin') + ';' +
                      os.path.join(D3D_HOME, 'dimr', 'bin') + ';' +
                      os.path.join(D3D_HOME, 'dwaves', 'bin') + ';' +
                      os.path.join(D3D_HOME, 'esmf', 'scripts') + ';' +
                      os.path.join(D3D_HOME, 'swan', 'scripts'))
# print created environment as check
print('\nEnvironment : {0}\n'
      .format(os.path.join(D3D_HOME, 'share', 'bin')) +
      ' {0}\n'
      .format(os.path.join(D3D_HOME, 'dflowfm', 'bin')) +
      ' {0}\n'
      .format(os.path.join(D3D_HOME, 'dimr', 'bin')) +
      ' {0}\n'
      .format(os.path.join(D3D_HOME, 'dwaves', 'bin')) +
      ' {0}\n'
      .format(os.path.join(D3D_HOME, 'esmf', 'scripts')) +
      ' {0}\n'
      .format(os.path.join(D3D_HOME, 'swan', 'scripts')))
# =============================================================================
# # # # define and initialize wrappers
# =============================================================================
# define DFM wrapper (hydrodynamics engine)
modelFM = bmi.wrapper.BMIWrapper(engine=dflow_dir, configfile=mdufile)
# define DIMR wrapper (run controller; drives the FM engine)
modelDIMR = bmi.wrapper.BMIWrapper(engine=dimr_path, configfile=config_file)
# initialise model
modelDIMR.initialize()
print('Model initialized.\n')
# =============================================================================
# # # # set the pointers to important model variables of FlowFM
# =============================================================================
# number of boxes, including boundary boxes
ndx = modelFM.get_var('ndx')
# number of non-boundary boxes, i.e. within-domain boxes
ndxi = modelFM.get_var('ndxi')
# x-coord. of the center of gravity of the boxes
xzw = modelFM.get_var('xzw')
# y-coord. of the center of gravity of the boxes
yzw = modelFM.get_var('yzw')
# total number of links between boxes
lnx = modelFM.get_var('lnx')
# number of links between within-domain boxes
lnxi = modelFM.get_var('lnxi')
# link matrix between adjacent boxes [ln, 2] matrix
ln = modelFM.get_var('ln')
# distance between the centers of adjacent boxes
dx = modelFM.get_var('dx')
# width of the interface between adjacent boxes
wu = modelFM.get_var('wu')
# surface area of the boxes
ba = modelFM.get_var('ba')
# =============================================================================
# # # # intermediate plotting function
# =============================================================================
# triangulate face coordinates for plotting (within-domain faces only)
face_triang = tri.Triangulation(xzw[range(ndxi)], yzw[range(ndxi)])
# define basic plotting routine for model output
def showfld(fld, face_triang, lvls, ttl,
            show=True, save=False, unit=None):
    """Plot a model field on the triangulated Delft3D-FM grid.

    Draws a filled contour plot of ``fld`` on ``face_triang`` with contour
    levels ``lvls`` and title ``ttl``; optionally labels the colorbar with
    ``unit``, shows the figure, and/or saves it as ``<ttl>.png`` in the
    module-level figure folder.
    """
    fig = plt.figure()
    plt.tricontourf(face_triang, fld, levels=lvls)
    plt.title(ttl)
    cbar = plt.colorbar(shrink=.5, extend='both', aspect=10)
    if unit is not None:
        cbar.set_label(unit, rotation=270, va='bottom')
    if show:
        plt.show()
    if save:
        target = os.path.join(figfolder, '{0}.png'.format(ttl))
        fig.savefig(target, bbox_inches='tight')
# =============================================================================
# # # # define functions
# =============================================================================
def dc_av(dc, hc, bc, tc):
    """Representative coral diameter for the hydrodynamic model.

    Height-weighted average of the base diameter (over the stem part,
    hc - tc) and the plate diameter (over the plate thickness tc).

    Parameters
    ----------
    dc : numeric
        Diameter of the plate of the coral [m].
    hc : numeric
        Coral height [m].
    bc : numeric
        Diameter of the base of the coral [m].
    tc : numeric
        Thickness of the plate of the coral [m].

    Returns
    -------
    numeric
        Representative coral diameter [m].
    """
    return (bc * (hc - tc) + dc * tc) / hc
def morph2vegden(dc, hc, bc, tc, ac):
    """Convert coral morphology into an equivalent vegetation density.

    The coral is represented in the hydrodynamic model as vegetation; the
    density follows from the representative diameter (dc_av) and the axial
    spacing between corals.

    Parameters
    ----------
    dc : numeric
        Diameter of the plate of the coral [m].
    hc : numeric
        Coral height [m].
    bc : numeric
        Diameter of the base of the coral [m].
    tc : numeric
        Thickness of the plate of the coral [m].
    ac : numeric
        Axial distance between corals [m].

    Returns
    -------
    numeric
        Vegetation density [stems m^-2].
    """
    return (2 * dc_av(dc, hc, bc, tc)) / (ac ** 2)
# =============================================================================
# # # # set time parameters for coupled model
# =============================================================================
# # time-span
# days per coupling-schedule entry (see np.repeat below)
dperm = 7
# simulation time [days]
T = 4 * dperm
# # model time per vegetation step [s]
# Ramp-up schedule: hydrodynamic run time per coupling step shrinks from
# one day (86400 s) to three hours (10800 s); each entry is repeated for
# dperm days, preceded by a single one-hour spin-up step.
mtpervt = np.array([86400,
                    43200, 43200,
                    21600, 21600, 21600, 21600,
                    10800, 10800, 10800, 10800,
                    10800, 10800, 10800, 10800])
mtpervt = np.repeat(mtpervt, int(dperm))
mtpervt = np.append(3600, mtpervt)
# =============================================================================
# # # # define output
# =============================================================================
# # map-file
# map-file directory
mapfile = 'CoralModel_map.nc'
mapfilef = os.path.join(outputfolder, mapfile)
# time-interval > mtpervt
# # his-file
# location(s): observation points read from the FM model folder
xynfilef = os.path.join(workdir, 'fm', 'FlowFm_obs.xyn')
xyn = pd.read_csv(xynfilef, header=None, delim_whitespace=True)
xyn.columns = ['x', 'y', 'name']
# his-file directory
hisfile = 'CoralModel_his.nc'
hisfilef = os.path.join(outputfolder, hisfile)
# time-interval > mtpervt
# =============================================================================
# # # # vegetation boundaries
# =============================================================================
# Bounding box of the within-domain cell centres; the vegetated region
# currently spans the full domain.
xbndmin = min(xzw[range(ndxi)])
xbndmax = max(xzw[range(ndxi)])
ybndmin = min(yzw[range(ndxi)])
ybndmax = max(yzw[range(ndxi)])
xvbndmin = xbndmin
xvbndmax = xbndmax # (xbndmin + xbndmax) / 2
yvbndmin = ybndmin
yvbndmax = ybndmax
# =============================================================================
# # # # initialisation of vegetation variables
# =============================================================================
# # initial morphology
dc0 = .1 # m
hc0 = .2 # m
bc0 = dc0
tc0 = hc0
ac0 = .2 # m
# # carrying capacity
K = np.zeros(ndxi)
# NOTE(review): xzw/yzw are the full get_var arrays while K has length ndxi;
# the boolean mask below is only safe if their lengths match -- confirm.
K[np.logical_and.reduce((xzw >= xvbndmin,
                         xzw <= xvbndmax,
                         yzw >= yvbndmin,
                         yzw <= yvbndmax))] = 1.
# # morphological dimensions
# diameter plate
dc = K * dc0
# coral height ~ stemheight (pushed into the FM model)
hc = modelFM.get_var('stemheight')
hc[range(ndxi)] = K * hc0
modelFM.set_var('stemheight', hc)
# diameter base
bc = K * bc0
# thickness plate
tc = K * tc0
# axial distance
ac = K * ac0
# representative diameter
diaveg = modelFM.get_var('diaveg')
diaveg[range(ndxi)] = K * dc_av(dc0, hc0, bc0, tc0)
modelFM.set_var('diaveg', diaveg)
# 'vegetation' density
rnveg = modelFM.get_var('rnveg')
rnveg[range(ndxi)] = K * morph2vegden(dc0, hc0, bc0, tc0, ac0)
modelFM.set_var('rnveg', rnveg)
# =============================================================================
# # # # run the model
# =============================================================================
print('Start time : {0}\n'.format(datetime.datetime.now().time()))
for i in tqdm(range(len(mtpervt))):
# # update hydrodynamic model
modelDIMR.update(mtpervt[i])
# # extract variables from DFM via BMI
# flow characteristics
is_sumvalsnd = modelFM.get_var('is_sumvalsnd')
is_maxvalsnd = modelFM.get_var('is_maxvalsnd')
is_dtint = modelFM.get_var('is_dtint')
uwav = modelFM.get_var('Uorb')
twav = modelFM.get_var('twav')
# param[range(ndxi), i]
# > i = 0 : shear stress [tau]
# > i = 1 : flow velocity [vel]
# > i = 2 : water depth [wd]
# morphological characteristics
# # calculate (mean and max.) values from DFM data
vel_mean = is_sumvalsnd[range(ndxi), 1] / is_dtint
vel_max = is_maxvalsnd[range(ndxi), 1]
wd_mean = is_sumvalsnd[range(ndxi), 2] / is_dtint
# # show intermediate model output
# # return coral data to hydrodynamic model
# reset counters
is_sumvalsnd.fill(0.)
is_maxvalsnd.fill(0.)
is_dtint.fill(0.)
# push counters and updated coral field to model
modelFM.set_var('is_sumvalsnd', is_sumvalsnd)
modelFM.set_var('is_maxvalsnd', is_maxvalsnd)
modelFM.set_var('is_dtint', is_dtint)
# # write model results in file
# map-file
if i == 0:
mset = Dataset(mapfilef, 'w', format='NETCDF4')
mset.description = 'Mapped simulation data of the RunTimeD3D-models.'
# dimensions
mset.createDimension('time', None)
mset.createDimension('nmesh2d_face', int(ndxi))
# variables
dt = mset.createVariable('time', int, ('time',))
dt.long_name = 'Delft3D-FM run time per vegetation time-step'
dt.units = 's'
x = mset.createVariable('mesh2d_x', 'f8', ('nmesh2d_face',))
x.long_name = 'x-coordinate'
x.units = 'm'
y = mset.createVariable('mesh2d_y', 'f8', ('nmesh2d_face',))
y.long_name = 'y-coordinate'
y.units = 'm'
Uwset = mset.createVariable('uw', 'f8',
('time', 'nmesh2d_face'))
Uwset.long_name = 'wave orbital velocity'
Uwset.units = 'm s^-1'
Ubset = mset.createVariable('ub', 'f8',
('time', 'nmesh2d_face'))
Ubset.long_name = 'mean bulk flow velocity'
Ubset.units = 'm s^-1'
Umset = mset.createVariable('ubmax', 'f8',
('time', 'nmesh2d_face'))
Umset.long_name = 'maximum bulk flow velocity'
Umset.units = 'm s^-1'
Hmset = mset.createVariable('h', 'f8',
('time', 'nmesh2d_face'))
Hmset.long_name = 'mean water depth'
Hmset.units = 'm'
Twset = mset.createVariable('Tp', 'f8',
('time', 'nmesh2d_face'))
Twset.long_name = 'peak wave period'
Twset.units = 's'
# data
dt[:] = mtpervt[i]
x[:] = xzw[range(ndxi)]
y[:] = yzw[range(ndxi)]
Uwset[-1, :] = uwav[range(ndxi)]
Ubset[-1, :] = vel_mean[range(ndxi)]
Umset[-1, :] = vel_max[range(ndxi)]
Hmset[-1, :] = wd_mean[range(ndxi)]
Twset[-1, :] = twav[range(ndxi)]
else:
mset = Dataset(mapfilef, mode='a')
# append data
mset['time'][:] = np.append(mset['time'][:], mtpervt[i])
mset['uw'][-1, :] = uwav[range(ndxi)]
mset['ub'][-1, :] = vel_mean[range(ndxi)]
mset['ubmax'][-1, :] = vel_max[range(ndxi)]
mset['h'][-1, :] = wd_mean[range(ndxi)]
mset['Tp'][-1, :] = twav[range(ndxi)]
mset.close()
# his-file
if i == 0:
hset = Dataset(hisfilef, 'w', format='NETCDF4')
hset.description = 'Historic | |
'US IMAGE IOD': ['Study'],
'GENERAL ECG IOD': ['Study'],
'XRF IMAGE IOD': ['Study'],
'ENCAPSULATED CDA IOD': ['Study'],
'ENHANCED SR IOD': ['Study'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Study'],
'GENERAL AUDIO WAVEFORM IOD': ['Study'],
'MR IMAGE IOD': ['Study'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Study'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Study'],
'ARTERIAL PULSE WAVEFORM IOD': ['Study'],
},
# Trim
0x20100140L: {
'BASIC FILM BOX IOD': ['Basic Film Box'],
None: ['Basic Film Box'],
},
# ImagingFrequency
0x00180084L: {
'MR IMAGE IOD': ['Image'],
None: ['Image'],
},
# DetectorElementSpacing
0x00187022L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# ConventionalMachineVerificationSequence
0x00741044L: {
'RT CONVENTIONAL MACHINE VERIFICATION IOD': ['Rt Conventional Machine Verification'],
None: ['Rt Conventional Machine Verification'],
},
# TimeDistributionProtocol
0x00181802L: {
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Frame of Reference'],
None: ['Frame of Reference'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Frame of Reference'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Frame of Reference'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Frame of Reference'],
'12-LEAD ECG IOD': ['Frame of Reference'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Frame of Reference'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Frame of Reference'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Frame of Reference'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Frame of Reference'],
'BASIC CARDIAC EP IOD': ['Frame of Reference'],
'BASIC VOICE AUDIO IOD': ['Frame of Reference'],
'HEMODYNAMIC IOD': ['Frame of Reference'],
'US IMAGE IOD': ['Frame of Reference'],
'AMBULATORY ECG IOD': ['Frame of Reference'],
'GENERAL ECG IOD': ['Frame of Reference'],
'XRF IMAGE IOD': ['Frame of Reference'],
'RESPIRATORY WAVEFORM IOD': ['Frame of Reference'],
'X-RAY RADIATION DOSE SR IOD': ['Frame of Reference'],
'GENERAL AUDIO WAVEFORM IOD': ['Frame of Reference'],
'PROCEDURE LOG IOD': ['Frame of Reference'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Frame of Reference'],
'ARTERIAL PULSE WAVEFORM IOD': ['Frame of Reference'],
},
# SurfaceModelScalingFactor
0x00686390L: {
'GENERIC IMPLANT TEMPLATE IOD': ['Implant Template'],
None: ['Implant Template'],
},
# ToleranceTableSequence
0x300A0040L: {
'RT PLAN IOD': ['Plan'],
None: ['Plan'],
},
# MaximumAlongScanDistortion
0x00220038L: {
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
None: ['Image'],
},
# AdmittingDiagnosesDescription
0x00081080L: {
'BASIC STRUCTURED DISPLAY IOD': ['Study'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Study'],
'RT BRACHY TREATMENT RECORD IOD': ['Study'],
'RT STRUCTURE SET IOD': ['Study'],
'RT PLAN IOD': ['Study'],
'CR IMAGE IOD': ['Study'],
'RAW DATA IOD': ['Study'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Study'],
'ENHANCED MR IMAGE IOD': ['Study'],
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
'BASIC CARDIAC EP IOD': ['Study'],
'RT TREATMENT SUMMARY RECORD IOD': ['Study'],
'12-LEAD ECG IOD': ['Study'],
'RESPIRATORY WAVEFORM IOD': ['Study'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Study'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Study'],
'BASIC VOICE AUDIO IOD': ['Study'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Study'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Study'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Study'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Study'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Study'],
'BASIC TEXT SR IOD': ['Study'],
'NM IMAGE IOD': ['Study'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'LENSOMETRY MEASUREMENTS IOD': ['Study'],
'MR SPECTROSCOPY IOD': ['Study'],
'ENCAPSULATED PDF IOD': ['Study'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Study'],
'CHEST CAD SR IOD': ['Study'],
'HEMODYNAMIC IOD': ['Study'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Study'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Study'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Study'],
'ENHANCED MR COLOR IMAGE IOD': ['Study'],
'ENHANCED CT IMAGE IOD': ['Study'],
'X-RAY RADIATION DOSE SR IOD': ['Study'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Study'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Study'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Study'],
'STEREOMETRIC RELATIONSHIP IOD': ['Study'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Study'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Study'],
'VL ENDOSCOPIC IMAGE IOD': ['Study'],
'KERATOMETRY MEASUREMENTS IOD': ['Study'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Study'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Study'],
'COMPREHENSIVE SR IOD': ['Study'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Study'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Study'],
'SPATIAL FIDUCIALS IOD': ['Study'],
'RT ION PLAN IOD': ['Study'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Study'],
'CT IMAGE IOD': ['Study'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Study'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Study'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Study'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'RT DOSE IOD': ['Study'],
'AMBULATORY ECG IOD': ['Study'],
'SURFACE SEGMENTATION IOD': ['Study'],
'MAMMOGRAPHY CAD SR IOD': ['Study'],
'VL MICROSCOPIC IMAGE IOD': ['Study'],
'RT BEAMS TREATMENT RECORD IOD': ['Study'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Study'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Study'],
'RT IMAGE IOD': ['Study'],
'SC IMAGE IOD': ['Study'],
None: ['Study', 'Unified Procedure Step'],
'SEGMENTATION IOD': ['Study'],
'PET IMAGE IOD': ['Study'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'DIGITAL X-RAY IMAGE IOD': ['Study'],
'REAL WORLD VALUE MAPPING IOD': ['Study'],
'SPATIAL REGISTRATION IOD': ['Study'],
'COLON CAD SR IOD': ['Study'],
'INTRAVASCULAR OCT IMAGE IOD': ['Study'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Study'],
'ENHANCED PET IMAGE IOD': ['Study'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Study'],
'US MULTI-FRAME IMAGE IOD': ['Study'],
'ENHANCED X-RAY RF IMAGE IOD': ['Study'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Study'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Study'],
'US IMAGE IOD': ['Study'],
'GENERAL ECG IOD': ['Study'],
'XRF IMAGE IOD': ['Study'],
'ENCAPSULATED CDA IOD': ['Study'],
'ENHANCED SR IOD': ['Study'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Study'],
'GENERAL AUDIO WAVEFORM IOD': ['Study'],
'MR IMAGE IOD': ['Study'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Study'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Study'],
'ARTERIAL PULSE WAVEFORM IOD': ['Study'],
},
# ReconstructionMethod
0x00541103L: {
'PET IMAGE IOD': ['Series'],
None: ['Series'],
},
# IVUSAcquisition
0x00183100L: {
'US MULTI-FRAME IMAGE IOD': ['Image'],
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'US IMAGE IOD': ['Image'],
None: ['Image'],
},
# ReferencedFrameNumber
0x00081160L: {
'SPECTACLE PRESCIPTION REPORT IOD': ['Document'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Document'],
'MAMMOGRAPHY CAD SR IOD': ['Document'],
'BASIC TEXT SR IOD': ['Document'],
'X-RAY RADIATION DOSE SR IOD': ['Document'],
'PROCEDURE LOG IOD': ['Document'],
'ENHANCED SR IOD': ['Document'],
'CHEST CAD SR IOD': ['Document'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Document'],
None: ['Document'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Document'],
'COMPREHENSIVE SR IOD': ['Document'],
'COLON CAD SR IOD': ['Document'],
},
# TableTopRollAngle
0x300A0144L: {
'RT IMAGE IOD': ['Image'],
None: ['Image'],
},
# VisualFieldCatchTrialSequence
0x00240034L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# ImagedNucleus
0x00180085L: {
'MR IMAGE IOD': ['Image'],
None: ['Image'],
},
# ProcedureStepState
0x00741000L: {
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
None: ['Unified Procedure Step'],
},
# TemporalPositionIdentifier
0x00200100L: {
'MR IMAGE IOD': ['Image'],
None: ['Image'],
},
# PixelSpacing
0x00280030L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Image', 'Dose'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'CT IMAGE IOD': ['Image'],
'NM IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'RT DOSE IOD': ['Dose'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Image'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
'VL MICROSCOPIC IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Image'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Image'],
'VL ENDOSCOPIC IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
},
# ImageHorizontalFlip
0x00700041L: {
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
None: ['Presentation State'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
},
# PerformedStationGeographicLocationCodeSequence
0x00404030L: {
'GENERAL PURPOSE PERFORMED PROCEDURE STEP IOD': ['General Purpose Performed Procedure Step'],
None: ['General Purpose Performed Procedure Step'],
},
# LowRRValue
0x00181081L: {
'US IMAGE IOD': ['Image'],
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
None: ['Image', 'Equipment'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'PET IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'ENHANCED CT IMAGE IOD': ['Image'],
},
# TomoTime
0x00181480L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
},
# EnergyWindowInformationSequence
0x00540012L: {
'NM IMAGE IOD': ['Image'],
| |
conversion to records on 1D series"
series = ts.time_series([1, 2, 3],
start_date=ts.Date('M', '2001-01-01'),
mask=[0, 1, 0])
ndtype = [('_dates', int), ('_data', int), ('_mask', bool)]
control = np.array([(24001, 1, False),
(24002, 2, True),
(24003, 3, False)], dtype=ndtype)
test = series.torecords()
assert_equal(test, control)
def test_torecords_2D(self):
    """Check that torecords() flattens a 2D series into (_dates, _data, _mask) records."""
    ser = ts.time_series([[1, 1], [2, 2], [3, 3]],
                         start_date=ts.Date('M', '2001-01-01'),
                         mask=[[0, 1], [0, 0], [1, 0]])
    # Each record carries the date ordinal plus a 2-element data/mask pair.
    rec_dtype = [('_dates', int),
                 ('_data', (int, (2,))),
                 ('_mask', (bool, (2,)))]
    expected = np.array([(24001, [1, 1], [False, True]),
                         (24002, [2, 2], [False, False]),
                         (24003, [3, 3], [True, False])],
                        dtype=rec_dtype)
    assert_equal_records(ser.torecords(), expected)
def test_torecords_structured(self):
    """Check torecords() on a structured series: field layout survives in _data/_mask."""
    ser = ts.time_series([(1, 1), (2, 2), (3, 3)],
                         start_date=ts.Date('M', '2001-01-01'),
                         mask=[(0, 1), (0, 0), (1, 0)],
                         dtype=[('a', int), ('b', float)])
    # The record dtype mirrors the series dtype field-for-field.
    rec_dtype = [('_dates', int),
                 ('_data', [('a', int), ('b', float)]),
                 ('_mask', [('a', bool), ('b', bool)])]
    expected = np.array([(24001, (1, 1), (False, True)),
                         (24002, (2, 2), (False, False)),
                         (24003, (3, 3), (True, False))],
                        dtype=rec_dtype)
    assert_equal_records(ser.torecords(), expected)
def test_reshape_1D(self):
    """Reshaping a 1-variable series must reshape data, mask and dates alike."""
    first = ts.Date('M', '2001-01')
    ser = ts.time_series([1, 2, 3, 4], mask=[0, 0, 1, 0],
                         start_date=first)
    expected = ts.time_series([[1, 2], [3, 4]], mask=[[0, 0], [1, 0]],
                              dates=ts.date_array(start_date=first,
                                                  length=4).reshape(2, 2))

    def check(candidate):
        # Data, mask and dates must all match, and the (empty) variable
        # shape of the original 1D series must be preserved.
        assert_equal(candidate, expected)
        assert_equal(candidate.mask, expected.mask)
        assert_equal(candidate.dates, expected.dates)
        assert_equal(candidate.varshape, ser.varshape)

    # Reshaping through the method...
    check(ser.reshape(2, 2))
    # ... and through direct assignment to .shape on a copy.
    resized = ser.copy()
    resized.shape = (2, 2)
    check(resized)
def test_reshape_1V(self):
    "Test reshape on series w/ 2 variables"
    # A 2x2 series holds 2 dates x 2 variables; reshaping must keep the
    # dates aligned with the data, so the expected dates are reshaped too.
    series = ts.time_series([[1, 2], [3, 4]],
                            mask=[[0, 0], [1, 0]],
                            start_date=ts.Date('M', '2001-01'))
    test = series.reshape((-1, 1))
    control = ts.time_series([[[1, 2]], [[3, 4]]],
                             mask=[[[0, 0]], [[1, 0]]],
                             dates=series.dates.reshape((-1, 1)))
    assert_equal(test, control)
    assert_equal(test.mask, control.mask)
    assert_equal(test.dates, control.dates)
    assert_equal(test.varshape, control.varshape)
    #
    # Adding a leading singleton axis should behave the same way.
    test = series.reshape((1, -1, 1))
    control = ts.time_series([[[[1, 2]], [[3, 4]]]],
                             mask=[[[[0, 0]], [[1, 0]]]],
                             dates=series.dates.reshape((1, -1, 1)))
    assert_equal(test, control)
    assert_equal(test.mask, control.mask)
    assert_equal(test.dates, control.dates)
def test_reshaping_1D(self):
    """Tests the reshaping of a 1D series.

    Reshaping must propagate to .series and .dates, keep varshape
    unchanged, and must not mutate the original series in place.
    """
    series1D = self.info['series1D']
    newshape = (3, 1)
    test1D = series1D.reshape(newshape)
    assert_equal(test1D.shape, newshape)
    assert_equal(test1D.series.shape, newshape)
    assert_equal(test1D.dates.shape, newshape)
    assert_equal(test1D.varshape, series1D.varshape)
    # Make sure we haven't propagated the new shape back to the original.
    # (assertTrue replaces the failUnless alias, deprecated since 2.7.)
    self.assertTrue(test1D.shape != series1D.shape)
    self.assertTrue(test1D.dates.shape != series1D.dates.shape)
    # Using .shape assignment on a copy
    test1D = series1D.copy()
    test1D.shape = newshape
    assert_equal(test1D.shape, newshape)
    assert_equal(test1D.series.shape, newshape)
    assert_equal(test1D.dates.shape, newshape)
    self.assertTrue(series1D.dates.shape != newshape)
    assert_equal(test1D.varshape, series1D.varshape)
    # Using multiple positional args instead of a tuple
    test1D = series1D.reshape(*newshape)
    assert_equal(test1D.shape, newshape)
    assert_equal(test1D.varshape, series1D.varshape)
def test_reshape_batch(self):
    """Chained reshapes must keep varshape empty for a plain 1D series."""
    base = ts.time_series([1, 2, 3], start_date=ts.now('D'))
    as_column = base.reshape(-1, 1)
    assert_equal(as_column.shape, (3, 1))
    assert_equal(as_column.varshape, ())
    # Round-trip: column and back to flat.
    flattened = base.reshape(-1, 1).reshape(-1)
    assert_equal(flattened.shape, (3,))
    assert_equal(flattened.varshape, ())
def test_reshaping_2D(self):
    "Tests the reshaping of a nV/nD series."
    series3V = self.info['series3V']
    newshape = (1, 3, 3)
    # NOTE(review): the try/except/else wiring below looks inverted --
    # a *successful* reshape falls through to the ``else`` clause and
    # raises, while a NotImplementedError passes silently.  The message
    # text suggests the raise was meant for the ``except`` branch;
    # confirm the intent before changing it.
    try:
        test3V = series3V.reshape(newshape)
        assert_equal(test3V.shape, newshape)
        assert_equal(test3V.series.shape, newshape)
        assert_equal(test3V.dates.shape, (1, 3))
        assert_equal(test3V.varshape, series3V.varshape)
    except NotImplementedError:
        pass
    else:
        raise Exception("Reshaping nV/nD series should be implemented!")
    # Using .shape assignment: same expectations as .reshape() above.
    try:
        test3V = series3V.copy()
        test3V.shape = newshape
        assert_equal(test3V.shape, newshape)
        assert_equal(test3V.series.shape, newshape)
        assert_equal(test3V.dates.shape, (1, 3))
        assert_equal(test3V.varshape, series3V.varshape)
    except NotImplementedError:
        pass
    else:
        raise Exception("Reshaping nV/nD series should be implemented!")
def test_ravel_1D(self):
    """Ravelling an already-flat series must be a no-op."""
    ser = ts.time_series([1, 2, 3, 4],
                         mask=[0, 0, 1, 0],
                         start_date=ts.Date('M', '2009-01'))
    flat = ser.ravel()
    assert_equal(flat, ser)
    # Mask, dates and varshape all come through unchanged.
    for attr in ('mask', 'dates', 'varshape'):
        assert_equal(getattr(flat, attr), getattr(ser, attr))
def test_ravel_1V(self):
    """Ravel of an nD/1V series flattens data, mask and dates together."""
    span = ts.date_array(start_date=ts.Date('M', '2009-01'),
                         length=4)
    ser = ts.time_series([[1, 2], [3, 4]],
                         mask=[[0, 0], [1, 0]],
                         dates=span)
    flat = ser.ravel()
    assert_equal(flat.data, ser.data.ravel())
    assert_equal(flat.mask, ser.mask.ravel())
    assert_equal(flat.dates, ser.dates.ravel())
    # One variable only: the variable shape stays empty.
    assert_equal(flat.varshape, ser.varshape)
    assert_equal(flat.varshape, ())
def test_ravel_2V(self):
    "Test .ravel on 2V data"
    # With 2 variables per date, ravel leaves a (dates, 2) series
    # untouched: only date axes are flattened, not the variable axis.
    series = ts.time_series([[1, 2], [3, 4]],
                            mask=[[0, 0], [1, 0]],
                            start_date=ts.Date('M', '2009-01'),)
    test = series.ravel()
    assert_equal(test.data, series.data)
    assert_equal(test.mask, series.mask)
    assert_equal(test.dates, series.dates)
    assert_equal(test.varshape, series.varshape)
    #
    # A (1, 2) date layout collapses to 2 dates while keeping the
    # 2-element variable axis intact.
    dates = ts.date_array(start_date=ts.Date('M', '2009-01'),
                          length=2)
    series = ts.time_series([[[1, 2]], [[3, 4]]],
                            mask=[[[0, 0]], [[1, 0]]],
                            dates=dates.reshape(1, 2))
    test = series.ravel()
    assert_equal(test.data, [[1, 2], [3, 4]])
    assert_equal(test.mask, [[0, 0], [1, 0]])
    assert_equal(test.dates, series.dates.ravel())
    assert_equal(test.varshape, (2,))
#------------------------------------------------------------------------------
class TestFunctions(TestCase):
"Some getitem tests"
def __init__(self, *args, **kwds):
    """Build the shared fixture: 15 daily points with every 5th value masked."""
    TestCase.__init__(self, *args, **kwds)
    day_strings = ['2007-01-%02i' % i for i in range(1, 16)]
    day_dates = date_array(day_strings, freq='D')
    values = ma.array(np.arange(15), mask=[1, 0, 0, 0, 0] * 3)
    # Stored as (series, raw data, dates) for the individual tests.
    self.d = (time_series(values, day_dates), values, day_dates)
#
def test_adjustendpoints(self):
    "Tests adjust_endpoints"
    (series, data, dates) = self.d
    # Same endpoints: the series comes back unchanged.
    dseries = adjust_endpoints(series, series.dates[0], series.dates[-1])
    assert_equal(dseries, series)
    # Narrower endpoints: plain truncation.
    dseries = adjust_endpoints(series, series.dates[3], series.dates[-3])
    assert_equal(dseries, series[3:-2])
    # Extending past the end pads the tail with masked entries.
    dseries = adjust_endpoints(series,
                               end_date=Date('D', string='2007-01-31'))
    assert_equal(dseries.size, 31)
    assert_equal(dseries._mask, np.r_[series.mask, [1] * 16])
    dseries = adjust_endpoints(series,
                               end_date=Date('D', string='2007-01-06'))
    assert_equal(dseries.size, 6)
    assert_equal(dseries, series[:6])
    # Both endpoints moved: truncation at the start plus masked padding
    # at the end.
    dseries = adjust_endpoints(series,
                               start_date=Date('D', string='2007-01-06'),
                               end_date=Date('D', string='2007-01-31'))
    assert_equal(dseries.size, 26)
    assert_equal(dseries._mask, np.r_[series.mask[5:], [1] * 16])
#
def test_adjustendpoints_withdatestrings(self):
    """adjust_endpoints must also accept plain date strings as endpoints."""
    series = self.d[0]
    # Extending past the last date pads with masked values.
    extended = adjust_endpoints(series, end_date='2007-01-31')
    assert_equal(extended.size, 31)
    assert_equal(extended._mask, np.r_[series.mask, [1] * 16])
    # Shrinking simply truncates.
    truncated = adjust_endpoints(series, end_date='2007-01-06')
    assert_equal(truncated.size, 6)
    assert_equal(truncated, series[:6])
    # Moving both endpoints combines truncation and masked padding.
    shifted = adjust_endpoints(series,
                               start_date='2007-01-06',
                               end_date='2007-01-31')
    assert_equal(shifted.size, 26)
    assert_equal(shifted._mask, np.r_[series.mask[5:], [1] * 16])
#
def test_alignseries(self):
    """align_series / align_with must bring series onto a common date range."""
    (series, data, dates) = self.d
    # Aligning with an empty series: both results share the same span.
    empty = time_series([], freq='d')
    left, right = align_series(series, empty)
    assert_equal(left.start_date, right.start_date)
    assert_equal(left.end_date, right.end_date)
    # align_with maps two shifted series onto the reference's dates.
    later = time_series(data, dates + 10)
    earlier = time_series(data, dates - 10)
    (a, b) = align_with(series, later, earlier)
    assert_equal(a.dates, series.dates)
    assert_equal(b.dates, series.dates)
    # Overlapping windows carry the original values.
    assert_equal(a[-5:], series[:5])
    assert_equal(b[:5], series[-5:])
#
def test_tshift(self):
    "Test tshift function"
    series = self.d[0]
    # tshift(-1): each value appears one step later in time; the vacated
    # leading slot is masked (the 999 filler value is never exposed).
    shift_negative = series.tshift(-1)
    result_data = [999] + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
    result_mask = [ 1] + [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0]
    shift_negative_result = time_series(result_data,
                                        dates=series.dates,
                                        mask=result_mask)
    # tshift(1): values move one step earlier; the trailing slot is masked.
    shift_positive = series.tshift(1)
    result_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] + [999]
    result_mask = [0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0] + [ 1]
    shift_positive_result = time_series(result_data,
                                        dates=series.dates,
                                        mask=result_mask)
    assert_array_equal(shift_negative, shift_negative_result)
    assert_array_equal(shift_positive, shift_positive_result)
#
def test_split(self):
    """split must break a 2-column series into per-column 1D series."""
    multi = time_series(np.arange(62).reshape(31, 2),
                        start_date=Date(freq='d', year=2005, month=7, day=1))
    left, right = split(multi)
    assert_array_equal(left.data, multi.data[:, 0])
    assert_array_equal(left.dates, multi.dates)
    assert_array_equal(right.data, multi.data[:, 1])
    # Splitting an already-1D series yields the series itself.
    single = self.d[0]
    assert_array_equal(single, split(single)[0])
def test_convert(self):
    """Test convert function

    Just check basic functionality. The details of the actual
    date conversion algorithms are already tested by asfreq in the
    test_dates test suite.
    """
    June2005M = Date(freq='M', year=2005, month=6)
    lowFreqSeries = time_series(np.arange(10), start_date=June2005M)
    # Conversion to same frequency is a no-op
    assert_array_equal(lowFreqSeries, lowFreqSeries.convert("M"))
    # Conversion to higher frequency - position=START: each value lands
    # on the first business day of its month, so the first slot of the
    # result is filled and the last one is masked.
    lowToHigh_start = lowFreqSeries.convert('B', position='START')
    assert_equal(lowToHigh_start.start_date,
                 June2005M.asfreq("B", relation="START"))
    assert_equal(lowToHigh_start.end_date,
                 (June2005M + 9).asfreq("B", relation="END"))
    assert_equal(lowToHigh_start.mask[0], False)
    assert_equal(lowToHigh_start.mask[-1], True)
    # Conversion to higher frequency - position=END: the mirror image.
    lowToHigh_end = lowFreqSeries.convert('B', position='END')
    assert_equal(lowToHigh_end.start_date,
                 June2005M.asfreq("B", relation="START"))
    assert_equal(lowToHigh_end.end_date,
                 (June2005M + 9).asfreq("B", relation="END"))
    assert_equal(lowToHigh_end.mask[0], True)
    assert_equal(lowToHigh_end.mask[-1], False)
    # ensure that position argument is not case sensitive
    lowToHigh_start_lowercase = lowFreqSeries.convert('B', position='start')
    assert_array_equal(lowToHigh_start, lowToHigh_start_lowercase)
    #
    # Conversion to lower frequency with func=None groups the source
    # points per target period (here at most 23 business days a month).
    June2005B = Date(freq='b', year=2005, month=6, day=1)
    highFreqSeries = time_series(np.arange(100), start_date=June2005B)
    highToLow = highFreqSeries.convert('M', func=None)
    assert_equal(highToLow.ndim, 2)
    assert_equal(highToLow.shape[1], 23)
    assert_equal(highToLow.start_date, June2005B.asfreq('M'))
    assert_equal(highToLow.end_date, (June2005B + 99).asfreq('M'))
def test_convert_with_func(self):
    """convert with an aggregation function must apply it per target period."""
    monthly = ts.time_series(np.arange(24),
                             mask=[1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
                                   0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1],
                             start_date=ts.Date('M', '2001-01'))
    # last_unmasked_val picks 7 for 2001 (Aug) and 22 for 2002 (Nov).
    annual = monthly.convert('A', func=ts.last_unmasked_val)
    expected = ts.time_series([7, 22], start_date=ts.Date('A', '2001'))
    assert_equal(annual, expected)
def test_convert_nd_with_func(self):
    """convert with a function must aggregate each variable separately."""
    daily = time_series(np.arange(124).reshape(62, 2),
                        start_date=Date(freq='D', string='2005-07-01'))
    monthly = daily.convert('M', sum)
    assert_equal(monthly, [[930, 961], [2852, 2883]])
def test_fill_missing_dates(self):
    """Test the fill_missing_dates function.

    Missing dates must be inserted with their values masked, for 1D,
    2D and (2, 2)-shaped inputs alike.
    """
    _start = Date(freq='m', year=2005, month=1)
    _end = Date(freq='m', year=2005, month=4)
    #
    dates = date_array([_start, _end], freq='M')
    series = time_series([1, 2], dates)
    filled_ser = fill_missing_dates(series)
    #
    assert_equal(filled_ser.start_date, _start)
    assert_equal(filled_ser.end_date, _end)
    # assertTrue replaces the failUnless alias (deprecated since 2.7).
    self.assertTrue(filled_ser.is_full())
    self.assertTrue(not filled_ser.has_duplicated_dates())
    assert_equal(filled_ser.size, _end - _start + 1)
    #
    # 2D input: July 4th is missing from the date list, so the filled
    # series gains one fully-masked row.
    data = np.arange(5 * 24).reshape(5, 24)
    datelist = ['2007-07-0%i' % i for i in (1, 2, 3, 5, 6)]
    dates = date_array(datelist, freq='D')
    dseries = time_series(data, dates)
    # (An unused `ndates` date_array was built here originally; removed.)
    fseries = fill_missing_dates(dseries)
    assert_equal(fseries.shape, (6, 24))
    assert_equal(fseries.mask[:, 0], [0, 0, 0, 1, 0, 0])
    #
    fseries = fill_missing_dates(dseries[:, 0])
    assert_equal(fseries.shape, (6,))
    assert_equal(fseries.mask, [0, 0, 0, 1, 0, 0])
    #
    # A (2, 2) layout over 4 dates is flattened while being filled.
    series = time_series(data.ravel()[:4].reshape(2, 2), dates=dates[:-1])
    fseries = fill_missing_dates(series)
    assert_equal(fseries.shape, (5,))
    assert_equal(fseries.mask, [0, 0, 0, 1, 0, ])
def test_fill_missing_dates_structured_arrays(self):
"Test fill_missing_dates on structured arrays"
ndtype = [('a', float), ('b', float)]
series = ts.time_series([(1, 1), (2, 2), (3, | |
media."""
return await self.request(ep.MEDIA_CLOSE)
async def rewind(self):
    """Rewind media.

    Thin wrapper: forwards to the MEDIA_REWIND endpoint and passes back
    whatever self.request() returns.
    """
    return await self.request(ep.MEDIA_REWIND)
async def fast_forward(self):
    """Fast Forward media.

    Thin wrapper: forwards to the MEDIA_FAST_FORWARD endpoint and passes
    back whatever self.request() returns.
    """
    return await self.request(ep.MEDIA_FAST_FORWARD)
# Keys
async def send_enter_key(self):
    """Send the enter key.

    Thin wrapper around the SEND_ENTER endpoint.
    """
    return await self.request(ep.SEND_ENTER)
async def send_delete_key(self):
    """Send the delete key.

    Thin wrapper around the SEND_DELETE endpoint.
    """
    return await self.request(ep.SEND_DELETE)
# Text entry
async def insert_text(self, text, replace=False):
    """Type *text* into the focused input field.

    When *replace* is true the existing field content is replaced
    instead of appended to.
    """
    payload = {"text": text, "replace": replace}
    return await self.request(ep.INSERT_TEXT, payload)
# Web
async def open_url(self, url):
    """Open *url* on the TV (via the OPEN endpoint)."""
    payload = {"target": url}
    return await self.request(ep.OPEN, payload)
async def close_web(self):
    """Close web app.

    Thin wrapper around the CLOSE_WEB_APP endpoint.
    """
    return await self.request(ep.CLOSE_WEB_APP)
async def luna_request(self, uri, params):
    """Invoke an internal luna API endpoint.

    n.b. this is a hack which abuses the alert API to call the internal
    luna API, which is otherwise not exposed through the websocket
    interface.  An important limitation is that any data returned by the
    luna call is not accessible.

    Raises PyLGTVCmdException if no alert id comes back.
    """
    lunauri = f"luna://{uri}"
    # Attach the same call to click, close and fail for
    # redundancy/robustness.
    action = {"uri": lunauri, "params": params}
    payload = {
        "message": " ",
        "buttons": [{"label": "", "onClick": lunauri, "params": params}],
        "onclose": action,
        "onfail": action,
    }
    response = await self.request(ep.CREATE_ALERT, payload)
    alert_id = response.get("alertId")
    if alert_id is None:
        raise PyLGTVCmdException("Invalid alertId")
    # Immediately dismiss the alert so nothing stays on screen.
    return await self.request(ep.CLOSE_ALERT, payload={"alertId": alert_id})
async def input_button(self):
    """Show the input picker (via the luna input-picker endpoint)."""
    return await self.luna_request(ep.LUNA_SHOW_INPUT_PICKER, {})
async def set_current_picture_mode(self, pic_mode):
    """Switch the picture mode of the currently active input, dynamic
    range and 3d mode.

    Known picture modes are: cinema, eco, expert1, expert2, game,
    normal, photo, sports, technicolor, filmMaker, vivid, hdrEffect,
    hdrCinema, hdrCinemaBright, hdrExternal, hdrGame, hdrStandard,
    hdrTechnicolor, hdrFilmMaker, hdrVivid, dolbyHdrCinema,
    dolbyHdrCinemaBright, dolbyHdrDarkAmazon, dolbyHdrGame,
    dolbyHdrStandard, dolbyHdrVivid, dolbyStandard.

    Likely not all modes are valid for all tv models.
    """
    new_settings = {"pictureMode": pic_mode}
    return await self.luna_request(
        ep.LUNA_SET_SYSTEM_SETTINGS,
        {"category": "picture", "settings": new_settings},
    )
async def set_picture_mode(
    self, pic_mode, tv_input, dynamic_range="sdr", stereoscopic="2d"
):
    """Set picture mode for specific input, dynamic range and 3d mode.

    Known picture modes are: cinema, eco, expert1, expert2, game,
    normal, photo, sports, technicolor, filmMaker, vivid, hdrEffect, hdrCinema,
    hdrCinemaBright, hdrExternal, hdrGame, hdrStandard, hdrTechnicolor, hdrFilmMaker,
    hdrVivid, dolbyHdrCinema, dolbyHdrCinemaBright, dolbyHdrDarkAmazon,
    dolbyHdrGame, dolbyHdrStandard, dolbyHdrVivid, dolbyStandard

    Known inputs are: atv, av1, av2, camera, comp1, comp2, comp3,
    default, dtv, gallery, hdmi1, hdmi2, hdmi3, hdmi4,
    hdmi1_pc, hdmi2_pc, hdmi3_pc, hdmi4_pc, ip, movie,
    photo, pictest, rgb, scart, smhl

    Known dynamic range modes are: sdr, sdrALLM, hdr, hdrALLM,
    technicolorHdr, technicolorHdrALLM, dolbyHdr, dolbyHdrALLM

    Known stereoscopic modes are: 2d, 3d

    Likely not all inputs and modes are valid for all tv models.
    """
    # The category string encodes input/3d/dynamic-range selectors after
    # a "$" separator; "x" in the picture-mode slot appears to act as a
    # wildcard -- TODO confirm against the settings-service behavior.
    params = {
        "category": f"picture${tv_input}.x.{stereoscopic}.{dynamic_range}",
        "settings": {"pictureMode": pic_mode},
    }
    return await self.luna_request(ep.LUNA_SET_SYSTEM_SETTINGS, params)
async def set_current_picture_settings(self, settings):
    """Set picture settings for the current picture mode, input, dynamic
    range and 3d mode.

    A possible list of settings and example values is below (not all
    settings are applicable for all modes and/or tv models).  The 22- and
    10-point calibration curves are lists of that many integers,
    abbreviated here as ``[0, ...x22]`` / ``[0, ...x10]``.
    Source: /etc/palm/defaultSettings.json

        "adjustingLuminance": [0, ...x22],
        "adjustingLuminance10pt": [0, ...x10],
        "ambientLightCompensation": "off",
        "backlight": "80",
        "blackLevel": {
            "ntsc": "low", "ntsc443": "auto", "pal": "auto",
            "pal60": "auto", "palm": "low", "paln": "high",
            "secam": "auto", "unknown": "auto"
        },
        "brightness": "50",
        "color": "50",
        "colorFilter": "off",
        "colorGamut": "auto",
        "colorManagementColorSystem": "red",
        "colorManagementHueBlue": "0",
        "colorManagementHueCyan": "0",
        "colorManagementHueGreen": "0",
        "colorManagementHueMagenta": "0",
        "colorManagementHueRed": "0",
        "colorManagementHueYellow": "0",
        "colorManagementLuminanceBlue": "0",
        "colorManagementLuminanceCyan": "0",
        "colorManagementLuminanceGreen": "0",
        "colorManagementLuminanceMagenta": "0",
        "colorManagementLuminanceRed": "0",
        "colorManagementLuminanceYellow": "0",
        "colorManagementSaturationBlue": "0",
        "colorManagementSaturationCyan": "0",
        "colorManagementSaturationGreen": "0",
        "colorManagementSaturationMagenta": "0",
        "colorManagementSaturationRed": "0",
        "colorManagementSaturationYellow": "0",
        "colorTemperature": "0",
        "contrast": "80",
        "dynamicColor": "off",
        "dynamicContrast": "off",
        "edgeEnhancer": "on",
        "expertPattern": "off",
        "externalPqlDbType": "none",
        "gamma": "high2",
        "grassColor": "0",
        "hPosition": "0",
        "hSharpness": "10",
        "hSize": "0",
        "hdrDynamicToneMapping": "on",
        "hdrLevel": "medium",
        "localDimming": "medium",
        "motionEyeCare": "off",
        "motionPro": "off",
        "motionProOLED": "off",
        "mpegNoiseReduction": "off",
        "noiseReduction": "off",
        "peakBrightness": "off",
        "pictureTempKey": "off",
        "realCinema": "on",
        "sharpness": "10",
        "skinColor": "0",
        "skyColor": "0",
        "smoothGradation": "off",
        "superResolution": "off",
        "tint": "0",
        "truMotionBlur": "10",
        "truMotionJudder": "0",
        "truMotionMode": "user",
        "vPosition": "0",
        "vSharpness": "10",
        "vSize": "0",
        "whiteBalanceApplyAllInputs": "off",
        "whiteBalanceBlue": [0, ...x22],
        "whiteBalanceBlue10pt": [0, ...x10],
        "whiteBalanceBlueGain": "0",
        "whiteBalanceBlueOffset": "0",
        "whiteBalanceCodeValue": "21",
        "whiteBalanceCodeValue10pt": "9",
        "whiteBalanceColorTemperature": "warm2",
        "whiteBalanceGreen": [0, ...x22],
        "whiteBalanceGreen10pt": [0, ...x10],
        "whiteBalanceGreenGain": "0",
        "whiteBalanceGreenOffset": "0",
        "whiteBalanceIre": "100",
        "whiteBalanceIre10pt": "100",
        "whiteBalanceLuminance": "130",
        "whiteBalanceMethod": "2",
        "whiteBalancePattern": "outer",
        "whiteBalancePoint": "high",
        "whiteBalanceRed": [0, ...x22],
        "whiteBalanceRed10pt": [0, ...x10],
        "whiteBalanceRedGain": "0",
        "whiteBalanceRedOffset": "0",
        "xvycc": "auto"
    """
    params = {"category": "picture", "settings": settings}
    return await self.luna_request(ep.LUNA_SET_SYSTEM_SETTINGS, params)
async def set_picture_settings(
    self, settings, pic_mode, tv_input, stereoscopic="2d"
):
    """Set picture settings for a specific picture mode, input and 3d mode."""
    # Selectors are encoded in the category string after a "$" separator.
    category = f"picture${tv_input}.{pic_mode}.{stereoscopic}.x"
    return await self.luna_request(
        ep.LUNA_SET_SYSTEM_SETTINGS,
        {"category": category, "settings": settings},
    )
async def set_other_settings(self, settings):
    """Set system settings in the "other" category.

    A possible list of settings and example values is below (not all
    settings are applicable for all tv models/firmwares).  Per-input
    variants such as "colorimetryHDMI1" .. "colorimetryHDMI4" are
    abbreviated as ``...HDMI1-4``.  Source: /etc/palm/defaultSettings.json

        "amazonHotkeyIsActive": true,
        "appReturn": "",
        "blackStabilizer": 10,
        "blueLight": "off",
        "care365": {"accountName": "", "accountNumber": "",
                    "userAgreementLocation": "", "userAgreementVersion": "",
                    "value": "off"},
        "colorimetry", "colorimetryHDMI1-4": "auto",
        "cursorAutoRemover": "on",
        "dolbyVSVDBVer", "dolbyVSVDBVerHDMI1-4": "v2",
        "enableQuickGame": "on",
        "eotf", "eotfHDMI1-4": "auto",
        "epgRowCount": "1",
        "flickerPatternCtrl": false,
        "freesync", "freesyncLCDHDMI1-4", "freesyncOLEDHDMI1-4",
            "freesyncSupport": "off",
        "freeviewTnCPopup": "off",
        "gameGenre": "Standard",
        "gameMode": {"hdmi1": "off", "hdmi2": "off",
                     "hdmi3": "off", "hdmi4": "off"},
        "gameOptimization", "gameOptimizationHDMI1-4": "on",
        "gameUIColor": "violet",
        "hdmiPcMode": {"hdmi1": false, "hdmi2": false,
                       "hdmi3": false, "hdmi4": false},
        "homeEffectVersion": [{"id": "Christmas", "version": 1.0},
                              {"id": "Halloween", "version": 1.0}],
        "illuminanceThreshold": 0,
        "inputOptimization": "auto",
        "isFirstCapture": "true",
        "isfUpdated": "false",
        "lowLevelAdjustment": 0,
        "lgLogoDisplay": "on",
        "mapping_info": list of one-key dicts, one per service (movies,
            netflix, amazon, ivi, disneyplus, lgchannels, rakutentv,
            globoplay, okko, kinopoisk, watchaplay, unext, hotstar, new),
            each mapping to {"app_id": <id or null>, "isActive": bool,
            "launch_param": null},
        "masterLuminanceLevel": "540nit",
        "masteringColor", "masteringColorHDMI1-4": "auto",
        "masteringPeak", "masteringPeakHDMI1-4": "auto",
        "maxCLL", "maxCLLHDMI1-4": "auto",
        "maxFALL", "maxFALLHDMI1-4": "auto",
        "netflixHotkeyIsActive": true,
        "newKey": "on",
        "quickSettingsMenuList": ["QuickSettings_picture_button",
            "QuickSettings_soundMode_button", "QuickSettings_soundOut_button",
            "QuickSettings_game_button", "QuickSettings_timer_button",
            "QuickSettings_network_button", "QuickSettings_menu_button"],
        "screenRemoteAutoShow": "true",
        "screenRemoteExpanded": "false",
        "screenRemotePosition": "right",
        "simplinkAutoPowerOn": "on",
        "simplinkEnable": "off",
        "supportAirplay": false,
        "supportBnoModel": false,
        "ueiEnable": "off",
        "uhdDeepColor8kHDMI1-4": "off",
        "uhdDeepColorAutoStatusHDMI1-4": "none",
        "uhdDeepColor", "uhdDeepColorHDMI1-4": "off",
        "weatherAllowed": false,
        "whiteStabilizer": 10
    """
    params = {"category": "other", "settings": settings}
    return await self.luna_request(ep.LUNA_SET_SYSTEM_SETTINGS, params)
async def set_option_settings(self, settings):
"""Set option settings.
A possible list of settings and example values are below (not all settings are applicable
for all tv models/firmwares):
/etc/palm/defaultSettings.json
"IPControlSecureKey": "",
"_3dModeEstreamer": "off",
"additionalAudioSelection": "none",
"addressInfo": | |
not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
    # Generated accessor: looks up this node's schema metadata in the
    # package-wide meta table.  The import is local, presumably to avoid
    # a circular import with the generated _meta module -- TODO confirm.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
    return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop1']['meta_info']
class NextHop2(object):
    """
    The second next\-hop settings.
    .. attribute:: next_hop
    The IPv4 address of the next\-hop
    **type**\: str
    .. attribute:: track_name
    The object tracking name for the next\-hop
    **type**\: str
    .. attribute:: vrf_name
    The VRF name of the next\-hop
    **type**\: str
    """

    _prefix = 'ipv4-acl-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.next_hop = None
        self.track_name = None
        self.vrf_name = None

    @property
    def _common_path(self):
        # The XPath of this container is anchored on the parent node's path.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:next-hop-2'

    def is_config(self):
        """Return True: this node represents configuration data."""
        return True

    def _has_data(self):
        """Report whether any leaf of this next-hop has been assigned."""
        if not self.is_config():
            return False
        return any(
            leaf is not None
            for leaf in (self.next_hop, self.track_name, self.vrf_name)
        )

    @staticmethod
    def _meta_info():
        # Deferred import keeps the heavy generated meta module lazy.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
        return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop2']['meta_info']
class NextHop3(object):
    """
    The third next\-hop settings.
    .. attribute:: next_hop
    The IPv4 address of the next\-hop
    **type**\: str
    .. attribute:: track_name
    The object tracking name for the next\-hop
    **type**\: str
    .. attribute:: vrf_name
    The VRF name of the next\-hop
    **type**\: str
    """

    _prefix = 'ipv4-acl-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.next_hop = None
        self.track_name = None
        self.vrf_name = None

    @property
    def _common_path(self):
        # The XPath of this container is anchored on the parent node's path.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:next-hop-3'

    def is_config(self):
        """Return True: this node represents configuration data."""
        return True

    def _has_data(self):
        """Report whether any leaf of this next-hop has been assigned."""
        if not self.is_config():
            return False
        return any(
            leaf is not None
            for leaf in (self.next_hop, self.track_name, self.vrf_name)
        )

    @staticmethod
    def _meta_info():
        # Deferred import keeps the heavy generated meta module lazy.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
        return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop3']['meta_info']
@property
def _common_path(self):
    # XPath of this next-hop container, derived from the parent entry's path.
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:next-hop'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This model node always holds configuration (not operational) data.
    return True
def _has_data(self):
    """Report whether any next-hop child container or leaf is populated."""
    if not self.is_config():
        return False
    # A child counts only when it exists and itself carries data.
    for child in (self.next_hop_1, self.next_hop_2, self.next_hop_3):
        if child is not None and child._has_data():
            return True
    return self.next_hop_type is not None
@staticmethod
def _meta_info():
    # Deferred import of the generated meta module; returns this node's
    # schema-metadata entry from the generated lookup table.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
    return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop']['meta_info']
class Dscp(object):
    """
    DSCP settings.
    .. attribute:: dscp_max
    Maximum DSCP value for comparion, leave unspecified if DSCP comparison is not to be performed or if only the minimum DSCP should be considered
    **type**\: one of the below types:
    **type**\: :py:class:`Ipv4AclDscpNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclDscpNumberEnum>`
    ----
    **type**\: int
    **range:** 0..63
    ----
    .. attribute:: dscp_min
    DSCP value to match or minimum DSCP value for DSCP range comparison, leave unspecified if DSCP comparion is not to be performed
    **type**\: one of the below types:
    **type**\: :py:class:`Ipv4AclDscpNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclDscpNumberEnum>`
    ----
    **type**\: int
    **range:** 0..63
    ----
    .. attribute:: dscp_operator
    DSCP operator is applicable only when DSCP range is configured. Leave unspecified if DSCP range is not required
    **type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>`
    """

    _prefix = 'ipv4-acl-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.dscp_max = None
        self.dscp_min = None
        self.dscp_operator = None

    @property
    def _common_path(self):
        # The XPath of this container is anchored on the parent node's path.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:dscp'

    def is_config(self):
        """Return True: this node represents configuration data."""
        return True

    def _has_data(self):
        """Report whether any DSCP leaf has been assigned."""
        if not self.is_config():
            return False
        return any(
            leaf is not None
            for leaf in (self.dscp_max, self.dscp_min, self.dscp_operator)
        )

    @staticmethod
    def _meta_info():
        # Deferred import keeps the heavy generated meta module lazy.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
        return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Dscp']['meta_info']
@property
def _common_path(self):
    # Keyed list entry: the XPath embeds the sequence-number key value,
    # so both the parent and the key must be set first.
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    if self.sequence_number is None:
        raise YPYModelError('Key property sequence_number is None')
    return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:access-list-entry[Cisco-IOS-XR-ipv4-acl-cfg:sequence-number = ' + str(self.sequence_number) + ']'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This model node always holds configuration (not operational) data.
    return True
def _has_data(self):
    """Report whether any leaf or child container of this ACL entry is set."""
    if not self.is_config():
        return False
    # Plain leaves: present as soon as a value is assigned.
    leaves = (
        self.sequence_number,
        self.capture,
        self.counter_name,
        self.destination_port_group,
        self.destination_prefix_group,
        self.fragments,
        self.grant,
        self.icmp_off,
        self.igmp_message_type,
        self.log_option,
        self.precedence,
        self.protocol,
        self.remark,
        self.sequence_str,
        self.source_port_group,
        self.source_prefix_group,
    )
    if any(leaf is not None for leaf in leaves):
        return True
    # Child containers: count only when they themselves report data.
    children = (
        self.destination_network,
        self.destination_port,
        self.dscp,
        self.icmp,
        self.next_hop,
        self.packet_length,
        self.source_network,
        self.source_port,
        self.tcp,
        self.time_to_live,
    )
    return any(child is not None and child._has_data() for child in children)
@staticmethod
def _meta_info():
    # Deferred import of the generated meta module; returns this node's
    # schema-metadata entry from the generated lookup table.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
    return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry']['meta_info']
@property
def _common_path(self):
    # XPath of the entries container, anchored on the parent access list.
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:access-list-entries'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This model node always holds configuration (not operational) data.
    return True
def _has_data(self):
    """True when at least one child access-list entry carries data."""
    if not self.is_config():
        return False
    if self.access_list_entry is None:
        return False
    return any(entry._has_data() for entry in self.access_list_entry)
@staticmethod
def _meta_info():
    # Deferred import of the generated meta module; returns this node's
    # schema-metadata entry from the generated lookup table.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
    return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries']['meta_info']
@property
def _common_path(self):
    # Absolute XPath: this list hangs directly off the module root, so no
    # parent lookup is needed -- only the key leaf must be set.
    if self.access_list_name is None:
        raise YPYModelError('Key property access_list_name is None')
    return '/Cisco-IOS-XR-ipv4-acl-cfg:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-cfg:accesses/Cisco-IOS-XR-ipv4-acl-cfg:access[Cisco-IOS-XR-ipv4-acl-cfg:access-list-name = ' + str(self.access_list_name) + ']'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This model node always holds configuration (not operational) data.
    return True
def _has_data(self):
    """True when the key leaf or the entries child container holds data."""
    if not self.is_config():
        return False
    if self.access_list_name is not None:
        return True
    entries = self.access_list_entries
    return entries is not None and entries._has_data()
@staticmethod
def _meta_info():
    # Deferred import of the generated meta module; returns this node's
    # schema-metadata entry from the generated lookup table.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
    return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access']['meta_info']
@property
def _common_path(self):
    # Fixed absolute XPath; this container is at the model root and has no keys.
    return '/Cisco-IOS-XR-ipv4-acl-cfg:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-cfg:accesses'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This model node always holds configuration (not operational) data.
    return True
def _has_data(self):
    """True when any child access list carries data."""
    if not self.is_config():
        return False
    if self.access is None:
        return False
    return any(child._has_data() for child in self.access)
@staticmethod
def _meta_info():
    # Deferred import of the generated meta module; returns this node's
    # schema-metadata entry from the generated lookup table.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
    return meta._meta_table['Ipv4AclAndPrefixList.Accesses']['meta_info']
class Prefixes(object):
"""
Table of ACL prefix lists. Entries in this
table and the PrefixListExistenceTable table
must be kept consistent
.. attribute:: prefix
Name of a prefix list
**type**\: list of :py:class:`Prefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Prefixes.Prefix>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
    # Parent node reference; set when this container is attached to the tree.
    self.parent = None
    # YList of Prefix children; .parent/.name wire the list into the
    # model tree so child entries can derive their own paths.
    self.prefix = YList()
    self.prefix.parent = self
    self.prefix.name = 'prefix'
class Prefix(object):
"""
Name of a prefix list
.. attribute:: prefix_list_name <key>
Prefix list name \- max 32 characters
**type**\: str
.. attribute:: prefix_list_entries
Sequence of entries forming a prefix list
**type**\: :py:class:`PrefixListEntries <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Prefixes.Prefix.PrefixListEntries>`
**presence node**\: True
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
    # Parent node reference; set when this entry is attached to the tree.
    self.parent = None
    # Key leaf (prefix list name) and the optional entries presence container.
    self.prefix_list_name = None
    self.prefix_list_entries = None
class PrefixListEntries(object):
"""
Sequence of entries forming a prefix list
.. attribute:: prefix_list_entry
A prefix list entry; either a description (remark) or a prefix to match against
**type**\: list of :py:class:`PrefixListEntry <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Prefixes.Prefix.PrefixListEntries.PrefixListEntry>`
.. attribute:: _is_presence
Is present if this instance represents presence container | |
# compute gradient and do update step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# model_grad = get_network_grad_flow(model)
# model_grads.update(model_grad)
# measure accuracy and record loss
losses.update(loss.item(), input.size(0))
diversity_losses.update(diversity_loss.item(), input.size(0))
pose_losses.update(pose_loss.item(), input.size(0))
_, avg_acc, cnt, pred_a = accuracy(output.detach().cpu().numpy(),
target_a.detach().cpu().numpy())
_, avg_acc, cnt, pred_b = accuracy(output.detach().cpu().numpy(),
target_b.detach().cpu().numpy())
acc.update(avg_acc, cnt)
# measure elapsed time
# batch_time.update(time.time() - end)
# end = time.time()
# 'model_grad {model_grad.val:.6f} ({model_grad.avg:.6f})\t' \
# 'DivLoss {diversity_loss.val:.5f}({diversity_loss.avg:.5f})\t' \
# '[{0}/{1}] ' \
# ({loss.avg:.5f})
# ({acc.avg:.3f})
# ({pose_loss.avg:.5f})
if config.LOG:
msg = 'Loss {loss.val:.5f} ' \
'Acc {acc.val:.3f} ' \
'PoseLs {pose_loss.val:.5f}'.format(
i, len(train_loader),
loss=losses, acc=acc,
# model_grad=model_grads,
# diversity_loss=diversity_losses,
pose_loss=pose_losses)
train_loader.set_description(msg)
# logger.info(msg)
if i % config.PRINT_FREQ == 0 and config.LOG:
save_size = 2
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
writer.add_scalar('train_loss', losses.val, global_steps)
writer.add_scalar('train_acc', acc.val, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
# prefix = '{}_epoch_{:09d}_iter_{}_{}'.format(os.path.join(output_dir, 'train'), epoch, i, print_prefix)
# suffix = 'a'
# for count in range(min(save_size, len(lambda_a))):
# suffix += '_[{}:{}]'.format(count, round(lambda_a[count].item(), 2))
#
# meta_a['pred_joints_vis'] = torch.ones_like(meta_a['joints_vis'])
# save_debug_images(config, input[:save_size, [2,1,0], :, :], meta_a, target_a[:save_size], (pred_a*4)[:save_size], output[:save_size], prefix, suffix)
#
# prefix = '{}_epoch_{:09d}_iter_{}_{}'.format(os.path.join(output_dir, 'train'), epoch, i, print_prefix)
# suffix = 'b'
# for count in range(min(save_size, len(lambda_a))):
# suffix += '_[{}:{}]'.format(count, round(lambda_a[count + B].item(), 2))
#
# meta_b['pred_joints_vis'] = torch.ones_like(meta_b['joints_vis'])
# save_debug_images(config, input[B:B+save_size, [2,1,0], :, :], meta_b, target_b[B:B+save_size], (pred_b*4)[B:B+save_size], output[B:B+save_size], prefix, suffix)
train_loader.close()
return
# dcp-cnn-------
def train_dcp(config, train_loader, model, criterion, optimizer, epoch,
              output_dir, tb_log_dir, writer_dict, print_prefix=''):
    """Train a dual-branch (occluder/occludee) pose model for one epoch.

    Args:
        config: experiment config; reads MODEL.HEAD.OCC_WEIGHT /
            OCCEE_WEIGHT, LOG and PRINT_FREQ.
        train_loader: yields (input, target_a, target_weight_a, meta_a,
            target_b, target_weight_b, meta_b) batches.
        model: network returning an (occ_pose, occee_pose) pair.
        criterion: pose loss fn(pred, target, target_weight).
        optimizer: optimizer stepping the model parameters.
        epoch, output_dir, tb_log_dir, print_prefix: unused here; kept so
            the signature matches the other trainers in this file.
        writer_dict: dict holding 'writer' and 'train_global_steps' for
            TensorBoard logging.
    """
    accAMer = AverageMeter()
    pose_lossAMer = AverageMeter()
    # BUG FIX: removed the stray no-op annotation statements
    # ("OCC_WEIGHT: 1" / "OCCEE_WEIGHT: 1") that were leftover YAML pasted
    # into the function body; the real weights come from the config below.
    occ_weight = config['MODEL']['HEAD']['OCC_WEIGHT']
    occee_weight = config['MODEL']['HEAD']['OCCEE_WEIGHT']
    # switch to train mode
    model.train()
    train_loader = tqdm(train_loader)
    for i, (input, target_a, target_weight_a, meta_a, target_b,
            target_weight_b, meta_b) in enumerate(train_loader):
        input = input.cuda()
        outputs = model(input)
        occ_pose, occee_pose = outputs
        target_a = target_a.cuda(non_blocking=True)
        target_weight_a = target_weight_a.cuda(non_blocking=True)
        target_b = target_b.cuda(non_blocking=True)
        target_weight_b = target_weight_b.cuda(non_blocking=True)
        # Each branch is supervised by its own target; combine as a
        # config-weighted sum.
        loss_occ = criterion(occ_pose, target_a, target_weight_a)
        loss_occee = criterion(occee_pose, target_b, target_weight_b)
        pose_loss = occ_weight * loss_occ + occee_weight * loss_occee
        loss = pose_loss
        # compute gradient and do update step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure accuracy and record loss
        pose_lossAMer.update(pose_loss.item(), input.size(0))
        _, avg_acc, cnt, pred_a = accuracy(occ_pose.detach().cpu().numpy(),
                                           target_a.detach().cpu().numpy())
        _, avg_acc, cnt, pred_b = accuracy(occee_pose.detach().cpu().numpy(),
                                           target_b.detach().cpu().numpy())
        accAMer.update(avg_acc, cnt)
        if config.LOG:
            msg = 'Loss:{loss:.5f} Acc:{acc:.5f}'.format(loss=pose_lossAMer.val, acc=accAMer.val)
            train_loader.set_description(msg)
        if i % config.PRINT_FREQ == 0 and config.LOG:
            writer = writer_dict['writer']
            global_steps = writer_dict['train_global_steps']
            writer.add_scalar('train_loss', pose_lossAMer.val, global_steps)
            writer.add_scalar('train_acc', accAMer.val, global_steps)
            writer_dict['train_global_steps'] = global_steps + 1
    train_loader.close()
    return
# dcp-------
# dcp-cnn------------
def train_dcp_cnn(config, train_loader, model, criterion, optimizer, writer_dict):
    """One training epoch for the four-head (cu/cd/ru/rd) DCP-CNN model.

    The 'oc' targets supervise the up heads (cu, ru) and the 'oced'
    targets supervise the down heads (cd, rd); the four losses are combined
    with the per-head weights from MODEL.HEAD.OUT_WEIGHT.
    """
    acc_meter = AverageMeter()
    loss_meter = AverageMeter()
    # Per-head loss weights: coarse-up, coarse-down, refine-up, refine-down.
    cu_w, cd_w, ru_w, rd_w = config['MODEL']['HEAD']['OUT_WEIGHT']
    model.train()
    train_loader = tqdm(train_loader)
    for step, batch in enumerate(train_loader):
        (image, target_oc, target_weight_oc, meta_oc,
         target_oced, target_weight_oced, meta_oced) = batch
        image = image.cuda()
        heads = model(image)
        cu = heads['cu']
        cd = heads['cd']
        ru = heads['ru']
        rd = heads['rd']
        target_oc = target_oc.cuda(non_blocking=True)
        target_weight_oc = target_weight_oc.cuda(non_blocking=True)
        target_oced = target_oced.cuda(non_blocking=True)
        target_weight_oced = target_weight_oced.cuda(non_blocking=True)
        # Same summation order as before: cu, ru, cd, rd.
        total_loss = (criterion(cu, target_oc, target_weight_oc) * cu_w
                      + criterion(ru, target_oc, target_weight_oc) * ru_w
                      + criterion(cd, target_oced, target_weight_oced) * cd_w
                      + criterion(rd, target_oced, target_weight_oced) * rd_w)
        # compute gradient and do update step
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        # record loss, then fold each head's accuracy into one meter
        loss_meter.update(total_loss.item(), image.size(0))
        np_oc = target_oc.detach().cpu().numpy()
        np_oced = target_oced.detach().cpu().numpy()
        for pred, ref in ((cu, np_oc), (ru, np_oc), (cd, np_oced), (rd, np_oced)):
            _, head_acc, head_cnt, _ = accuracy(pred.detach().cpu().numpy(), ref)
            acc_meter.update(head_acc, head_cnt)
        if config.LOG:
            train_loader.set_description(
                'Loss:{loss:.5f} Acc:{acc:.5f}'.format(loss=loss_meter.val,
                                                       acc=acc_meter.val))
        if step % config.PRINT_FREQ == 0 and config.LOG:
            writer = writer_dict['writer']
            global_steps = writer_dict['train_global_steps']
            writer.add_scalar('train_loss', loss_meter.val, global_steps)
            writer.add_scalar('train_acc', acc_meter.val, global_steps)
            writer_dict['train_global_steps'] = global_steps + 1
    train_loader.close()
    return
# dcp-cnn------------
# dcp-cnn------------
def train_dcp_cr_ocks(config, train_loader, model, criterion, ref_criterion, optimizer, writer_dict):
    """One training epoch for the four-head DCP model with a separate
    refinement criterion.

    Structurally identical to train_dcp_cnn, except the refine heads
    ('ru'/'rd') are supervised with ref_criterion (presumably an OKS-based
    loss, per the function name -- confirm) while the coarse heads
    ('cu'/'cd') use the plain pose criterion.
    """
    accAMer = AverageMeter()
    pose_lossAMer = AverageMeter()
    # Per-head loss weights: coarse-up, coarse-down, refine-up, refine-down.
    cu_weight, cd_weight, ru_weight, rd_weight = config['MODEL']['HEAD']['OUT_WEIGHT']
    # switch to train mode
    model.train()
    train_loader = tqdm(train_loader)
    # end = time.time()
    for i, (input, target_oc, target_weight_oc, meta_oc, target_oced, target_weight_oced, meta_oced) in enumerate(train_loader):
        # data_time.update(time.time() - end)
        input = input.cuda()
        pose_dict = model(input)
        cu = pose_dict['cu']
        cd = pose_dict['cd']
        ru = pose_dict['ru']
        rd = pose_dict['rd']
        target_oc = target_oc.cuda(non_blocking=True)
        target_weight_oc = target_weight_oc.cuda(non_blocking=True)
        target_oced = target_oced.cuda(non_blocking=True)
        target_weight_oced = target_weight_oced.cuda(non_blocking=True)
        # Up heads learn the occluder ('oc') target, down heads the
        # occludee ('oced') target; refine heads use ref_criterion.
        loss_cu = criterion(cu, target_oc, target_weight_oc)
        loss_ru = ref_criterion(ru, target_oc, target_weight_oc)
        loss_cd = criterion(cd, target_oced, target_weight_oced)
        loss_rd = ref_criterion(rd, target_oced, target_weight_oced)
        pose_loss = loss_cu*cu_weight + loss_ru*ru_weight + loss_cd*cd_weight + loss_rd*rd_weight
        # loss = pose_loss + 0.1*diversity_loss
        loss = pose_loss
        # compute gradient and do update step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure accuracy and record loss
        pose_lossAMer.update(pose_loss.item(), input.size(0))
        _, acc_cu, cnt_cu, pred_a = accuracy(cu.detach().cpu().numpy(), target_oc.detach().cpu().numpy())
        _, acc_ru, cnt_ru, pred_a = accuracy(ru.detach().cpu().numpy(), target_oc.detach().cpu().numpy())
        _, acc_cd, cnt_cd, pred_b = accuracy(cd.detach().cpu().numpy(), target_oced.detach().cpu().numpy())
        _, acc_rd, cnt_rd, pred_b = accuracy(rd.detach().cpu().numpy(), target_oced.detach().cpu().numpy())
        # All four head accuracies are folded into a single running meter.
        accAMer.update(acc_cu, cnt_cu)
        accAMer.update(acc_ru, cnt_ru)
        accAMer.update(acc_cd, cnt_cd)
        accAMer.update(acc_rd, cnt_rd)
        # measure elapsed time
        # batch_time.update(time.time() - end)
        # end = time.time()
        if config.LOG:
            msg = 'Loss:{loss:.5f} Acc:{acc:.5f}'.format(loss=pose_lossAMer.val, acc=accAMer.val)
            train_loader.set_description(msg)
        if i % config.PRINT_FREQ == 0 and config.LOG:
            writer = writer_dict['writer']
            global_steps = writer_dict['train_global_steps']
            writer.add_scalar('train_loss', pose_lossAMer.val, global_steps)
            writer.add_scalar('train_acc', accAMer.val, global_steps)
            writer_dict['train_global_steps'] = global_steps + 1
    train_loader.close()
    return
# dcp-cnn------------
# dcp-skt------------
def train_dcp_skt(config, train_loader, model, criterion, skt_criterion, optimizer, writer_dict):
    """One training epoch for the DCP model with an auxiliary skeleton head.

    The up (occluder) branch is supervised with both a pose loss and a
    skeleton loss; the down (occludee) skeleton loss is currently disabled
    (commented out below), although its weight and ground truth are still
    read.
    """
    accAMer = AverageMeter()
    pose_lossAMer = AverageMeter()
    # Per-output loss weights: up-pose, down-pose, up-skeleton, down-skeleton.
    upPose_weight, downPose_weight, upSkt_weight, downSkt_weight = config['MODEL']['HEAD']['OUT_WEIGHT']
    # switch to train mode
    model.train()
    train_loader = tqdm(train_loader)
    # end = time.time()
    for i, (input, target_oc, target_weight_oc, meta_oc, target_oced, target_weight_oced, meta_oced) in enumerate(train_loader):
        # data_time.update(time.time() - end)
        input = input.cuda()
        pose_dict = model(input)
        upPose = pose_dict['up']
        downPose = pose_dict['down']
        upSkt = pose_dict['up_skt']
        # downSkt = pose_dict['down_skt']
        target_oc = target_oc.cuda(non_blocking=True)
        target_weight_oc = target_weight_oc.cuda(non_blocking=True)
        target_oced = target_oced.cuda(non_blocking=True)
        target_weight_oced = target_weight_oced.cuda(non_blocking=True)
        upSktGt = meta_oc['skt_gt']
        upSktGt = upSktGt.cuda(non_blocking=True)
        # NOTE(review): downSktGT is loaded but never used while the
        # down-skeleton loss below stays disabled.
        downSktGT = meta_oced['skt_gt']
        # downSktGT = downSktGT.cuda(non_blocking=True)
        loss_upPose = criterion(upPose, target_oc, target_weight_oc)
        loss_downPose = criterion(downPose, target_oced, target_weight_oced)
        # NOTE(review): .apply suggests skt_criterion is a
        # torch.autograd.Function subclass -- confirm; 0.1 scales the
        # auxiliary skeleton loss relative to the pose losses.
        loss_upSkt = 0.1 * skt_criterion.apply(upSkt, upSktGt)
        # loss_downSkt = 0.1 * skt_criterion.apply(downSkt, downSktGT)
        pose_loss = loss_upPose*upPose_weight + loss_upSkt*upSkt_weight + loss_downPose*downPose_weight #+ loss_downSkt*downSkt_weight
        # loss = pose_loss + 0.1*diversity_loss
        loss = pose_loss
        # compute gradient and do update step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure accuracy and record loss
        pose_lossAMer.update(pose_loss.item(), input.size(0))
        _, acc_cu, cnt_cu, pred_a = accuracy(upPose.detach().cpu().numpy(), target_oc.detach().cpu().numpy())
        _, acc_cd, cnt_cd, pred_b = accuracy(downPose.detach().cpu().numpy(), target_oced.detach().cpu().numpy())
        accAMer.update(acc_cu, cnt_cu)
        accAMer.update(acc_cd, cnt_cd)
        # measure elapsed time
        # batch_time.update(time.time() - end)
        # end = time.time()
        if config.LOG:
            msg = 'Loss:{loss:.5f} Acc:{acc:.5f}'.format(loss=pose_lossAMer.val, acc=accAMer.val)
            train_loader.set_description(msg)
        if i % config.PRINT_FREQ == 0 and config.LOG:
            writer = writer_dict['writer']
            global_steps = writer_dict['train_global_steps']
            writer.add_scalar('train_loss', pose_lossAMer.val, global_steps)
            writer.add_scalar('train_acc', accAMer.val, global_steps)
            writer_dict['train_global_steps'] = global_steps + 1
    train_loader.close()
    return
# dcp-skt------------
# dcp-naive------------
def train_dcp_naive(config, train_loader, model, criterion, optimizer, writer_dict):
    """One training epoch for the naive two-head (up/down) DCP baseline."""
    acc_meter = AverageMeter()
    loss_meter = AverageMeter()
    # Only the last two OUT_WEIGHT entries apply to this two-head variant.
    _, _, up_w, down_w = config['MODEL']['HEAD']['OUT_WEIGHT']
    model.train()
    train_loader = tqdm(train_loader)
    for step, batch in enumerate(train_loader):
        (image, target_oc, target_weight_oc, meta_oc,
         target_oced, target_weight_oced, meta_oced) = batch
        image = image.cuda()
        heads = model(image)
        up_pose = heads['up']
        down_pose = heads['down']
        target_oc = target_oc.cuda(non_blocking=True)
        target_weight_oc = target_weight_oc.cuda(non_blocking=True)
        target_oced = target_oced.cuda(non_blocking=True)
        target_weight_oced = target_weight_oced.cuda(non_blocking=True)
        # Up head learns the occluder target, down head the occludee target.
        loss = (criterion(up_pose, target_oc, target_weight_oc) * up_w
                + criterion(down_pose, target_oced, target_weight_oced) * down_w)
        # compute gradient and do update step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # record loss and both head accuracies in shared meters
        loss_meter.update(loss.item(), image.size(0))
        _, up_acc, up_cnt, _ = accuracy(up_pose.detach().cpu().numpy(),
                                        target_oc.detach().cpu().numpy())
        _, down_acc, down_cnt, _ = accuracy(down_pose.detach().cpu().numpy(),
                                            target_oced.detach().cpu().numpy())
        acc_meter.update(up_acc, up_cnt)
        acc_meter.update(down_acc, down_cnt)
        if config.LOG:
            train_loader.set_description(
                'Loss:{loss:.5f} Acc:{acc:.5f}'.format(loss=loss_meter.val,
                                                       acc=acc_meter.val))
        if step % config.PRINT_FREQ == 0 and config.LOG:
            writer = writer_dict['writer']
            global_steps = writer_dict['train_global_steps']
            writer.add_scalar('train_loss', loss_meter.val, global_steps)
            writer.add_scalar('train_acc', acc_meter.val, global_steps)
            writer_dict['train_global_steps'] = global_steps + 1
    train_loader.close()
    return
# dcp-naive------------
# one------------
def train_one(config, train_loader, model, criterion, optimizer, writer_dict):
accAMer = AverageMeter()
pose_lossAMer = AverageMeter()
# switch to train mode
model.train()
train_loader = tqdm(train_loader)
# end = time.time()
for i, (input, target_oc, target_weight_oc, meta_oc) in enumerate(train_loader):
# data_time.update(time.time() - end)
input = input.cuda()
pose = model(input)
# up_pose = pose_dict['up']
# | |
"""
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Parse processor-specific CSR description YAML file and generate a CSR test file.
This test code will utilize every CSR instruction, writing values to the CSR
and then using a prediction function to calculate a reference value that will
be written into another register and compared against the value actually stored
in the CSR at this point, allowing for the test to self-check in order to
determine success or failure.
"""
"""
To install the bitstring library:
1) sudo apt-get install python3-bitstring OR
2) pip install bitstring
"""
import sys
import yaml
import argparse
import random
import copy
# BUG FIX: logging is used in the ImportError handler below but was
# never imported, so a missing bitstring package raised NameError
# instead of printing the install hint.
import logging

try:
    from bitstring import BitArray as bitarray
except ImportError:
    # bitstring is a hard requirement: fail fast with an install hint.
    logging.error("Please install bitstring package: sudo apt-get install python3-bitstring")
    sys.exit(1)
"""
Defines the test's success/failure values, one of which will be written to
the chosen signature address to indicate the test's result.
"""
TEST_RESULT = 1
TEST_PASS = 0
TEST_FAIL = 1
def get_csr_map(csr_file, xlen):
    """
    Parses the YAML file containing CSR descriptions.
    Args:
        csr_file: The CSR YAML file.
        xlen: The current RISC-V ISA bit length.
    Returns:
        A dictionary containing mappings for each CSR, of the form:
        { csr_name : [csr_address, csr_val_bitarray, csr_write_mask_bitarray, csr_read_mask_bitarray] }
    """
    rv_string = "rv{}".format(str(xlen))
    csrs = {}
    with open(csr_file, "r") as c:
        csr_description = yaml.safe_load(c)
        for csr_dict in csr_description:
            csr_name = csr_dict.get("csr")
            csr_address = csr_dict.get("address")
            # BUG FIX: the failure message previously formatted the
            # undefined name `rv`, so a missing rvXX entry raised
            # NameError instead of the intended AssertionError.
            assert rv_string in csr_dict, \
                "The {} CSR must be configured for rv{}".format(csr_name, str(xlen))
            csr_value = bitarray(uintbe=0, length=xlen)
            csr_write_mask = []
            csr_read_mask = bitarray(uintbe=0, length=xlen)
            csr_field_list = csr_dict.get(rv_string)
            for csr_field_detail_dict in csr_field_list:
                field_type = csr_field_detail_dict.get("type")
                field_val = csr_field_detail_dict.get("reset_val")
                field_msb = csr_field_detail_dict.get("msb")
                field_lsb = csr_field_detail_dict.get("lsb")
                field_size = field_msb - field_lsb + 1
                # WPRI (reserved) fields contribute to neither mask.
                if field_type != "WPRI":
                    val_bitarray = bitarray(uint=field_val, length=field_size)
                    mask_bitarray = bitarray(uint=1, length=1) * field_size
                    # bitstring indexes from the MSB, so mirror the positions.
                    start_pos = xlen - 1 - field_msb
                    end_pos = xlen - 1 - field_lsb
                    csr_read_mask.overwrite(mask_bitarray, start_pos)
                    csr_value.overwrite(val_bitarray, start_pos)
                    # "R" (read-only) fields are flagged so csr_write skips them.
                    access = True if field_type == "R" else False
                    csr_write_mask.append([mask_bitarray, (start_pos, end_pos), access])
            csrs.update({csr_name: [csr_address, csr_value, csr_write_mask, csr_read_mask]})
    return csrs
def get_rs1_val(iteration, xlen):
    """
    Calculates and returns the 3 test RS1 values that will be used
    to exercise the CSR.
    Args:
        iteration: Integer between 0 and 2 inclusive, indicates which
            test value to return.
        xlen: The current RISC-V ISA bit length.
    Returns:
        A bitarray encoding the value that will be written to the CSR to test it.
        Will be one of 3 values:
        1) 0xa5a5...
        2) 0x5a5a...
        3) A randomly generated number
    """
    if iteration == 0:
        return bitarray(hex=f"0x{'a5'*int(xlen/8)}")
    elif iteration == 1:
        return bitarray(hex=f"0x{'5a'*int(xlen/8)}")
    elif iteration == 2:
        val = bitarray(uint=0, length=xlen)
        # BUG FIX: randomize every bit position; the original only set the
        # first 32 positions, so on rv64 the low half of the value was
        # always zero.
        for i in range(xlen):
            bit = random.randint(0, 1)
            val.set(bit, i)
        return val
def csr_write(val, csr_val, csr_write_mask):
    """
    Performs a CSR write.
    Args:
        val: A bitarray containing the value to be written.
        csr_val: A bitarray containing the current CSR value; updated in place.
        csr_write_mask: A list of [mask_bitarray, (start, end), read_only]
            entries, one per non-reserved field (as built by get_csr_map).
    """
    for bitslice in csr_write_mask:
        # Unpack one field descriptor: mask, MSB-relative slice, RO flag.
        read_only = bitslice[2]
        start_index = bitslice[1][0]
        end_index = bitslice[1][1]
        # NOTE(review): length is computed but unused -- kept as-is.
        length = end_index - start_index + 1
        mask_val = bitslice[0]
        # only write if not read only
        if not read_only:
            # Mask the incoming slice and overwrite the field in place.
            val_slice = val[start_index:end_index+1]
            csr_val.overwrite(mask_val & val_slice, start_index)
"""
CSR Read:
Reads the given CSR, after applying the bitmask
"""
def csr_read(csr_val, csr_read_mask):
    """
    Performs a CSR read.
    Args:
        csr_val: A bitarray containing the current CSR value.
        csr_read_mask: A bitarray containing the CSR's read mask.
    Returns:
        A bitarray of the logical AND of csr_val and csr_read_mask.
    """
    visible = csr_val & csr_read_mask
    return visible
def predict_csr_val(csr_op, rs1_val, csr_val, csr_write_mask, csr_read_mask):
    """
    Predicts the CSR reference value, based on the current CSR operation.
    Args:
        csr_op: A string of the CSR operation being performed.
        rs1_val: A bitarray containing the value to be written to the CSR.
        csr_val: A bitarray containing the current value of the CSR;
            updated in place by the write half of each operation.
        csr_write_mask: A bitarray containing the CSR's write mask.
        csr_read_mask: A bitarray containing the CSR's read mask
    Returns:
        A hexadecimal string of the predicted CSR value.
    """
    prediction = None
    # create a zero bitarray to zero extend immediates
    zero = bitarray(uint=0, length=csr_val.len - 5)
    # Every variant reads (returns) the old value first, then writes:
    # rw = write rs1; rs = set bits in rs1; rc = clear bits in rs1;
    # the *i forms use the low 5 bits of rs1 as a zero-extended immediate.
    if csr_op == 'csrrw':
        prediction = csr_read(csr_val, csr_read_mask)
        csr_write(rs1_val, csr_val, csr_write_mask)
    elif csr_op == 'csrrs':
        prediction = csr_read(csr_val, csr_read_mask)
        csr_write(rs1_val | prediction, csr_val, csr_write_mask)
    elif csr_op == 'csrrc':
        prediction = csr_read(csr_val, csr_read_mask)
        csr_write((~rs1_val) & prediction, csr_val, csr_write_mask)
    elif csr_op == 'csrrwi':
        prediction = csr_read(csr_val, csr_read_mask)
        zero.append(rs1_val[-5:])
        csr_write(zero, csr_val, csr_write_mask)
    elif csr_op == 'csrrsi':
        prediction = csr_read(csr_val, csr_read_mask)
        zero.append(rs1_val[-5:])
        csr_write(zero | prediction, csr_val, csr_write_mask)
    elif csr_op == 'csrrci':
        prediction = csr_read(csr_val, csr_read_mask)
        zero.append(rs1_val[-5:])
        csr_write((~zero) & prediction, csr_val, csr_write_mask)
    # NOTE(review): an unrecognized csr_op leaves prediction None and this
    # raises AttributeError on .hex -- assumed unreachable by callers.
    return f"0x{prediction.hex}"
def gen_setup(test_file):
    """
    Generates the setup code for the CSR test.
    Args:
      test_file: the file containing the generated assembly code.
    """
    # Plain string literals: none of these lines interpolate any values.
    preamble_lines = (
        ".macro init\n",
        ".endm\n",
        ".section .text.init\n",
        ".globl _start\n",
        ".option norvc\n",
        "_start:\n",
    )
    for line in preamble_lines:
        test_file.write(line)
def gen_csr_test_fail(test_file, end_addr):
    """
    Generates code to handle a test failure.
    Writes (TEST_FAIL << 8) + TEST_RESULT to end_addr in an infinite loop;
    the testbench polls this location at the end of the test to detect failure.
    Args:
      test_file: the file containing the generated assembly test code.
      end_addr: address that should be written to at end of test
    """
    handler = (
        "csr_fail:\n",
        f"\tli x1, {TEST_FAIL}\n",
        "\tslli x1, x1, 8\n",
        f"\taddi x1, x1, {TEST_RESULT}\n",
        f"\tli x2, {end_addr}\n",
        "\tsw x1, 0(x2)\n",
        "\tj csr_fail\n",
    )
    for line in handler:
        test_file.write(line)
def gen_csr_test_pass(test_file, end_addr):
    """
    Generates code to handle test success.
    Writes (TEST_PASS << 8) + TEST_RESULT to end_addr in an infinite loop;
    the testbench polls this location at the end of the test to detect success.
    Args:
      test_file: the file containing the generated assembly test code.
      end_addr: address that should be written to at end of test
    """
    handler = (
        "csr_pass:\n",
        f"\tli x1, {TEST_PASS}\n",
        "\tslli x1, x1, 8\n",
        f"\taddi x1, x1, {TEST_RESULT}\n",
        f"\tli x2, {end_addr}\n",
        "\tsw x1, 0(x2)\n",
        "\tj csr_pass\n",
    )
    for line in handler:
        test_file.write(line)
def gen_csr_instr(original_csr_map, csr_instructions, xlen,
iterations, out, end_signature_addr):
"""
Uses the information in the map produced by get_csr_map() to generate
test CSR instructions operating on the generated random values.
Args:
original_csr_map: The dictionary containing CSR mappings generated by get_csr_map()
csr_instructions: A list of all supported CSR instructions in string form.
xlen: The RISC-V ISA bit length.
iterations: Indicates how many randomized test files will be generated.
out: A string containing the directory path that the tests will be generated in.
end_signature_addr: The address the test should write to upon terminating
Returns:
No explicit return value, but will write the randomized assembly test code
to the specified number of files.
"""
for i in range(iterations):
# pick two GPRs at random to act as source and destination registers
# for CSR operations
csr_map = copy.deepcopy(original_csr_map)
source_reg, dest_reg = [f"x{i}" for i in random.sample(range(1, 16), 2)]
csr_list = list(csr_map.keys())
with open(f"{out}/riscv_csr_test_{i}.S", "w") as csr_test_file:
gen_setup(csr_test_file)
for csr in csr_list:
csr_address, csr_val, csr_write_mask, csr_read_mask = csr_map.get(csr)
csr_test_file.write(f"\t# {csr}\n")
for op in csr_instructions:
for i in range(3):
# hex string
rand_rs1_val = get_rs1_val(i, xlen)
# I type CSR instruction
first_li = ""
if op[-1] == "i":
imm = rand_rs1_val[-5:]
csr_inst = f"\t{op} {dest_reg}, {csr_address}, 0b{imm.bin}\n"
imm_val = bitarray(uint=0, length=xlen-5)
imm_val.append(imm)
predict_li = (f"\tli {source_reg}, "
f"{predict_csr_val(op, imm_val, csr_val, csr_write_mask, csr_read_mask)}\n")
else:
first_li = f"\tli {source_reg}, 0x{rand_rs1_val.hex}\n"
csr_inst = f"\t{op} {dest_reg}, {csr_address}, {source_reg}\n"
predict_li = (f"\tli {source_reg}, "
f"{predict_csr_val(op, rand_rs1_val, csr_val, csr_write_mask, csr_read_mask)}\n")
branch_check = f"\tbne {source_reg}, {dest_reg}, csr_fail\n"
csr_test_file.write(first_li)
csr_test_file.write(csr_inst)
csr_test_file.write(predict_li)
csr_test_file.write(branch_check)
"""
We must hardcode in one final CSR check, as the value that has last
been written to the CSR has not been tested.
"""
if csr == csr_list[-1] and op == csr_instructions[-1] and i == 2:
final_csr_read = f"\tcsrr {dest_reg}, {csr_address}\n"
csrrs_read_mask = bitarray(uint=0, length=xlen)
final_li = (f"\tli {source_reg}, "
f"{predict_csr_val('csrrs', csrrs_read_mask, csr_val, csr_write_mask, csr_read_mask)}\n")
final_branch_check | |
# Repository: sthagen/numba-numba
import math
import operator
import sys
import pickle
import multiprocessing
import ctypes
import warnings
from distutils.version import LooseVersion
import re
import numpy as np
from numba import njit, jit, vectorize, guvectorize, objmode
from numba.core import types, errors, typing, compiler, cgutils
from numba.core.typed_passes import type_inference_stage
from numba.core.registry import cpu_target
from numba.core.compiler import compile_isolated
from numba.core.imputils import lower_constant
from numba.tests.support import (
TestCase,
captured_stdout,
temp_directory,
override_config,
run_in_new_process_in_cache_dir,
skip_if_typeguard,
)
from numba.core.errors import LoweringError
import unittest
from numba.extending import (
typeof_impl,
type_callable,
lower_builtin,
lower_cast,
overload,
overload_attribute,
overload_method,
models,
register_model,
box,
unbox,
NativeValue,
intrinsic,
_Intrinsic,
register_jitable,
get_cython_function_address,
is_jitted,
overload_classmethod,
)
from numba.core.typing.templates import (
ConcreteTemplate,
signature,
infer,
infer_global,
AbstractTemplate,
)
# Pandas-like API implementation
from .pdlike_usecase import Index, Series
# Optional SciPy dependency: expose scipy.special.cython_special as ``sc``
# only when SciPy >= 0.19 is importable; otherwise ``sc`` stays None so
# dependent tests can detect the absence and skip.
try:
    import scipy
    if LooseVersion(scipy.__version__) < "0.19":
        sc = None
    else:
        import scipy.special.cython_special as sc
except ImportError:
    sc = None
# -----------------------------------------------------------------------
# Define a custom type and an implicit cast on it
class MyDummy(object):
    # Pure-Python marker class; instances are recognized by numba only via
    # the typeof_impl registration below.
    pass
class MyDummyType(types.Opaque):
    # Numba type corresponding to MyDummy (opaque: no native data layout).
    def can_convert_to(self, context, toty):
        # Allow an implicit "safe" cast to any Number type.  The implicit
        # None fall-through for other target types means "no conversion".
        if isinstance(toty, types.Number):
            from numba.core.typeconv import Conversion
            return Conversion.safe
mydummy_type = MyDummyType("mydummy")
mydummy = MyDummy()
@typeof_impl.register(MyDummy)
def typeof_mydummy(val, c):
    # Every MyDummy instance maps to the singleton numba type above.
    return mydummy_type
@lower_cast(MyDummyType, types.Number)
def mydummy_to_number(context, builder, fromty, toty, val):
    """
    Implicit conversion from MyDummy to int.
    """
    # The cast ignores the incoming value and always yields the constant 42.
    return context.get_constant(toty, 42)
def get_dummy():
    # Usecase: returns the module-level MyDummy instance (exercises boxing).
    return mydummy
register_model(MyDummyType)(models.OpaqueModel)
@unbox(MyDummyType)
def unbox_index(typ, obj, c):
    # Unboxing yields an LLVM dummy value; the object carries no payload.
    return NativeValue(c.context.get_dummy_value())
# -----------------------------------------------------------------------
# Define a second custom type but w/o implicit cast to Number
def base_dummy_type_factory(name):
    """Create and register a fresh opaque dummy type.

    Returns (numba type class, Python class, numba type instance).
    Unlike MyDummyType, no can_convert_to is defined, so there is no
    implicit cast to Number.
    """
    class DynType(object):
        pass
    class DynTypeType(types.Opaque):
        pass
    dyn_type_type = DynTypeType(name)
    @typeof_impl.register(DynType)
    def typeof_mydummy(val, c):
        return dyn_type_type
    register_model(DynTypeType)(models.OpaqueModel)
    return DynTypeType, DynType, dyn_type_type
MyDummyType2, MyDummy2, mydummy_type_2 = base_dummy_type_factory("mydummy2")
@unbox(MyDummyType2)
def unbox_index2(typ, obj, c):
    # Same dummy unboxing as MyDummyType.
    return NativeValue(c.context.get_dummy_value())
# -----------------------------------------------------------------------
# Define a function's typing and implementation using the classical
# two-step API
def func1(x=None):
    # Pure-Python stub; only the numba-lowered versions below are callable.
    raise NotImplementedError
def type_func1_(context):
    # Typing: () or (None) -> int32; (Float) -> the same Float type.
    # Other argument types return None implicitly, i.e. no match.
    def typer(x=None):
        if x in (None, types.none):
            # 0-arg or 1-arg with None
            return types.int32
        elif isinstance(x, types.Float):
            # 1-arg with float
            return x
    return typer
type_func1 = type_callable(func1)(type_func1_)
@lower_builtin(func1)
@lower_builtin(func1, types.none)
def func1_nullary(context, builder, sig, args):
    # Lowering for func1() and func1(None): the constant 42.
    return context.get_constant(sig.return_type, 42)
@lower_builtin(func1, types.Float)
def func1_unary(context, builder, sig, args):
    # Lowering for func1(float): sqrt(2*x), compiled through numba itself.
    def func1_impl(x):
        return math.sqrt(2 * x)
    return context.compile_internal(builder, func1_impl, sig, args)
# We can do the same for a known internal operation, here "print_item"
# which we extend to support MyDummyType.
@infer
class PrintDummy(ConcreteTemplate):
    # Typing: print_item(mydummy) -> none.
    key = "print_item"
    cases = [signature(types.none, mydummy_type)]
@lower_builtin("print_item", MyDummyType)
def print_dummy(context, builder, sig, args):
    # Lowering: printing a MyDummy emits the constant string "hello!"
    # through the CPython API.
    [x] = args
    pyapi = context.get_python_api(builder)
    strobj = pyapi.unserialize(pyapi.serialize_object("hello!"))
    pyapi.print_object(strobj)
    pyapi.decref(strobj)
    return context.get_dummy_value()
# -----------------------------------------------------------------------
# Define an overloaded function (combined API)
def where(cond, x, y):
    # Pure-Python stub; implementations come from the @overload's below.
    raise NotImplementedError
def np_where(cond, x, y):
    """
    Wrap np.where() to allow for keyword arguments
    """
    return np.where(cond, x, y)
def call_where(cond, x, y):
    # Deliberately passes x/y as keywords to exercise keyword dispatch.
    return where(cond, y=y, x=x)
@overload(where)
def overload_where_arrays(cond, x, y):
    """
    Implement where() for arrays.
    """
    # Choose implementation based on argument types.
    if isinstance(cond, types.Array):
        if x.dtype != y.dtype:
            raise errors.TypingError("x and y should have the same dtype")
        # Array where() => return an array of the same shape
        if all(ty.layout == "C" for ty in (cond, x, y)):
            # Flat-iteration path: valid only for C-contiguous inputs.
            def where_impl(cond, x, y):
                """
                Fast implementation for C-contiguous arrays
                """
                shape = cond.shape
                if x.shape != shape or y.shape != shape:
                    raise ValueError("all inputs should have the same shape")
                res = np.empty_like(x)
                cf = cond.flat
                xf = x.flat
                yf = y.flat
                rf = res.flat
                for i in range(cond.size):
                    rf[i] = xf[i] if cf[i] else yf[i]
                return res
        else:
            # General path: index with np.ndenumerate, works for any layout.
            def where_impl(cond, x, y):
                """
                Generic implementation for other arrays
                """
                shape = cond.shape
                if x.shape != shape or y.shape != shape:
                    raise ValueError("all inputs should have the same shape")
                res = np.empty_like(x)
                for idx, c in np.ndenumerate(cond):
                    res[idx] = x[idx] if c else y[idx]
                return res
        return where_impl
# We can define another overload function for the same function, they
# will be tried in turn until one succeeds.
@overload(where)
def overload_where_scalars(cond, x, y):
    """
    Implement where() for scalars.
    """
    # Only matches when cond is not an array (otherwise the array overload
    # above applies).
    if not isinstance(cond, types.Array):
        if x != y:
            raise errors.TypingError("x and y should have the same type")
        def where_impl(cond, x, y):
            """
            Scalar where() => return a 0-dim array
            """
            scal = x if cond else y
            # Can't use full_like() on Numpy < 1.8
            arr = np.empty_like(scal)
            arr[()] = scal
            return arr
        return where_impl
# -----------------------------------------------------------------------
# Overload an already defined built-in function, extending it for new types.
@overload(len)
def overload_len_dummy(arg):
    # len(MyDummy) is defined to be the constant 13.
    if isinstance(arg, MyDummyType):
        def len_impl(arg):
            return 13
        return len_impl
@overload(operator.add)
def overload_add_dummy(arg1, arg2):
    # Adding any mix of the two dummy types evaluates to the constant 42.
    if isinstance(arg1, (MyDummyType, MyDummyType2)) and isinstance(
        arg2, (MyDummyType, MyDummyType2)
    ):
        def dummy_add_impl(arg1, arg2):
            return 42
        return dummy_add_impl
@overload(operator.delitem)
def overload_dummy_delitem(obj, idx):
    # del dummy[int] just prints; there is nothing to delete.
    if isinstance(obj, MyDummyType) and isinstance(idx, types.Integer):
        def dummy_delitem_impl(obj, idx):
            print("del", obj, idx)
        return dummy_delitem_impl
@overload(operator.getitem)
def overload_dummy_getitem(obj, idx):
    # dummy[int] evaluates to idx + 123.
    if isinstance(obj, MyDummyType) and isinstance(idx, types.Integer):
        def dummy_getitem_impl(obj, idx):
            return idx + 123
        return dummy_getitem_impl
@overload(operator.setitem)
def overload_dummy_setitem(obj, idx, val):
    # dummy[int] = int just prints the pair.
    if all(
        [
            isinstance(obj, MyDummyType),
            isinstance(idx, types.Integer),
            isinstance(val, types.Integer),
        ]
    ):
        def dummy_setitem_impl(obj, idx, val):
            print(idx, val)
        return dummy_setitem_impl
def call_add_operator(arg1, arg2):
    # Usecase: explicit operator.add call (vs. the + binop form below).
    return operator.add(arg1, arg2)
def call_add_binop(arg1, arg2):
    # Usecase: the + binary operator form.
    return arg1 + arg2
@overload(operator.iadd)
def overload_iadd_dummy(arg1, arg2):
    # In-place add on dummy types also evaluates to the constant 42.
    if isinstance(arg1, (MyDummyType, MyDummyType2)) and isinstance(
        arg2, (MyDummyType, MyDummyType2)
    ):
        def dummy_iadd_impl(arg1, arg2):
            return 42
        return dummy_iadd_impl
def call_iadd_operator(arg1, arg2):
    """Usecase: explicit in-place add via the operator module.

    Fixed to call operator.iadd — the previous body called operator.add,
    which merely duplicated call_add_operator and never exercised the
    overload_iadd_dummy registration this usecase exists for.
    """
    return operator.iadd(arg1, arg2)
# Small usecase functions compiled by the tests below.  Their exact
# syntactic form (augmented assignment vs. operator call, attribute vs.
# method access) is itself what is being tested, so keep the bodies as-is.
def call_iadd_binop(arg1, arg2):
    # Augmented-assignment form of in-place add.
    arg1 += arg2
    return arg1
def call_delitem(obj, idx):
    del obj[idx]
def call_getitem(obj, idx):
    return obj[idx]
def call_setitem(obj, idx, val):
    obj[idx] = val
@overload_method(MyDummyType, "length")
def overload_method_length(arg):
    # Expose len(arg) as a .length() method on MyDummy.
    def imp(arg):
        return len(arg)
    return imp
def cache_overload_method_usecase(x):
    return x.length()
def call_func1_nullary():
    return func1()
def call_func1_unary(x):
    return func1(x)
def len_usecase(x):
    return len(x)
def print_usecase(x):
    print(x)
def getitem_usecase(x, key):
    return x[key]
def npyufunc_usecase(x):
    return np.cos(np.sin(x))
def get_data_usecase(x):
    return x._data
def get_index_usecase(x):
    return x._index
def is_monotonic_usecase(x):
    return x.is_monotonic_increasing
def make_series_usecase(data, index):
    return Series(data, index)
def clip_usecase(x, lo, hi):
    return x.clip(lo, hi)
# -----------------------------------------------------------------------
def return_non_boxable():
    # Stub; the overload below returns the np module object, which has no
    # boxing back to Python.
    return np
@overload(return_non_boxable)
def overload_return_non_boxable():
    def imp():
        return np
    return imp
def non_boxable_ok_usecase(sz):
    # OK: the non-boxable module value stays inside compiled code.
    mod = return_non_boxable()
    return mod.arange(sz)
def non_boxable_bad_usecase():
    # Bad: attempts to return the non-boxable module value to Python.
    return return_non_boxable()
def mk_func_input(f):
    # Stub taking a function object; typing is supplied by MkFuncTyping.
    pass
@infer_global(mk_func_input)
class MkFuncTyping(AbstractTemplate):
    def generic(self, args, kws):
        # The lambda argument must arrive typed as a MakeFunctionLiteral.
        assert isinstance(args[0], types.MakeFunctionLiteral)
        return signature(types.none, *args)
def mk_func_test_impl():
    # Usecase passing an inline lambda, producing a make_function literal.
    mk_func_input(lambda a: a)
# -----------------------------------------------------------------------
# Define a types derived from types.Callable and overloads for them
class MyClass(object):
    pass
class CallableTypeRef(types.Callable):
    # A callable numba type wrapping a Python class; call typing is
    # resolved by scanning the templates registered for this type.
    def __init__(self, instance_type):
        self.instance_type = instance_type
        self.sig_to_impl_key = {}  # maps resolved signature -> impl key
        self.compiled_templates = []  # templates that produced a match
        super(CallableTypeRef, self).__init__('callable_type_ref'
                                              '[{}]'.format(self.instance_type))
    def get_call_type(self, context, args, kws):
        # Try each registered template; the first that applies cleanly wins.
        res_sig = None
        for template in context._functions[type(self)]:
            try:
                res_sig = template.apply(args, kws)
            except Exception:
                pass  # for simplicity assume args must match exactly
            else:
                # Record the implementation key so get_impl_key can find it.
                compiled_ovlds = getattr(template, '_compiled_overloads', {})
                if args in compiled_ovlds:
                    self.sig_to_impl_key[res_sig] = compiled_ovlds[args]
                    self.compiled_templates.append(template)
                break
        return res_sig
    def get_call_signatures(self):
        # Second element True: this callable may also accept other sigs.
        sigs = list(self.sig_to_impl_key.keys())
        return sigs, True
    def get_impl_key(self, sig):
        return self.sig_to_impl_key[sig]
@register_model(CallableTypeRef)
class CallableTypeModel(models.OpaqueModel):
    def __init__(self, dmm, fe_type):
        models.OpaqueModel.__init__(self, dmm, fe_type)
infer_global(MyClass, CallableTypeRef(MyClass))
@lower_constant(CallableTypeRef)
def constant_callable_typeref(context, builder, ty, pyval):
    # The constant carries no runtime data; a dummy LLVM value suffices.
    return context.get_dummy_value()
# -----------------------------------------------------------------------
@overload(np.exp)
def overload_np_exp(obj):
    # Extend np.exp: on MyDummyType it returns the sentinel 0xDEADBEEF.
    if isinstance(obj, MyDummyType):
        def imp(obj):
            # Returns a constant if a MyDummyType is seen
            return 0xDEADBEEF
        return imp
class TestLowLevelExtending(TestCase):
    """
    Test the low-level two-tier extension API.
    """
    # We check with both @jit and compile_isolated(), to exercise the
    # registration logic.
    def test_func1(self):
        # func1() and func1(None) lower to 42; func1(18.0) -> sqrt(36) = 6.
        pyfunc = call_func1_nullary
        cfunc = jit(nopython=True)(pyfunc)
        self.assertPreciseEqual(cfunc(), 42)
        pyfunc = call_func1_unary
        cfunc = jit(nopython=True)(pyfunc)
        self.assertPreciseEqual(cfunc(None), 42)
        self.assertPreciseEqual(cfunc(18.0), 6.0)
    def test_func1_isolated(self):
        # Same expectations as test_func1, via compile_isolated().
        pyfunc = call_func1_nullary
        cr = compile_isolated(pyfunc, ())
        self.assertPreciseEqual(cr.entry_point(), 42)
        pyfunc = call_func1_unary
        cr = compile_isolated(pyfunc, (types.float64,))
        self.assertPreciseEqual(cr.entry_point(18.0), 6.0)
    def test_type_callable_keeps_function(self):
        # type_callable() must return the typing function it was given.
        self.assertIs(type_func1, type_func1_)
        self.assertIsNotNone(type_func1)
    def test_cast_mydummy(self):
        # The implicit MyDummy -> Number cast yields the constant 42.
        pyfunc = get_dummy
        cr = compile_isolated(pyfunc, (), types.float64)
        self.assertPreciseEqual(cr.entry_point(), 42.0)
    def test_mk_func_literal(self):
        """make sure make_function is passed to typer class as a literal
        """
        test_ir = compiler.run_frontend(mk_func_test_impl)
        typingctx = cpu_target.typing_context
        targetctx = cpu_target.target_context
        typingctx.refresh()
        targetctx.refresh()
        typing_res = type_inference_stage(typingctx, targetctx, test_ir, (),
                                          None)
        self.assertTrue(
            any(
                isinstance(a, types.MakeFunctionLiteral)
                for a in typing_res.typemap.values()
            )
        )
class TestPandasLike(TestCase):
    """
    Test implementing a pandas-like Index object.
    Also stresses most of the high-level API.
    """
    def test_index_len(self):
        # len() on Index goes through the overloaded len() machinery.
        i = Index(np.arange(3))
        cfunc = jit(nopython=True)(len_usecase)
        self.assertPreciseEqual(cfunc(i), 3)
    def test_index_getitem(self):
        # Integer indexing returns a scalar; slicing returns a new Index.
        i = Index(np.int32([42, 8, -5]))
        cfunc = jit(nopython=True)(getitem_usecase)
        self.assertPreciseEqual(cfunc(i, 1), 8)
        ii = cfunc(i, slice(1, None))
        self.assertIsInstance(ii, Index)
        self.assertEqual(list(ii), [8, -5])
    def test_index_ufunc(self):
        """
        Check Numpy ufunc on an Index object.
        """
        i = Index(np.int32([42, 8, -5]))
        cfunc = jit(nopython=True)(npyufunc_usecase)
        ii = cfunc(i)
        self.assertIsInstance(ii, Index)
        self.assertPreciseEqual(ii._data, np.cos(np.sin(i._data)))
    def test_index_get_data(self):
        # The _data attribute is exposed with make_attribute_wrapper()
        i = Index(np.int32([42, 8, -5]))
        cfunc = jit(nopython=True)(get_data_usecase)
        data = cfunc(i)
        self.assertIs(data, i._data)
    def test_index_is_monotonic(self):
        # The is_monotonic_increasing attribute is exposed with
        # overload_attribute()
        cfunc = jit(nopython=True)(is_monotonic_usecase)
        for values, expected in [
            ([8, 42, 5], False),
            ([5, 8, 42], True),
            ([], True),
        ]:
            i = Index(np.int32(values))
            got = cfunc(i)
            self.assertEqual(got, expected)
    def test_series_len(self):
        i = Index(np.int32([2, 4, 3]))
        s = Series(np.float64([1.5, 4.0, 2.5]), i)
        cfunc = jit(nopython=True)(len_usecase)
        self.assertPreciseEqual(cfunc(s), 3)
    def test_series_get_index(self):
        # The Series' index round-trips through compiled code intact.
        i = Index(np.int32([2, 4, 3]))
        s = Series(np.float64([1.5, 4.0, 2.5]), i)
        cfunc = jit(nopython=True)(get_index_usecase)
        got = cfunc(s)
        self.assertIsInstance(got, Index)
        self.assertIs(got._data, i._data)
def test_series_ufunc(self):
"""
Check Numpy ufunc on an Series object.
"""
i = Index(np.int32([42, 8, -5]))
s = Series(np.int64([1, 2, | |
#=================================================================================================
##################################################################################################
# TODO:
#
# 1. Resolve: Is the q-integrand that of Essig's Eq. 3.13 or a mix of 3.13 and 4.4? (correction_option==0,1)
# 2. Resolve: Where are q_arr & E_arr: edges of units, or middles? (see last term of lines 61, 62)
# 3. Resolve: (Related): Do we bin with 'floor' or 'round'? (bin_option=='floor', 'round')
# 4. Resolve: Do we sum over array entries or do we use simps integration? (integration=='sum','simps')
# 5. Resolve: In binning, do I need to average over bin width?
#
##################################################################################################
#=================================================================================================
#=================================================================================================
#
# Project: DM-e Direct Detection & Astrophysics
#
# Description : Rates module
# A module that outputs different scattering rates, as a function of deposited
# energy or time of the year.
#
# Tips : Where to start to use this module:
#
# # to import this module:
# >>>import rates as rt
# # to display this information:
# >>>help(rate)
#
#
# Author : <NAME> (February 2020)
#
#=================================================================================================
from __future__ import division # for division operator
import numpy as np
import math
from math import pi
from itertools import cycle
from scipy.integrate import simps
from ..astrophysics import astrophysics as ap
from ..semiconductors import semiconductors as sc
#---------------------------------------------------------------------------------------------------
# CONSTANTS AND CONVERSION FACTORS
# All masses/energies are in eV, speeds in cm/s unless noted otherwise.
sec2year = 60*60*24*365.25 # converts [year] to [s]
kms2cms = 1.e5 # convert [km/s] to [cm/s]
a_em = 1/137. # fine structure constant
meeV = 0.511e6 # electron mass [eV]
ccms = 2.99792458e10 # speed of light [cm/s]
rho_chi = 0.4e9 #local DM density [eV/cm^3]
# NUMERICAL PARAMETERS
script_dir = str(__file__).rstrip('rates.py')
fileDirectory = script_dir+"rates_data/"
# qunit/Eunit are the grid spacings of the tabulated crystal form factor
# (the q-columns / E-rows of the fcrystal2 table; see commented q_arr/E_arr
# definitions below) — TODO confirm against the semiconductors module.
qunit = 0.02*a_em*meeV # eV
Eunit = 0.1 # eV
k_mesh = 137 # see below Eq. (4.2) in 1509.01598 Essig et al.
wk = 2./k_mesh # according to the convention from Quantum Espresso (see below Eq. (4.2) in 1509.01598 Essig et al. )
# TODO: generalize to other cases? what if atomic targets are included?
# q_arr = qunit*np.linspace(1., float(sc.Si_num._fshape[1]), sc.Si_num._fshape[1]) - qunit/2. # momentum transfer [eV] array. It's the q-columns in 1509.01598 Essig et al.'s table of fcrystal2. sc.Si_num used by default, but it's the same with sc.Ge_num. TODO: The last term guarantees that each entry in the array is centered at the half-values.
# E_arr = Eunit*np.linspace(1., float(sc.Si_num._fshape[0]), sc.Si_num._fshape[0]) - Eunit/2. # energy [eV] array. It's the E-rows in 1509.01598 Essig et al.'s table of fcrystal2. sc.Si_num used by default, but it's the same with sc.Ge_num. TODO: The last term guarantees that each entry in the array is centered at the half-values.
# 2-D (E, q) grids laid out 'ij' so axis 0 indexes energy, axis 1 momentum.
EGr, qGr = np.meshgrid(sc.E_arr, sc.q_arr, indexing='ij')
# FUNCTIONS DICTIONARY
components = ap.dark_matter_components.keys() # all the DM components defined in the astrophysics module
fn_dir = ap.init_fns(components=components, methods=['gvmin']) # defining the dictionary of functions with default parameters
# fn_dir = init_fns(components=components, methods=[], vArr=, timeArr=, vboost=, vsun=, t1=, from_scratch=, taken_from=, arrays=) # with other parameters
#---------------------------------------------------------------------------------------------------
# UNIVERSAL FUNCTIONS
#---------------------------------------------------------------------------------------------------
# Useful Functions
def my_floor(val, epsilon=1.e-6):
    """
    Floor function, that makes sure periodic .999... in floats are treated correctly.
    Parameters
    ----------
    val : value whose floor function we want to calculate
    epsilon : threshold of how close a float with trailing periodic .999... needs to be to its rounded value to be considered equal to it (default: 1.e-6)
    """
    val2 = np.floor(val)
    # np.int was a deprecated alias of the builtin int and was removed in
    # NumPy 1.24; cast with the builtin instead.
    if abs(val - (val2 + 1)) < epsilon:
        # val is within epsilon below the next integer: treat it as that integer.
        return int(val2) + 1
    else:
        return int(val2)
def my_round(val, eN=1.e6):
    """
    A rounding function, that makes sure periodic .4999... in floats are treated correctly.
    Parameters
    ----------
    val : value we want to round.
    eN : a large number we use to lift up the trailing .4999 above the decimal point, and then round up (default: 1.e6)
    """
    # First round after scaling by eN (promotes trailing .4999... to .5),
    # then round the rescaled result.
    lifted = np.round(val * eN)
    return int(np.round(lifted / eN))
def energy_bin(En, bin_option='floor'):
    """
    Finds the bin that corresponds to an energy En in the sc.E_arr. Note that bin = index+1.
    Parameters
    ----------
    En : energy [eV] for which we want to find its corresponding bin in sc.E_arr.
    bin_option : whether we are using the 'floor' or 'round' functions to obtain the bin (default: 'floor')
    Raises
    ------
    ValueError : if bin_option is neither 'floor' nor 'round'.
    """
    if bin_option == 'floor':
        return my_floor(En/sc.Eunit) + 1 # floor function to select the bin
    elif bin_option == 'round':
        return my_round(En/sc.Eunit) # rounds up or down to select the bin
    else:
        # Previously an unrecognized option fell through and silently
        # returned None; fail loudly instead (matches dRdE's error style).
        raise ValueError("Parameter 'bin_option' can only be either 'floor' or 'round'.")
#---------------------------------------------------------------------------------------------------
# Basic Functions
def mu_chie(m_chi):
    """
    DM-electron reduced mass [eV].
    Parameters
    ----------
    m_chi : DM mass [eV]
    """
    # Reduced mass: m_e * m_chi / (m_e + m_chi), with m_e = meeV.
    numerator = meeV*m_chi
    denominator = meeV + m_chi
    return numerator/denominator
def FDM(q, n):
    """
    DM form factor, (alpha * m_e / q)**n.
    Parameters
    ----------
    q : momentum transfer [eV]
    n : power
    """
    reference_ratio = a_em*meeV/q
    return reference_ratio**n
def vmin(q, Ee, m_chi):
    """
    Minimum speed [km/s] a DM must have for the electron to gain the energy required. Comes from energy conservation.
    Parameters
    ----------
    q : momentum transfer [eV]
    Ee : deposited energy Ee [eV]
    m_chi : DM mass [eV]
    """
    # Dimensionless v_min/c from energy conservation, then convert via the
    # speed of light [cm/s] and the cm/s -> km/s factor.
    v_over_c = q/(2.*m_chi) + Ee/q
    return ccms*v_over_c/kms2cms
#---------------------------------------------------------------------------------------------------
# RATES
#---------------------------------------------------------------------------------------------------
# Differential rate for fixed deposited energy and day of the year
def dRdE(m_chi, xsec, n, Ed, target_type='semiconductor', target_material='Si_num', MTarget=1., Time=1., dm_comp='SHM_default', variables='E', day=ap.tvernal, bin_option='floor', correction_option=0, integrate='sum', wanna_print=False):
"""
DM-e scattering event rate per target mass per unit energy [# events/eV/kg/year]
Parameters
----------
m_chi : DM mass [eV]
xsec : DM-e cross section [cm^2]
n : power of DM form factor FDM(q,n)
Ed : deposited energy [eV]
target_type : whether 'semiconductor' or 'atomic' (default: 'semiconductor')
target_material : the material that makes up the target (default: 'Si_num')
MTarget : the mass of the target [kg] (default: 1 kg)
Time : the unit time for the rate [yr] (default: 1 yr)
dm_comp : the DM component under consideration (default: 'SHM_default')
variables : whether 'E' (Energy) [eV] or 'Et'/'tE' (Energy [eV]-time [days]) (default: 'E')
day : day of the year at which rate is measured (default: ap.tvernal, the vernal equinox)
TODO: bin_option, correction_option, integrate, wanna_print
"""
exposure = Time*sec2year*MTarget # the exposure in kg*s
if target_type == 'semiconductor':
if target_material not in sc.semiconductor_targets.keys(): raise ValueError("The \'target_material\' passed has not been instantiated in the \'semiconductors\' module. Either limit yourself to "+str(sc.semiconductor_targets.keys())+" or instantiate it in the \'semiconductors\' module and add it to the dictionary \'semiconductor_targets\'.")
target = sc.semiconductor_targets[target_material] # the class of the semiconductor target material
estimate = (exposure/target._MCell) * (rho_chi/m_chi) * xsec * (a_em*ccms) # a rough estimate of the logarithmic rate dR/dlnE (~ Time * N_Cell * <n_DM * xsec * v_rel>), which turns out to be also the first factor in the exact expression (see. Eq. 3.13 in Essig et al.)
correction = 1. # some default value. Will be calculated below.
if target._crystal_function == 'numeric':
# Ei: the row index in the crystal function corresponding to the given value of the deposited energy.
# Ei = int(math.floor(Ed/sc.Eunit)) - 1
Ei = energy_bin(Ed, bin_option=bin_option) - 1
vmin_arr = vmin(sc.q_arr, Ed, m_chi) # shape: (len(sc.q_arr), ): that many q-columns
if variables == 'E':
# gvmin_vec = np.vectorize(fn_dir[dm_comp, 'gvmin', 'v']) # vectorizing gvmin [s/km] for the DM component considered
# eta = gvmin_vec(vmin(sc.q_arr, Ed, m_chi))/kms2cms # same as gvmin, but in [s/cm]
gvmin = fn_dir[dm_comp, 'gvmin', 'v']
eta = gvmin(vmin_arr)/kms2cms # shape: (len(sc.q_arr), )
del gvmin
elif (variables == 'Et' or variables == 'tE'):
# gvmin_vec = np.vectorize(fn_dir[dm_comp, 'gvmin', 'tv']) # vectorizing gvmin [s/km] for the DM component considered
# eta = gvmin_vec(day, vmin(sc.q_arr, Ed, m_chi))/kms2cms # same as gvmin, but in [s/cm]
gvmin = fn_dir[dm_comp, 'gvmin', 'vt']
eta = gvmin(vmin_arr, day)/kms2cms # shape: (len(sc.q_arr), ): that many q-columns
del gvmin
else: raise ValueError("Parameter \'variables\' can only be either \'E\' or \'Et\'.")
if wanna_print: print 'shape of vmin_arr:', vmin_arr.shape
del vmin_arr
# correction: the other factor in the calculation of the rate
# correction = sc.Eunit * (meeV**2. / mu_chie(m_chi)**2.) * sum( [ sc.qunit /(qi*sc.qunit) * (eta(qi)*ccms) * FDM(qi*sc.qunit, n)**2. * target._Eprefactor * 1./sc.qunit * 1./sc.Eunit * wk/2. * wk/2. * 1./wk * target._fcrystal2[qi-1, Ej-1] for qi in range(1, target._fshape[0]+1) ] ) # with eta as an exact function of continuous energy Ed
if correction_option == 0:
f2 = target._fcrystal2
elif correction_option == 1:
f2 = target._fmatrix2
else:
raise ValueError("Did not understand value of \'correction_option\'. Must be either 0 or 1.")
if integrate == 'sum':
correction = (meeV**2. / mu_chie(m_chi)**2.) * np.sum(sc.qunit * sc.q_arr**-2. * | |
import datetime as dt
from io import StringIO
import logging
import numpy as np
import os
import pytest
import warnings
import aacgmv2
class TestConvertArray:
    """Shared fixture and comparison logic for the array-conversion tests."""

    def setup(self):
        """Reset the comparison state before each test."""
        self.out = None
        self.ref = None
        self.rtol = 1.0e-4

    def teardown(self):
        """Drop the comparison state after each test."""
        del self.out, self.ref, self.rtol

    def evaluate_output(self, ind=None):
        """ Function used to evaluate convert_latlon_arr output"""
        if self.out is None:
            return
        if ind is not None:
            # Reduce the reference to single-element rows at index `ind`.
            self.ref = [[row[ind]] for row in self.ref]
        np.testing.assert_equal(len(self.out), len(self.ref))
        for i, out_arr in enumerate(self.out):
            if not isinstance(out_arr, np.ndarray):
                raise TypeError("output value is not a numpy array")
            np.testing.assert_equal(len(out_arr), len(self.ref[i]))
            np.testing.assert_allclose(out_arr, self.ref[i], rtol=self.rtol)
class TestConvertLatLon:
    """Tests for aacgmv2.convert_latlon with scalar inputs."""
    def setup(self):
        """Runs before every method to create a clean testing setup"""
        self.dtime = dt.datetime(2015, 1, 1, 0, 0, 0)
        self.ddate = dt.date(2015, 1, 1)
        self.in_args = [60, 0]  # base (lat, lon); tests append alt/time/code
        self.out = None
        self.rtol = 1.0e-4
    def teardown(self):
        """Runs after every method to clean up previous testing"""
        del self.out, self.in_args, self.rtol, self.dtime, self.ddate
    @pytest.mark.parametrize('alt,method_code,ref',
                             [(300, 'TRACE', [58.2268, 81.1613, 1.0457]),
                              (3000.0, "G2A|BADIDEA", [64.3578, 83.2895,
                                                       1.4694]),
                              (7000.0, "G2A|TRACE|BADIDEA",
                               [69.3187, 85.0845, 2.0973])])
    def test_convert_latlon(self, alt, method_code, ref):
        """Test single value latlon conversion"""
        self.in_args.extend([alt, self.dtime, method_code])
        self.out = aacgmv2.convert_latlon(*self.in_args)
        np.testing.assert_allclose(self.out, ref, rtol=self.rtol)
    @pytest.mark.parametrize('lat,ref',
                             [(90.01, [83.927161, 170.1471396, 1.04481923]),
                              (-90.01, [-74.9814852, 17.990332, 1.044819236])])
    def test_convert_latlon_high_lat(self, lat, ref):
        """Test single latlon conversion with latitude just out of bounds"""
        self.in_args[0] = lat
        self.in_args.extend([300, self.dtime, 'G2A'])
        self.out = aacgmv2.convert_latlon(*self.in_args)
        np.testing.assert_allclose(self.out, ref, rtol=self.rtol)
    def test_convert_latlon_datetime_date(self):
        """Test single latlon conversion with date and datetime input"""
        self.in_args.extend([300, self.ddate, 'TRACE'])
        self.out = aacgmv2.convert_latlon(*self.in_args)
        np.testing.assert_allclose(self.out, [58.2268, 81.1613, 1.0457],
                                   rtol=self.rtol)
    def test_convert_latlon_location_failure(self):
        """Test single value latlon conversion with a bad location"""
        self.out = aacgmv2.convert_latlon(0, 0, 0, self.dtime, self.in_args[-1])
        assert np.all(np.isnan(np.array(self.out)))
    def test_convert_latlon_maxalt_failure(self):
        """test convert_latlon failure for an altitude too high for coeffs"""
        self.in_args.extend([2001, self.dtime, ""])
        self.out = aacgmv2.convert_latlon(*self.in_args)
        assert np.all(np.isnan(np.array(self.out)))
    @pytest.mark.parametrize('in_rep,in_irep,msg',
                             [(None, 3, "must be a datetime object"),
                              (91, 0, "unrealistic latitude"),
                              (-91, 0, "unrealistic latitude"),
                              (None, 4, "unknown method code")])
    def test_convert_latlon_failure(self, in_rep, in_irep, msg):
        """Test ValueError messages for bad time, latitude, or method code"""
        self.in_args.extend([300, self.dtime, "G2A"])
        self.in_args[in_irep] = in_rep
        with pytest.raises(ValueError, match=msg):
            aacgmv2.convert_latlon(*self.in_args)
class TestConvertLatLonArr(TestConvertArray):
    """Tests for `aacgmv2.convert_latlon_arr` with array/list/scalar input.

    `evaluate_output` is not defined in this class; presumably it is
    provided by the TestConvertArray base and compares `self.out` against
    `self.ref` using `self.rtol` -- TODO confirm against the base class.
    """
    def setup(self):
        """Runs before every method to create a clean testing setup"""
        self.dtime = dt.datetime(2015, 1, 1, 0, 0, 0)
        self.ddate = dt.date(2015, 1, 1)
        self.lat_in = [60.0, 61.0]
        self.lon_in = [0.0, 0.0]
        self.alt_in = [300.0, 300.0]
        self.method = 'TRACE'
        self.out = None
        # Expected [mlat, mlon, r] output for the two input locations
        self.ref = [[58.2268, 59.3184], [81.1613, 81.6080], [1.0457, 1.0456]]
        self.rtol = 1.0e-4
    def teardown(self):
        """Runs after every method to clean up previous testing"""
        del self.lat_in, self.lon_in, self.alt_in, self.dtime, self.ddate
        del self.method, self.out, self.ref, self.rtol
    def test_convert_latlon_arr_single_val(self):
        """Test array latlon conversion for a single value"""
        self.out = aacgmv2.convert_latlon_arr(self.lat_in[0], self.lon_in[0],
                                              self.alt_in[0], self.dtime,
                                              self.method)
        self.evaluate_output(ind=0)
    def test_convert_latlon_arr_arr_single(self):
        """Test array latlon conversion for array input of shape (1,)"""
        self.out = aacgmv2.convert_latlon_arr(np.array([self.lat_in[0]]),
                                              np.array([self.lon_in[0]]),
                                              np.array([self.alt_in[0]]),
                                              self.dtime, self.method)
        self.evaluate_output(ind=0)
    def test_convert_latlon_arr_list_single(self):
        """Test array latlon conversion for list input of single values"""
        self.out = aacgmv2.convert_latlon_arr([self.lat_in[0]],
                                              [self.lon_in[0]],
                                              [self.alt_in[0]], self.dtime,
                                              self.method)
        self.evaluate_output(ind=0)
    def test_convert_latlon_arr_list(self):
        """Test array latlon conversion for list input"""
        self.out = aacgmv2.convert_latlon_arr(self.lat_in, self.lon_in,
                                              self.alt_in, self.dtime,
                                              self.method)
        self.evaluate_output()
    def test_convert_latlon_arr_arr(self):
        """Test array latlon conversion for array input"""
        self.out = aacgmv2.convert_latlon_arr(np.array(self.lat_in),
                                              np.array(self.lon_in),
                                              np.array(self.alt_in),
                                              self.dtime, self.method)
        self.evaluate_output()
    def test_convert_latlon_arr_list_mix(self):
        """Test array latlon conversion for mixed types with list"""
        self.out = aacgmv2.convert_latlon_arr(self.lat_in, self.lon_in[0],
                                              self.alt_in[0], self.dtime,
                                              self.method)
        self.evaluate_output()
    def test_convert_latlon_arr_arr_mix(self):
        """Test array latlon conversion for mixed type with an array"""
        self.out = aacgmv2.convert_latlon_arr(np.array(self.lat_in),
                                              self.lon_in[0], self.alt_in[0],
                                              self.dtime, self.method)
        self.evaluate_output()
    def test_convert_latlon_arr_arr_mult_and_single_element(self):
        """Test latlon conversion for arrays with multiple and single vals"""
        self.out = aacgmv2.convert_latlon_arr(np.array(self.lat_in),
                                              np.array([self.lon_in[0]]),
                                              np.array(self.alt_in),
                                              self.dtime, self.method)
        self.evaluate_output()
    @pytest.mark.parametrize('method_code,alt,local_ref',
                             [("BADIDEA", 3000.0,
                               [[64.3580], [83.2895], [1.4694]]),
                              ("BADIDEA|TRACE", 7000.0,
                               [[69.3187], [85.0845], [2.0973]])])
    def test_convert_latlon_arr_badidea(self, method_code, alt, local_ref):
        """Test array latlon conversion for BADIDEA"""
        # BADIDEA permits conversions above the coefficient altitude limit
        self.out = aacgmv2.convert_latlon_arr(self.lat_in[0], self.lon_in[0],
                                              [alt], self.dtime, method_code)
        self.ref = local_ref
        self.evaluate_output()
    def test_convert_latlon_arr_location_failure(self):
        """Test array latlon conversion with a bad location"""
        with warnings.catch_warnings():
            # Causes all warnings to be suppressed
            warnings.simplefilter("ignore")
            # Trigger a warning
            self.out = aacgmv2.convert_latlon_arr([0], [0], [0], self.dtime, "")
            # Test the output
            np.testing.assert_equal(len(self.out), len(self.ref))
            assert np.any(~np.isfinite(np.array(self.out)))
    def test_convert_latlon_arr_datetime_date(self):
        """Test array latlon conversion with date and datetime input"""
        self.out = aacgmv2.convert_latlon_arr(self.lat_in, self.lon_in,
                                              self.alt_in, self.ddate,
                                              self.method)
        self.evaluate_output()
    def test_convert_latlon_arr_clip(self):
        """Test array latlon conversion with latitude clipping"""
        # Latitudes just past +/-90 deg should be clipped, not rejected
        self.lat_in = [90.01, -90.01]
        self.ref = [[83.92352053, -74.98110552], [170.1381271, 17.98164313],
                    [1.04481924, 1.04481924]]
        self.out = aacgmv2.convert_latlon_arr(self.lat_in, self.lon_in,
                                              self.alt_in, self.ddate,
                                              self.method)
        self.evaluate_output()
    def test_convert_latlon_arr_maxalt_failure(self):
        """test convert_latlon_arr failure for altitudes too high for coeffs"""
        self.method = ""
        self.out = aacgmv2.convert_latlon_arr(self.lat_in[0], self.lon_in[0],
                                              [2001], self.dtime, self.method)
        assert np.all(np.isnan(np.array(self.out)))
    @pytest.mark.parametrize('in_rep,in_irep,msg',
                             [(None, 3, "must be a datetime object"),
                              ([np.full(shape=(3, 2), fill_value=50.0), 0],
                               [0, 1], "unable to process multi-dimensional"),
                              ([50, 60, 70], 0, "arrays are mismatched"),
                              ([[91, 60, -91], 0, 300], [0, 1, 2],
                               "unrealistic latitude"),
                              (None, 4, "unknown method code")])
    def test_convert_latlon_arr_failure(self, in_rep, in_irep, msg):
        """Test ValueError raised by convert_latlon_arr for bad inputs."""
        # Substitute bad values at the `in_irep` indices and match `msg`
        in_args = np.array([self.lat_in, self.lon_in, self.alt_in, self.dtime,
                            "G2A"], dtype=object)
        in_args[in_irep] = in_rep
        with pytest.raises(ValueError, match=msg):
            aacgmv2.convert_latlon_arr(*in_args)
class TestGetAACGMCoord:
    """Tests for `aacgmv2.get_aacgm_coord` with scalar input.

    Each test builds an argument list `[lat, lon, alt, time, method]`
    and compares the (mlat, mlon, mlt) output against reference values.
    """
    def setup(self):
        """Runs before every method to create a clean testing setup"""
        self.dtime = dt.datetime(2015, 1, 1, 0, 0, 0)
        self.ddate = dt.date(2015, 1, 1)
        self.in_args = [60, 0]  # geographic latitude and longitude
        self.out = None
        self.rtol = 1.0e-4
    def teardown(self):
        """Runs after every method to clean up previous testing"""
        del self.out, self.in_args, self.rtol, self.dtime, self.ddate
    @pytest.mark.parametrize('alt,method_code,ref',
                             [(300, 'TRACE', [58.2268, 81.1613, 0.1888]),
                              (3000.0, "G2A|BADIDEA", [64.3578, 83.2895,
                                                       0.3307]),
                              (7000.0, "G2A|TRACE|BADIDEA",
                               [69.3187, 85.0845, 0.4503])])
    def test_get_aacgm_coord(self, alt, method_code, ref):
        """Test single value AACGMV2 calculation, defaults to TRACE"""
        self.in_args.extend([alt, self.dtime, method_code])
        self.out = aacgmv2.get_aacgm_coord(*self.in_args)
        np.testing.assert_allclose(self.out, ref, rtol=self.rtol)
    def test_get_aacgm_coord_datetime_date(self):
        """Test single AACGMV2 calculation with date and datetime input"""
        self.in_args.extend([300.0, self.ddate, 'TRACE'])
        self.out = aacgmv2.get_aacgm_coord(*self.in_args)
        np.testing.assert_allclose(self.out, [58.2268, 81.1613, 0.1888],
                                   rtol=self.rtol)
    def test_get_aacgm_coord_location_failure(self):
        """Test single value AACGMV2 calculation with a bad location"""
        self.in_args.extend([0.0, self.dtime, 'TRACE'])
        self.in_args[0] = 0.0
        self.out = aacgmv2.get_aacgm_coord(*self.in_args)
        # BUG FIX: the NaN check was computed but never asserted, so this
        # test could not fail.  Assert it, requiring at least one NaN in
        # the output to mirror the array-based location-failure test.
        assert np.any(np.isnan(np.array(self.out)))
    def test_get_aacgm_coord_maxalt_failure(self):
        """test get_aacgm_coord failure for an altitude too high for coeffs"""
        self.in_args.extend([2001, self.dtime, ""])
        self.out = aacgmv2.get_aacgm_coord(*self.in_args)
        assert np.all(np.isnan(np.array(self.out)))
    @pytest.mark.parametrize('in_index,value',
                             [(3, None), (0, 91.0), (0, -91.0)])
    def test_get_aacgm_coord_raise_value_error(self, in_index, value):
        """Test different ways to raise a ValueError"""
        # Bad time (index 3) or unrealistic latitude (index 0)
        self.in_args.extend([300.0, self.dtime])
        self.in_args[in_index] = value
        with pytest.raises(ValueError):
            self.out = aacgmv2.get_aacgm_coord(*self.in_args)
class TestGetAACGMCoordArr(TestConvertArray):
    """Tests for `aacgmv2.get_aacgm_coord_arr` (magnetic coords and MLT).

    `evaluate_output` is not defined in this class; presumably it is
    provided by the TestConvertArray base and compares `self.out` against
    `self.ref` using `self.rtol` -- TODO confirm against the base class.
    """
    def setup(self):
        """Runs before every method to create a clean testing setup"""
        self.dtime = dt.datetime(2015, 1, 1, 0, 0, 0)
        self.ddate = dt.date(2015, 1, 1)
        self.lat_in = [60.0, 61.0]
        self.lon_in = [0.0, 0.0]
        self.alt_in = [300.0, 300.0]
        self.method = 'TRACE'
        self.out = None
        # Expected [mlat, mlon, mlt] output for the two input locations
        self.ref = [[58.22676, 59.31847], [81.16135, 81.60797],
                    [0.18880, 0.21857]]
        self.rtol = 1.0e-4
    def teardown(self):
        """Runs after every method to clean up previous testing"""
        del self.out, self.ref, self.lat_in, self.dtime, self.ddate
        del self.lon_in, self.alt_in, self.method, self.rtol
    def test_get_aacgm_coord_arr_single_val(self):
        """Test array AACGMV2 calculation for a single value"""
        self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in[0], self.lon_in[0],
                                               self.alt_in[0], self.dtime,
                                               self.method)
        self.evaluate_output(ind=0)
    def test_get_aacgm_coord_arr_list_single(self):
        """Test array AACGMV2 calculation for list input of single values"""
        self.out = aacgmv2.get_aacgm_coord_arr([self.lat_in[0]],
                                               [self.lon_in[0]],
                                               [self.alt_in[0]], self.dtime,
                                               self.method)
        self.evaluate_output(ind=0)
    def test_get_aacgm_coord_arr_arr_single(self):
        """Test array AACGMV2 calculation for array with a single value"""
        self.out = aacgmv2.get_aacgm_coord_arr(np.array([self.lat_in[0]]),
                                               np.array([self.lon_in[0]]),
                                               np.array([self.alt_in[0]]),
                                               self.dtime, self.method)
        self.evaluate_output(ind=0)
    def test_get_aacgm_coord_arr_list(self):
        """Test array AACGMV2 calculation for list input"""
        self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in,
                                               self.alt_in, self.dtime,
                                               self.method)
        self.evaluate_output()
    def test_get_aacgm_coord_arr_arr(self):
        """Test array AACGMV2 calculation for an array"""
        self.out = aacgmv2.get_aacgm_coord_arr(np.array(self.lat_in),
                                               np.array(self.lon_in),
                                               np.array(self.alt_in),
                                               self.dtime, self.method)
        self.evaluate_output()
    def test_get_aacgm_coord_arr_list_mix(self):
        """Test array AACGMV2 calculation for a list and floats"""
        self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in[0],
                                               self.alt_in[0], self.dtime,
                                               self.method)
        self.evaluate_output()
    def test_get_aacgm_coord_arr_arr_mix(self):
        """Test array AACGMV2 calculation for an array and floats"""
        self.out = aacgmv2.get_aacgm_coord_arr(np.array(self.lat_in),
                                               self.lon_in[0], self.alt_in[0],
                                               self.dtime, self.method)
        self.evaluate_output()
    def test_get_aacgm_coord_arr_badidea(self):
        """Test array AACGMV2 calculation for BADIDEA"""
        # BADIDEA permits calculations above the coefficient altitude limit
        self.method = "|".join([self.method, "BADIDEA"])
        self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in[0], self.lon_in[0],
                                               [3000.0], self.dtime,
                                               self.method)
        self.ref = [[64.3481], [83.2885], [0.3306]]
        self.evaluate_output()
    def test_get_aacgm_coord_arr_location_failure(self):
        """Test array AACGMV2 calculation with a bad location"""
        self.out = aacgmv2.get_aacgm_coord_arr([0], [0], [0], self.dtime,
                                               self.method)
        np.testing.assert_equal(len(self.out), len(self.ref))
        # NOTE(review): asserting on a list comprehension (a non-empty
        # list) is always True; `all(...)` may have been intended -- verify.
        assert [isinstance(oo, np.ndarray) and len(oo) == 1 for oo in self.out]
        assert np.any([np.isnan(oo) for oo in self.out])
    def test_get_aacgm_coord_arr_mult_failure(self):
        """Test aacgm_coord_arr failure with multi-dim array input"""
        with pytest.raises(ValueError):
            (self.mlat_out, self.mlon_out,
             self.mlt_out) = aacgmv2.get_aacgm_coord_arr(
                 np.array([[60, 61, 62], [63, 64, 65]]), 0, 300, self.dtime)
    def test_get_aacgm_coord_arr_time_failure(self):
        """Test array AACGMV2 calculation with a bad time"""
        with pytest.raises(ValueError):
            aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in, self.alt_in,
                                        None, self.method)
    def test_get_aacgm_coord_arr_mlat_failure(self):
        """Test error return for co-latitudes above 90 for an array"""
        self.lat_in = [91, 60, -91]
        with pytest.raises(ValueError):
            self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in[0],
                                                   self.alt_in[0], self.dtime,
                                                   self.method)
    def test_get_aacgm_coord_arr_datetime_date(self):
        """Test array AACGMV2 calculation with date and datetime input"""
        # A date should give the same result as the midnight datetime
        self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in,
                                               self.alt_in, self.ddate,
                                               self.method)
        self.ref = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in,
                                               self.alt_in, self.dtime,
                                               self.method)
        self.evaluate_output()
| |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
_weights_dict = dict()
def load_weights(weight_file):
    """Load a pickled numpy weight dictionary.

    :param weight_file: path to a ``.npy`` file containing a pickled
        dict of layer weights, or None to skip loading.
    :return: the weight dict, or None when ``weight_file`` is None.
    """
    if weight_file is None:  # was `== None`; identity test is correct here
        return None
    try:
        weights_dict = np.load(weight_file, allow_pickle=True).item()
    except Exception:
        # Was a bare `except:` (which also swallowed KeyboardInterrupt /
        # SystemExit).  Retry with byte decoding for files pickled under
        # Python 2.
        weights_dict = np.load(weight_file, allow_pickle=True,
                               encoding='bytes').item()
    return weights_dict
class GoogleNetPlaces205Caffe(nn.Module):
    def __init__(self, weight_file):
        """Build the GoogLeNet (Places205, Caffe-converted) layer stack.

        Layers are created by the private `__conv`/`__dense` factory
        helpers (defined elsewhere in this class); presumably they pull
        per-layer initial values out of `_weights_dict` by `name` --
        TODO confirm against the helper definitions.

        :param weight_file: path to a pickled numpy weight dict, or None.
        """
        super(GoogleNetPlaces205Caffe, self).__init__()
        global _weights_dict
        _weights_dict = load_weights(weight_file)
        # Stem: 7x7/2 conv, then 1x1 reduce + 3x3 conv
        self.conv1_7x7_s2 = self.__conv(2, name='conv1/7x7_s2', in_channels=3, out_channels=64, kernel_size=(7, 7), stride=(2, 2), groups=1, bias=True)
        self.conv2_3x3_reduce = self.__conv(2, name='conv2/3x3_reduce', in_channels=64, out_channels=64, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.conv2_3x3 = self.__conv(2, name='conv2/3x3', in_channels=64, out_channels=192, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        # Inception 3a (192 channels in)
        self.inception_3a_1x1 = self.__conv(2, name='inception_3a/1x1', in_channels=192, out_channels=64, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_3a_3x3_reduce = self.__conv(2, name='inception_3a/3x3_reduce', in_channels=192, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_3a_5x5_reduce = self.__conv(2, name='inception_3a/5x5_reduce', in_channels=192, out_channels=16, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_3a_pool_proj = self.__conv(2, name='inception_3a/pool_proj', in_channels=192, out_channels=32, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_3a_3x3 = self.__conv(2, name='inception_3a/3x3', in_channels=96, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        self.inception_3a_5x5 = self.__conv(2, name='inception_3a/5x5', in_channels=16, out_channels=32, kernel_size=(5, 5), stride=(1, 1), groups=1, bias=True)
        # Inception 3b (256 channels in)
        self.inception_3b_1x1 = self.__conv(2, name='inception_3b/1x1', in_channels=256, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_3b_3x3_reduce = self.__conv(2, name='inception_3b/3x3_reduce', in_channels=256, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_3b_5x5_reduce = self.__conv(2, name='inception_3b/5x5_reduce', in_channels=256, out_channels=32, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_3b_pool_proj = self.__conv(2, name='inception_3b/pool_proj', in_channels=256, out_channels=64, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_3b_3x3 = self.__conv(2, name='inception_3b/3x3', in_channels=128, out_channels=192, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        self.inception_3b_5x5 = self.__conv(2, name='inception_3b/5x5', in_channels=32, out_channels=96, kernel_size=(5, 5), stride=(1, 1), groups=1, bias=True)
        # Inception 4a (480 channels in)
        self.inception_4a_1x1 = self.__conv(2, name='inception_4a/1x1', in_channels=480, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4a_3x3_reduce = self.__conv(2, name='inception_4a/3x3_reduce', in_channels=480, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4a_5x5_reduce = self.__conv(2, name='inception_4a/5x5_reduce', in_channels=480, out_channels=16, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4a_pool_proj = self.__conv(2, name='inception_4a/pool_proj', in_channels=480, out_channels=64, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4a_3x3 = self.__conv(2, name='inception_4a/3x3', in_channels=96, out_channels=208, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        self.inception_4a_5x5 = self.__conv(2, name='inception_4a/5x5', in_channels=16, out_channels=48, kernel_size=(5, 5), stride=(1, 1), groups=1, bias=True)
        # Inception 4b (512 channels in)
        self.inception_4b_1x1 = self.__conv(2, name='inception_4b/1x1', in_channels=512, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4b_3x3_reduce = self.__conv(2, name='inception_4b/3x3_reduce', in_channels=512, out_channels=112, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4b_5x5_reduce = self.__conv(2, name='inception_4b/5x5_reduce', in_channels=512, out_channels=24, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4b_pool_proj = self.__conv(2, name='inception_4b/pool_proj', in_channels=512, out_channels=64, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4b_3x3 = self.__conv(2, name='inception_4b/3x3', in_channels=112, out_channels=224, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        self.inception_4b_5x5 = self.__conv(2, name='inception_4b/5x5', in_channels=24, out_channels=64, kernel_size=(5, 5), stride=(1, 1), groups=1, bias=True)
        # Inception 4c (512 channels in)
        self.inception_4c_1x1 = self.__conv(2, name='inception_4c/1x1', in_channels=512, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4c_3x3_reduce = self.__conv(2, name='inception_4c/3x3_reduce', in_channels=512, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4c_5x5_reduce = self.__conv(2, name='inception_4c/5x5_reduce', in_channels=512, out_channels=24, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4c_pool_proj = self.__conv(2, name='inception_4c/pool_proj', in_channels=512, out_channels=64, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4c_3x3 = self.__conv(2, name='inception_4c/3x3', in_channels=128, out_channels=256, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        self.inception_4c_5x5 = self.__conv(2, name='inception_4c/5x5', in_channels=24, out_channels=64, kernel_size=(5, 5), stride=(1, 1), groups=1, bias=True)
        # Inception 4d (512 channels in)
        self.inception_4d_1x1 = self.__conv(2, name='inception_4d/1x1', in_channels=512, out_channels=112, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4d_3x3_reduce = self.__conv(2, name='inception_4d/3x3_reduce', in_channels=512, out_channels=144, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4d_5x5_reduce = self.__conv(2, name='inception_4d/5x5_reduce', in_channels=512, out_channels=32, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4d_pool_proj = self.__conv(2, name='inception_4d/pool_proj', in_channels=512, out_channels=64, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4d_3x3 = self.__conv(2, name='inception_4d/3x3', in_channels=144, out_channels=288, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        self.inception_4d_5x5 = self.__conv(2, name='inception_4d/5x5', in_channels=32, out_channels=64, kernel_size=(5, 5), stride=(1, 1), groups=1, bias=True)
        # Inception 4e (528 channels in)
        self.inception_4e_1x1 = self.__conv(2, name='inception_4e/1x1', in_channels=528, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4e_3x3_reduce = self.__conv(2, name='inception_4e/3x3_reduce', in_channels=528, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4e_5x5_reduce = self.__conv(2, name='inception_4e/5x5_reduce', in_channels=528, out_channels=32, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4e_pool_proj = self.__conv(2, name='inception_4e/pool_proj', in_channels=528, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_4e_3x3 = self.__conv(2, name='inception_4e/3x3', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        self.inception_4e_5x5 = self.__conv(2, name='inception_4e/5x5', in_channels=32, out_channels=128, kernel_size=(5, 5), stride=(1, 1), groups=1, bias=True)
        # Inception 5a (832 channels in)
        self.inception_5a_1x1 = self.__conv(2, name='inception_5a/1x1', in_channels=832, out_channels=256, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_5a_3x3_reduce = self.__conv(2, name='inception_5a/3x3_reduce', in_channels=832, out_channels=160, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_5a_5x5_reduce = self.__conv(2, name='inception_5a/5x5_reduce', in_channels=832, out_channels=32, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_5a_pool_proj = self.__conv(2, name='inception_5a/pool_proj', in_channels=832, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_5a_3x3 = self.__conv(2, name='inception_5a/3x3', in_channels=160, out_channels=320, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        self.inception_5a_5x5 = self.__conv(2, name='inception_5a/5x5', in_channels=32, out_channels=128, kernel_size=(5, 5), stride=(1, 1), groups=1, bias=True)
        # Inception 5b (832 channels in)
        self.inception_5b_1x1 = self.__conv(2, name='inception_5b/1x1', in_channels=832, out_channels=384, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_5b_3x3_reduce = self.__conv(2, name='inception_5b/3x3_reduce', in_channels=832, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_5b_5x5_reduce = self.__conv(2, name='inception_5b/5x5_reduce', in_channels=832, out_channels=48, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_5b_pool_proj = self.__conv(2, name='inception_5b/pool_proj', in_channels=832, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.inception_5b_3x3 = self.__conv(2, name='inception_5b/3x3', in_channels=192, out_channels=384, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        self.inception_5b_5x5 = self.__conv(2, name='inception_5b/5x5', in_channels=48, out_channels=128, kernel_size=(5, 5), stride=(1, 1), groups=1, bias=True)
        # Final fully-connected classifier: 1024 features -> 205 classes
        self.loss3_classifier_1 = self.__dense(name = 'loss3/classifier_1', in_features = 1024, out_features = 205, bias = True)
def forward(self, x):
conv1_7x7_s2_pad = F.pad(x, (3, 3, 3, 3))
conv1_7x7_s2 = self.conv1_7x7_s2(conv1_7x7_s2_pad)
conv1_relu_7x7 = F.relu(conv1_7x7_s2)
pool1_3x3_s2_pad = F.pad(conv1_relu_7x7, (0, 1, 0, 1), value=float('-inf'))
pool1_3x3_s2, pool1_3x3_s2_idx = F.max_pool2d(pool1_3x3_s2_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False, return_indices=True)
pool1_norm1 = F.local_response_norm(pool1_3x3_s2, size=5, alpha=9.999999747378752e-05, beta=0.75, k=1.0)
conv2_3x3_reduce = self.conv2_3x3_reduce(pool1_norm1)
conv2_relu_3x3_reduce = F.relu(conv2_3x3_reduce)
conv2_3x3_pad = F.pad(conv2_relu_3x3_reduce, (1, 1, 1, 1))
conv2_3x3 = self.conv2_3x3(conv2_3x3_pad)
conv2_relu_3x3 = F.relu(conv2_3x3)
conv2_norm2 = F.local_response_norm(conv2_relu_3x3, size=5, alpha=9.999999747378752e-05, beta=0.75, k=1.0)
pool2_3x3_s2_pad = F.pad(conv2_norm2, (0, 1, 0, 1), value=float('-inf'))
pool2_3x3_s2, pool2_3x3_s2_idx = F.max_pool2d(pool2_3x3_s2_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False, return_indices=True)
inception_3a_1x1 = self.inception_3a_1x1(pool2_3x3_s2)
inception_3a_3x3_reduce = self.inception_3a_3x3_reduce(pool2_3x3_s2)
inception_3a_5x5_reduce = self.inception_3a_5x5_reduce(pool2_3x3_s2)
inception_3a_pool_pad = F.pad(pool2_3x3_s2, (1, 1, 1, 1), value=float('-inf'))
inception_3a_pool, inception_3a_pool_idx = F.max_pool2d(inception_3a_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False, return_indices=True)
inception_3a_relu_1x1 = F.relu(inception_3a_1x1)
inception_3a_relu_3x3_reduce = F.relu(inception_3a_3x3_reduce)
inception_3a_relu_5x5_reduce = F.relu(inception_3a_5x5_reduce)
inception_3a_pool_proj = self.inception_3a_pool_proj(inception_3a_pool)
inception_3a_3x3_pad = F.pad(inception_3a_relu_3x3_reduce, (1, 1, 1, 1))
inception_3a_3x3 = self.inception_3a_3x3(inception_3a_3x3_pad)
inception_3a_5x5_pad = F.pad(inception_3a_relu_5x5_reduce, (2, 2, 2, 2))
inception_3a_5x5 = self.inception_3a_5x5(inception_3a_5x5_pad)
inception_3a_relu_pool_proj = F.relu(inception_3a_pool_proj)
inception_3a_relu_3x3 = F.relu(inception_3a_3x3)
inception_3a_relu_5x5 = F.relu(inception_3a_5x5)
inception_3a_output = torch.cat((inception_3a_relu_1x1, inception_3a_relu_3x3, inception_3a_relu_5x5, inception_3a_relu_pool_proj,), 1)
inception_3b_1x1 = self.inception_3b_1x1(inception_3a_output)
inception_3b_3x3_reduce = self.inception_3b_3x3_reduce(inception_3a_output)
inception_3b_5x5_reduce = self.inception_3b_5x5_reduce(inception_3a_output)
inception_3b_pool_pad = F.pad(inception_3a_output, (1, 1, 1, 1), value=float('-inf'))
inception_3b_pool, inception_3b_pool_idx = F.max_pool2d(inception_3b_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False, return_indices=True)
inception_3b_relu_1x1 = F.relu(inception_3b_1x1)
inception_3b_relu_3x3_reduce = F.relu(inception_3b_3x3_reduce)
inception_3b_relu_5x5_reduce = F.relu(inception_3b_5x5_reduce)
inception_3b_pool_proj = self.inception_3b_pool_proj(inception_3b_pool)
inception_3b_3x3_pad = F.pad(inception_3b_relu_3x3_reduce, (1, 1, 1, 1))
inception_3b_3x3 = self.inception_3b_3x3(inception_3b_3x3_pad)
inception_3b_5x5_pad = F.pad(inception_3b_relu_5x5_reduce, (2, 2, 2, 2))
inception_3b_5x5 = self.inception_3b_5x5(inception_3b_5x5_pad)
inception_3b_relu_pool_proj = F.relu(inception_3b_pool_proj)
inception_3b_relu_3x3 = F.relu(inception_3b_3x3)
inception_3b_relu_5x5 = F.relu(inception_3b_5x5)
inception_3b_output = torch.cat((inception_3b_relu_1x1, inception_3b_relu_3x3, inception_3b_relu_5x5, inception_3b_relu_pool_proj,), 1)
pool3_3x3_s2_pad = F.pad(inception_3b_output, (0, 1, 0, 1), value=float('-inf'))
pool3_3x3_s2, pool3_3x3_s2_idx = F.max_pool2d(pool3_3x3_s2_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False, return_indices=True)
inception_4a_1x1 = self.inception_4a_1x1(pool3_3x3_s2)
inception_4a_3x3_reduce = self.inception_4a_3x3_reduce(pool3_3x3_s2)
inception_4a_5x5_reduce = self.inception_4a_5x5_reduce(pool3_3x3_s2)
inception_4a_pool_pad = F.pad(pool3_3x3_s2, (1, 1, 1, 1), value=float('-inf'))
inception_4a_pool, inception_4a_pool_idx = F.max_pool2d(inception_4a_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False, return_indices=True)
inception_4a_relu_1x1 = F.relu(inception_4a_1x1)
inception_4a_relu_3x3_reduce = F.relu(inception_4a_3x3_reduce)
inception_4a_relu_5x5_reduce = F.relu(inception_4a_5x5_reduce)
inception_4a_pool_proj = self.inception_4a_pool_proj(inception_4a_pool)
inception_4a_3x3_pad = F.pad(inception_4a_relu_3x3_reduce, (1, 1, 1, 1))
inception_4a_3x3 = self.inception_4a_3x3(inception_4a_3x3_pad)
inception_4a_5x5_pad = F.pad(inception_4a_relu_5x5_reduce, (2, 2, 2, 2))
inception_4a_5x5 = self.inception_4a_5x5(inception_4a_5x5_pad)
inception_4a_relu_pool_proj = F.relu(inception_4a_pool_proj)
inception_4a_relu_3x3 = F.relu(inception_4a_3x3)
inception_4a_relu_5x5 = F.relu(inception_4a_5x5)
inception_4a_output = torch.cat((inception_4a_relu_1x1, inception_4a_relu_3x3, inception_4a_relu_5x5, inception_4a_relu_pool_proj,), 1)
inception_4b_1x1 = self.inception_4b_1x1(inception_4a_output)
inception_4b_3x3_reduce = self.inception_4b_3x3_reduce(inception_4a_output)
inception_4b_5x5_reduce = self.inception_4b_5x5_reduce(inception_4a_output)
inception_4b_pool_pad = F.pad(inception_4a_output, (1, 1, 1, 1), value=float('-inf'))
inception_4b_pool, inception_4b_pool_idx = F.max_pool2d(inception_4b_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False, return_indices=True)
inception_4b_relu_1x1 = F.relu(inception_4b_1x1)
inception_4b_relu_3x3_reduce = F.relu(inception_4b_3x3_reduce)
inception_4b_relu_5x5_reduce = F.relu(inception_4b_5x5_reduce)
inception_4b_pool_proj = self.inception_4b_pool_proj(inception_4b_pool)
inception_4b_3x3_pad = F.pad(inception_4b_relu_3x3_reduce, (1, 1, 1, 1))
inception_4b_3x3 = self.inception_4b_3x3(inception_4b_3x3_pad)
inception_4b_5x5_pad = F.pad(inception_4b_relu_5x5_reduce, (2, 2, 2, 2))
inception_4b_5x5 = self.inception_4b_5x5(inception_4b_5x5_pad)
inception_4b_relu_pool_proj = F.relu(inception_4b_pool_proj)
inception_4b_relu_3x3 = F.relu(inception_4b_3x3)
inception_4b_relu_5x5 = F.relu(inception_4b_5x5)
inception_4b_output = torch.cat((inception_4b_relu_1x1, inception_4b_relu_3x3, inception_4b_relu_5x5, inception_4b_relu_pool_proj,), 1)
inception_4c_1x1 = self.inception_4c_1x1(inception_4b_output)
inception_4c_3x3_reduce = self.inception_4c_3x3_reduce(inception_4b_output)
inception_4c_5x5_reduce = self.inception_4c_5x5_reduce(inception_4b_output)
inception_4c_pool_pad = F.pad(inception_4b_output, (1, 1, 1, 1), value=float('-inf'))
inception_4c_pool, inception_4c_pool_idx = F.max_pool2d(inception_4c_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False, return_indices=True)
inception_4c_relu_1x1 = F.relu(inception_4c_1x1)
inception_4c_relu_3x3_reduce = F.relu(inception_4c_3x3_reduce)
inception_4c_relu_5x5_reduce = F.relu(inception_4c_5x5_reduce)
inception_4c_pool_proj = self.inception_4c_pool_proj(inception_4c_pool)
inception_4c_3x3_pad = F.pad(inception_4c_relu_3x3_reduce, (1, 1, 1, 1))
inception_4c_3x3 = self.inception_4c_3x3(inception_4c_3x3_pad)
inception_4c_5x5_pad = F.pad(inception_4c_relu_5x5_reduce, (2, 2, 2, 2))
inception_4c_5x5 = self.inception_4c_5x5(inception_4c_5x5_pad)
inception_4c_relu_pool_proj = F.relu(inception_4c_pool_proj)
inception_4c_relu_3x3 = F.relu(inception_4c_3x3)
inception_4c_relu_5x5 = F.relu(inception_4c_5x5)
inception_4c_output = torch.cat((inception_4c_relu_1x1, inception_4c_relu_3x3, inception_4c_relu_5x5, inception_4c_relu_pool_proj,), 1)
inception_4d_1x1 = self.inception_4d_1x1(inception_4c_output)
inception_4d_3x3_reduce = self.inception_4d_3x3_reduce(inception_4c_output)
inception_4d_5x5_reduce = self.inception_4d_5x5_reduce(inception_4c_output)
inception_4d_pool_pad = F.pad(inception_4c_output, (1, 1, 1, 1), value=float('-inf'))
inception_4d_pool, inception_4d_pool_idx = F.max_pool2d(inception_4d_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False, return_indices=True)
inception_4d_relu_1x1 = F.relu(inception_4d_1x1)
inception_4d_relu_3x3_reduce = F.relu(inception_4d_3x3_reduce)
inception_4d_relu_5x5_reduce = F.relu(inception_4d_5x5_reduce)
inception_4d_pool_proj = self.inception_4d_pool_proj(inception_4d_pool)
inception_4d_3x3_pad = F.pad(inception_4d_relu_3x3_reduce, (1, 1, 1, 1))
inception_4d_3x3 = self.inception_4d_3x3(inception_4d_3x3_pad)
inception_4d_5x5_pad | |
# Repository: kanzeparov/NuCypher
"""
This file is part of nucypher.
nucypher is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
nucypher is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with nucypher. If not, see <https://www.gnu.org/licenses/>.
"""
from contextlib import suppress
from typing import Dict, ClassVar
from typing import Tuple
from typing import Union, List
from constant_sorrow import constants, default_constant_splitter
from eth_keys import KeyAPI as EthKeyAPI
from eth_utils import to_checksum_address, to_canonical_address
from umbral.keys import UmbralPublicKey
from umbral.signing import Signature
from nucypher.blockchain.eth.chains import Blockchain
from nucypher.crypto.api import encrypt_and_sign
from nucypher.crypto.kits import UmbralMessageKit
from nucypher.crypto.powers import CryptoPower, SigningPower, EncryptingPower, NoSigningPower, CryptoPowerUp
from nucypher.crypto.signing import signature_splitter, StrangerStamp, SignatureStamp
from nucypher.network.middleware import RestMiddleware
from nucypher.network.nicknames import nickname_from_seed
from nucypher.network.nodes import Learner
class Character(Learner):
"""
A base-class for any character in our cryptography protocol narrative.
"""
_default_crypto_powerups = None
_stamp = None
_crashed = False
from nucypher.network.protocols import SuspiciousActivity # Ship this exception with every Character.
from nucypher.crypto.signing import InvalidSignature
    def __init__(self,
                 is_me: bool = True,
                 federated_only: bool = False,
                 blockchain: Blockchain = None,
                 checksum_address: bytes = constants.NO_BLOCKCHAIN_CONNECTION.bool_value(False),
                 network_middleware: RestMiddleware = None,
                 keyring_dir: str = None,
                 crypto_power: CryptoPower = None,
                 crypto_power_ups: List[CryptoPowerUp] = None,
                 *args, **kwargs
                 ) -> None:
        """
        Base class for Nucypher protocol actors.

        PowerUps
        ========
        :param crypto_power: A CryptoPower object; if provided, this will be the character's CryptoPower.
        :param crypto_power_ups: If crypto_power is not provided, a new one will be made to consume all CryptoPowerUps.

        If neither crypto_power nor crypto_power_ups are provided, we give this
        Character all CryptoPowerUps listed in their _default_crypto_powerups
        attribute.

        :param is_me: Set this to True when you want this Character to represent
            the owner of the configuration under which the program is being run.
            A Character who is_me can do things that other Characters can't,
            like run servers, sign messages, and decrypt messages which are
            encrypted for them.  Typically this will be True for exactly one
            Character, but there are scenarios in which its imaginable to be
            represented by zero Characters or by more than one Character.
        """
        self.federated_only = federated_only  # type: bool

        #
        # Powers: exactly one source of power-ups is allowed.
        #
        if crypto_power and crypto_power_ups:
            raise ValueError("Pass crypto_power or crypto_power_ups (or neither), but not both.")
        crypto_power_ups = crypto_power_ups or list()  # type: list

        if crypto_power:
            self._crypto_power = crypto_power  # type: CryptoPower
        elif crypto_power_ups:
            self._crypto_power = CryptoPower(power_ups=crypto_power_ups)
        else:
            # Fall back to the subclass's declared default power-ups.
            self._crypto_power = CryptoPower(power_ups=self._default_crypto_powerups)

        # May be the NO_BLOCKCHAIN_CONNECTION sentinel; possibly replaced below.
        self._checksum_address = checksum_address

        #
        # Self-Character: full capabilities (keyring, middleware, signing stamp, learning).
        #
        if is_me is True:
            if not self.federated_only:
                self.blockchain = blockchain or Blockchain.connect()
            self.keyring_dir = keyring_dir  # type: str
            self.treasure_maps = {}  # type: dict
            self.network_middleware = network_middleware or RestMiddleware()

            #
            # Signing Power: absence is tolerated and recorded with a sentinel.
            #
            try:
                signing_power = self._crypto_power.power_ups(SigningPower)  # type: SigningPower
                self._stamp = signing_power.get_signature_stamp()  # type: SignatureStamp
            except NoSigningPower:
                self._stamp = constants.NO_SIGNING_POWER

            #
            # Learner
            #
            Learner.__init__(self,
                             network_middleware=network_middleware,
                             *args, **kwargs)

        #
        # Stranger-Character: only public material; no middleware allowed.
        #
        else:  # Feel like a stranger
            if network_middleware is not None:
                raise TypeError("Network middleware cannot be attached to a Stanger-Character.")
            self._stamp = StrangerStamp(self.public_keys(SigningPower))
            self.keyring_dir = constants.STRANGER
            self.network_middleware = constants.STRANGER

        #
        # Decentralized: an explicit checksum address is mandatory.
        #
        if not federated_only:
            if not checksum_address:
                raise ValueError("No checksum_address provided while running in a non-federated mode.")
            else:
                self._checksum_address = checksum_address  # TODO: Check that this matches BlockchainPower

        #
        # Federated: the address is derived from the signing key instead.
        #
        elif federated_only:
            try:
                self._set_checksum_address()  # type: str
            except NoSigningPower:
                self._checksum_address = constants.NO_BLOCKCHAIN_CONNECTION
            if checksum_address:
                # We'll take a checksum address, as long as it matches their signing key
                if not checksum_address == self.checksum_public_address:
                    error = "Federated-only Characters derive their address from their Signing key; got {} instead."
                    raise self.SuspiciousActivity(error.format(checksum_address))

        # Derive a human-readable nickname from the address, when possible.
        try:
            self.nickname, self.nickname_metadata = nickname_from_seed(self.checksum_public_address)
        except SigningPower.not_found_error:
            if self.federated_only:
                self.nickname = self.nickname_metadata = constants.NO_NICKNAME
            else:
                raise
def __eq__(self, other) -> bool:
return bytes(self.stamp) == bytes(other.stamp)
def __hash__(self):
return int.from_bytes(bytes(self.stamp), byteorder="big")
def __repr__(self):
r = "⇀{}↽ ({})"
r = r.format(self.nickname, self.checksum_public_address)
return r
@property
def name(self):
return self.__class__.__name__
@property
def rest_interface(self):
return self.rest_server.rest_url()
@property
def stamp(self):
if self._stamp is constants.NO_SIGNING_POWER:
raise NoSigningPower
elif not self._stamp:
raise AttributeError("SignatureStamp has not been set up yet.")
else:
return self._stamp
@property
def canonical_public_address(self):
return to_canonical_address(self.checksum_public_address)
@canonical_public_address.setter
def canonical_public_address(self, address_bytes):
self._checksum_address = to_checksum_address(address_bytes)
    @property
    def checksum_public_address(self):
        """This Character's checksum address, derived lazily on first access
        while it still holds the NO_BLOCKCHAIN_CONNECTION sentinel."""
        if self._checksum_address is constants.NO_BLOCKCHAIN_CONNECTION:
            self._set_checksum_address()
        return self._checksum_address
    @classmethod
    def from_config(cls, config, **overrides) -> 'Character':
        """Instantiate a Character from a configuration object by delegating
        to ``config.produce``; keyword *overrides* are passed straight through."""
        return config.produce(**overrides)
@classmethod
def from_public_keys(cls, powers_and_material: Dict, federated_only=True, *args, **kwargs) -> 'Character':
"""
Sometimes we discover a Character and, at the same moment,
learn the public parts of more of their powers. Here, we take a Dict
(powers_and_key_bytes) in the following format:
{CryptoPowerUp class: public_material_bytes}
Each item in the collection will have the CryptoPowerUp instantiated
with the public_material_bytes, and the resulting CryptoPowerUp instance
consumed by the Character.
# TODO: Need to be federated only until we figure out the best way to get the checksum_address in here.
"""
crypto_power = CryptoPower()
for power_up, public_key in powers_and_material.items():
try:
umbral_key = UmbralPublicKey(public_key)
except TypeError:
umbral_key = public_key
crypto_power.consume_power_up(power_up(pubkey=umbral_key))
return cls(is_me=False, federated_only=federated_only, crypto_power=crypto_power, *args, **kwargs)
def encrypt_for(self,
recipient: 'Character',
plaintext: bytes,
sign: bool = True,
sign_plaintext=True,
) -> tuple:
"""
Encrypts plaintext for recipient actor. Optionally signs the message as well.
:param recipient: The character whose public key will be used to encrypt
cleartext.
:param plaintext: The secret to be encrypted.
:param sign: Whether or not to sign the message.
:param sign_plaintext: When signing, the cleartext is signed if this is
True, Otherwise, the resulting ciphertext is signed.
:return: A tuple, (ciphertext, signature). If sign==False,
then signature will be NOT_SIGNED.
"""
signer = self.stamp if sign else constants.DO_NOT_SIGN
message_kit, signature = encrypt_and_sign(recipient_pubkey_enc=recipient.public_keys(EncryptingPower),
plaintext=plaintext,
signer=signer,
sign_plaintext=sign_plaintext
)
return message_kit, signature
    def verify_from(self,
                    stranger: 'Character',
                    message_kit: Union[UmbralMessageKit, bytes],
                    signature: Signature = None,
                    decrypt=False,
                    delegator_verifying_key: UmbralPublicKey = None,
                    ) -> tuple:
        """
        Inverse of encrypt_for.

        :param stranger: A Character instance representing the actor whom the
            sender claims to be.  We check the public key owned by this
            Character instance to verify.
        :param message_kit: the message to be (perhaps decrypted and) verified.
        :param signature: The signature to check.
        :param decrypt: Whether or not to decrypt the messages.
        :param delegator_verifying_key: A signing key from the original delegator.
            This is used only when decrypting a MessageKit with an activated Capsule
            to check that the KFrag used to create each attached CFrag is the
            authentic KFrag initially created by the delegator.

        :return: Whether or not the signature is valid, the decrypted plaintext
            or NO_DECRYPTION_PERFORMED

        NOTE(review): as written there is no explicit return on the success
        path (the function falls through and returns None), and if decrypt is
        True but the sig header matches neither known constant, `message` is
        never bound -- confirm against callers.
        """
        # The claimed sender's verifying key.
        sender_pubkey_sig = stranger.stamp.as_umbral_pubkey()
        # Raw-bytes message kits have no sender_pubkey_sig attribute -- hence suppress.
        with suppress(AttributeError):
            if message_kit.sender_pubkey_sig:
                if not message_kit.sender_pubkey_sig == sender_pubkey_sig:
                    raise ValueError(
                        "This MessageKit doesn't appear to have come from {}".format(stranger))

        signature_from_kit = None

        if decrypt:
            # We are decrypting the message; let's do that first and see what the sig header says.
            cleartext_with_sig_header = self.decrypt(message_kit, verifying_key=delegator_verifying_key)
            sig_header, cleartext = default_constant_splitter(cleartext_with_sig_header, return_remainder=True)

            if sig_header == constants.SIGNATURE_IS_ON_CIPHERTEXT:
                # The ciphertext is what is signed - note that for later.
                message = message_kit.ciphertext
                if not signature:
                    raise ValueError("Can't check a signature on the ciphertext if don't provide one.")

            elif sig_header == constants.SIGNATURE_TO_FOLLOW:
                # The signature follows in this cleartext - split it off.
                signature_from_kit, cleartext = signature_splitter(cleartext,
                                                                   return_remainder=True)
                message = cleartext

        else:
            # Not decrypting - the message is the object passed in as a message kit.  Cast it.
            message = bytes(message_kit)
            cleartext = constants.NO_DECRYPTION_PERFORMED

        # An explicitly supplied signature must agree with any signature embedded in the kit.
        if signature and signature_from_kit:
            if signature != signature_from_kit:
                raise ValueError(
                    "The MessageKit has a Signature, but it's not the same one you provided. Something's up.")

        signature_to_use = signature or signature_from_kit

        if signature_to_use:
            is_valid = signature_to_use.verify(message, sender_pubkey_sig)
            if not is_valid:
                raise stranger.InvalidSignature(
                    "Signature for message isn't valid: {}".format(signature_to_use))
        else:
            raise self.InvalidSignature("No signature provided -- signature presumed invalid.")
#
# Next we have decrypt() and sign() - these use the private
# keys of their respective powers; any character who has these powers can use these functions.
#
# If they don't have the correct Power, the appropriate | |
= re.sub(
r"CMD .*\n",
'CMD ["coverage", "run", "/usr/share/scalyr-agent-2/py/scalyr_agent/agent_main.py", '
'"--no-fork", "--no-change-user", "start"]',
new_dockerfile_source,
)
with open("Dockerfile", "w") as file:
file.write(new_dockerfile_source)
tar.add("Dockerfile")
tar.add(source_tarball)
tar.close()
# Write one file that has the contents of the script followed by the contents of the tarfile.
builder_fp = open(output_name, "w")
builder_fp.write(base_script)
builder_fp.write(tar_out.getvalue())
builder_fp.close()
# Make the script executable.
st = os.stat(output_name)
os.chmod(output_name, st.st_mode | stat.S_IEXEC | stat.S_IXGRP)
return output_name
def build_container_tarball(tarball_name, base_configs=None):
    """Builds the scalyr-agent-2 tarball for either Docker or Kubernetes in the current working directory.

    @param tarball_name: The name for the output tarball (including the `.tar.gz` extension)
    @param base_configs: The directory (relative to the top of the source tree) that contains the configuration
        files to copy (such as the agent.json and agent.d directory).  If None, then will use `config`.

    @type tarball_name: str
    @type base_configs: str

    @return: The file name of the built tarball.
    """
    build_common_docker_and_package_files(False, base_configs=base_configs)

    # Need to create some docker specific files
    make_directory("root/var/log/scalyr-agent-2/containers")

    # Tar it up.
    tar = tarfile.open(tarball_name, "w:gz")
    original_dir = os.getcwd()
    os.chdir("root")
    # Fix: restore the working directory and close the archive even if something below raises,
    # so a failure mid-build doesn't leave the process chdir'd into `root` with an open file.
    try:
        # Do a manual walk over the contents of root so that we can use `addfile` to add the tarfile... which allows
        # us to reset the owner/group to root.  This might not be that portable to Windows, but for now, Docker is
        # mainly Posix.
        for root, dirs, files in os.walk("."):
            to_copy = [os.path.join(root, name) for name in dirs + files]
            for x in to_copy:
                file_entry = tar.gettarinfo(x)
                # Normalize ownership so the files unpack as root regardless of who built the tarball.
                file_entry.uname = "root"
                file_entry.gname = "root"
                file_entry.uid = 0
                file_entry.gid = 0
                if file_entry.isreg():
                    # Regular files must be added together with their content.
                    with open(file_entry.name, "rb") as fp:
                        tar.addfile(file_entry, fp)
                else:
                    # Directories, symlinks, etc. carry no content stream.
                    tar.addfile(file_entry)
    finally:
        os.chdir(original_dir)
        tar.close()
    return tarball_name
def build_rpm_or_deb_package(is_rpm, variant, version):
    """Builds either an RPM or Debian package in the current working directory.

    Packaging itself is delegated to the external `fpm` tool, which must be on PATH.

    @param is_rpm: True if an RPM should be built. Otherwise a Debian package will be built.
    @param variant: If not None, will add the specified string into the iteration identifier for the package. This
        allows for different packages to be built for the same type and same version.
    @param version: The agent version.

    @return: The file name of the built package.
    """
    build_common_docker_and_package_files(True)

    # Create the scriplets the RPM/Debian package invokes when uninstalling or upgrading.
    create_scriptlets()
    # Produce the change logs that we will embed in the package, based on the CHANGELOG.md in this directory.
    create_change_logs()

    if is_rpm:
        package_type = "rpm"
    else:
        package_type = "deb"

    # Only change the iteration label if we need to embed a variant.
    if variant is not None:
        iteration_arg = "--iteration 1.%s" % variant
    else:
        iteration_arg = ""

    description = (
        "Scalyr Agent 2 is the daemon process Scalyr customers run on their servers to collect metrics and "
        "log files and transmit them to Scalyr."
    )

    # NOTE: the command below is one long string assembled from adjacent literals;
    # the %s placeholders are (package_type, version, iteration_arg, description).
    run_command(
        'fpm -s dir -a all -t %s -n "scalyr-agent-2" -v %s '
        ' --license "Apache 2.0" '
        " --vendor Scalyr %s "
        " --maintainer <EMAIL> "
        " --provides scalyr-agent-2 "
        ' --description "%s" '
        ' --depends "python >= 2.4" '
        ' --depends "bash >= 3.2" '
        " --url https://www.scalyr.com "
        " --deb-user root "
        " --deb-group root "
        " --deb-changelog changelog-deb "
        " --rpm-user root "
        " --rpm-group root "
        " --rpm-changelog changelog-rpm"
        " --before-install preinstall.sh "
        " --after-install postinstall.sh "
        " --before-remove preuninstall.sh "
        " --config-files /etc/scalyr-agent-2/agent.json "
        " --directories /usr/share/scalyr-agent-2 "
        " --directories /var/lib/scalyr-agent-2 "
        " --directories /var/log/scalyr-agent-2 "
        " -C root usr etc var" % (package_type, version, iteration_arg, description),
        exit_on_fail=True,
        command_name="fpm",
    )

    # We determine the artifact name in a little bit of loose fashion.. we just glob over the current
    # directory looking for something either ending in .rpm or .deb.  There should only be one package,
    # so that is fine.
    if is_rpm:
        files = glob.glob("*.rpm")
    else:
        files = glob.glob("*.deb")

    if len(files) != 1:
        raise Exception(
            "Could not find resulting rpm or debian package in the build directory."
        )

    return files[0]
def build_tarball_package(variant, version, no_versioned_file_name):
    """Builds the scalyr-agent-2 tarball in the current working directory.

    @param variant: If not None, will add the specified string into the final tarball name. This allows for different
        tarballs to be built for the same type and same version.
    @param version: The agent version.
    @param no_versioned_file_name: True if the version number should not be embedded in the artifact's file name.

    @return: The file name of the built tarball.
    """
    # Use build_base_files to build all of the important stuff in ./scalyr-agent-2
    build_base_files()

    # Build the rest of the directories required for the tarball install.  Mainly, the log and data
    # directories in the tarball itself where the running process will store its state.
    for relative_path in ("scalyr-agent-2/data", "scalyr-agent-2/log", "scalyr-agent-2/config/agent.d"):
        make_directory(relative_path)

    # Create a file named `packageless`.  This signals to the agent that this is a tarball install
    # instead of an RPM/Debian install, which changes the default paths for the config, logs, data,
    # etc directories.  See configuration.py.
    write_to_file("1", "scalyr-agent-2/packageless")

    if variant is None:
        base_archive_name = "scalyr-agent-%s" % version
    else:
        base_archive_name = "scalyr-agent-%s.%s" % (version, variant)

    shutil.move("scalyr-agent-2", base_archive_name)

    if no_versioned_file_name:
        output_name = "scalyr-agent.tar.gz"
    else:
        output_name = "%s.tar.gz" % base_archive_name

    # Tar it up.
    tar = tarfile.open(output_name, "w:gz")
    tar.add(base_archive_name)
    tar.close()

    return output_name
def build_base_files(base_configs="config"):
    """Build the basic structure for a package in a new directory scalyr-agent-2 in the current working directory.

    This creates scalyr-agent-2 in the current working directory and then populates it with the basic structure
    required by most of the packages.

    It copies the source files, the certs, the configuration directories, etc.  This will make sure to exclude
    files like .pyc, .pyo, etc.

    In the end, the structure will look like:
        scalyr-agent-2:
          py/scalyr_agent/        -- All the scalyr_agent source files
          certs/ca_certs.pem      -- The trusted SSL CA root list.
          config/agent.json       -- The configuration file.
          bin/scalyr-agent-2      -- Symlink to the agent_main.py file to run the agent.
          bin/scalyr-agent-2-config -- Symlink to config_main.py to run the configuration tool
          build_info              -- A file containing the commit id of the latest commit included in this package,
                                     the time it was built, and other information.

    NOTE: this function works by chdir-ing through the tree it builds; the statement order below
    is therefore significant, and the original working directory is restored at the end.

    @param base_configs:  The directory (relative to the top of the source tree) that contains the configuration
        files to copy (such as the agent.json and agent.d directory).  If None, then will use `config`.
    """
    original_dir = os.getcwd()
    # This will return the parent directory of this file.  We will use that to determine the path
    # to files like scalyr_agent/ to copy the source files
    agent_source_root = __source_root__

    make_directory("scalyr-agent-2/py")
    os.chdir("scalyr-agent-2")

    make_directory("certs")
    make_directory("bin")
    make_directory("misc")

    # Copy the version file.  We copy it both to the root and the package root.  The package copy is done down below.
    shutil.copy(make_path(agent_source_root, "VERSION"), "VERSION")

    # Copy the source files.
    os.chdir("py")

    shutil.copytree(make_path(agent_source_root, "scalyr_agent"), "scalyr_agent")
    shutil.copytree(make_path(agent_source_root, "monitors"), "monitors")
    os.chdir("monitors")
    recursively_delete_files_by_name("README.md")
    os.chdir("..")
    shutil.copy(
        make_path(agent_source_root, "VERSION"), os.path.join("scalyr_agent", "VERSION")
    )

    # Exclude certain files.
    # TODO:  Should probably use MANIFEST.in to do this, but don't know the Python-fu to do this yet.
    #
    # Don't include the tests directories.  Also, don't include the .idea directory created by IDE.
    recursively_delete_dirs_by_name("\.idea", "tests")
    recursively_delete_files_by_name(
        ".*\.pyc", ".*\.pyo", ".*\.pyd", "all_tests\.py", ".*~"
    )

    os.chdir("..")

    # Copy the config
    if base_configs is not None:
        config_path = base_configs
    else:
        config_path = "config"
    shutil.copytree(make_path(agent_source_root, config_path), "config")

    # Create the trusted CA root list.
    os.chdir("certs")
    cat_files(
        glob_files(make_path(agent_source_root, "certs/*_root.pem")), "ca_certs.crt"
    )
    cat_files(
        glob_files(make_path(agent_source_root, "certs/*_intermediate.pem")),
        "intermediate_certs.pem",
    )
    # Also keep each individual pem file alongside the concatenated lists.
    for cert_file in glob_files(make_path(agent_source_root, "certs/*.pem")):
        shutil.copy(cert_file, cert_file.split("/")[-1])
    os.chdir("..")

    # Misc extra files needed for some features.
    os.chdir("misc")
    # This docker file is needed by the `scalyr-agent-2-config --docker-create-custom-dockerfile` command.  We
    # put it in all distributions (not just the docker_tarball) in case a customer creates an image using a package.
    shutil.copy(
        make_path(agent_source_root, "docker/Dockerfile.custom_agent_config"),
        "Dockerfile.custom_agent_config",
    )
    shutil.copy(
        make_path(agent_source_root, "docker/Dockerfile.custom_k8s_config"),
        "Dockerfile.custom_k8s_config",
    )
    os.chdir("..")

    # Create symlinks for the two commands
    os.chdir("bin")

    make_soft_link("../py/scalyr_agent/agent_main.py", "scalyr-agent-2")
    make_soft_link("../py/scalyr_agent/config_main.py", "scalyr-agent-2-config")

    os.chdir("..")

    write_to_file(get_build_info(), "build_info")

    os.chdir(original_dir)
def make_directory(path):
"""Creates the specified directory including any parents that do not yet exist.
@param path: The path of the directory to create. This string can use a forward slash to separate path
components regardless of the | |
for balls, need to scale?#by default : 0.3?
sphers=[]
k=0
n='S'
AtmRadi = {"A":1.7,"N":1.54,"C":1.7,"P":1.7,"O":1.52,"S":1.85,"H":1.2}
if R == None : R = 0.
if scale == 0.0 : scale = 1.0
if mat == None : mat=create_Atoms_materials()
if name.find('balls') != (-1) : n='B'
if geom is not None:
coords=geom.getVertices()
else :
coords=x.coords
hiera = 'default'
mol = x[0].getParentOfType(Protein)
molname = name.split("_")[0]
if VERBOSE : print "molname ", molname,mol
Spline = getObject("spline"+molname)
for c in mol.chains:
spher=[]
oneparent = True
atoms = c.residues.atoms
parent=findatmParentHierarchie(atoms[0],n,hiera)
#print "finded",parent
for j in xrange(len(atoms.coords)):
#at=res.atoms[j]
at=atoms[j]
radius = at.radius
scaleFactor=float(R)+float(radius)*float(scale)
atN=at.name
#print atN
if atN[0] not in AtmRadi.keys(): atN="A"
fullname = at.full_name()
#print fullname
atC=at.coords#at._coords[0]
spher.append( c4d.BaseObject(INSTANCE) )
spher[j][1001]=iMe[atN[0]]
#spher[j][1001]=1
spher[j].SetName(n+"_"+fullname)#.replace(":","_")
sc = iMe[atN[0]][905].x #radius of parent mesh
#if sc != scaleFactor :
if n=='B' :
scale = 1.
spher[j][905]=c4d.Vector(float((1/sc)*scale),float((1/sc)*scale),float((1/sc)*scale))
#
if atN in ["CA","N","C"] and Spline != None and n == 'S':
pos= float(((j*1.) / Spline.GetPointCount()))
path=spher[j].MakeTag(Follow_PATH)
path[1001] = Spline
path[1000] = 1
path[1003] = pos
else : spher[j].SetPos(c4dv(atC))
texture = spher[j].MakeTag(c4d.Ttexture)
texture[1010] = mat[atN[0]]
p = findatmParentHierarchie(at,n,hiera)
#print "dinded",p
if parent != p :
cp = p
oneparent = False
parent = p
else :
cp = parent
#print "parent",cp
addObjectToScene(getCurrentScene(),spher[j],parent=cp)
toggleDisplay(spher[j],False)
k=k+1
if pb :
progressBar(j/len(coords)," cpk ")
#dialog.bc[c4d.gui.BFM_STATUSBAR_PROGRESS] = j/len(coords)
#dialog.bc[c4d.gui.BFM_STATUSBAR_PROGRESSFULLSIZE] = True
#c4d.StatusSetBar(j/len(coords))
update()
sphers.extend(spher)
if pb :
resetProgressBar(0)
return sphers
def spheresMesh(name,x,mat=None,scale=1.0,Res=32,R=None,join=0):
    """Create one real c4d sphere object per atom in *x* (no instancing).

    The sphere radius is the atom's van der Waals radius times twice *scale*.
    The created spheres are returned as a list; they are NOT added to a scene
    and no texture tag is attached here.
    """
    # Normalize the scale/resolution parameters (Res is currently unused below
    # but kept for interface compatibility).
    if scale == 0.0:
        scale = 1.
    scale = scale * 2.
    if Res == 0:
        Res = 10.
    else:
        Res = Res * 5.
    # Ensure the atom materials exist (kept for its side effect even though
    # the texture assignment below is disabled).
    if mat == None:
        mat = create_Atoms_materials()

    spheres = []
    for atom in x:
        full_name = atom.full_name()
        position = atom._coords[0]
        radius = atom.vdwRadius
        sphere = c4d.BaseObject(c4d.Osphere)
        sphere.SetName(full_name.replace(":", "_"))
        sphere[PRIM_SPHERE_RAD] = float(radius) * float(scale)
        sphere.SetPos(c4d.Vector(float(position[0]), float(position[1]), float(position[2])))
        sphere.MakeTag(c4d.Tphong)
        spheres.append(sphere)
    return spheres
def display_CPK(mol,sel,display,needRedraw=False,quality=0,cpkRad=0.0,scaleFactor=1.0,useTree="default",dialog=None):
    """Show/hide (and lazily create) the CPK sphere representation of *mol*.

    On the first call (no mesh yet on the 'cpk' geometry), a base sphere mesh
    and one instance object per atom are created and parented according to
    *useTree* ('perRes', 'perAtom', or default per-chain).  On later calls the
    base mesh is rescaled and only the sphere objects of the atoms in *sel*
    are toggled to *display* (and repositioned when *needRedraw* is True).

    @param sel: atoms whose "S_<full_name>" sphere objects are toggled.
    @param display: True to show, False to hide.
    """
    sc = getCurrentScene()
    g = mol.geomContainer.geoms['cpk']
    #print g
    #name=selection+"_cpk"
    #select=self.select(selection,negate=False, only=True, xor=False, log=0,
    #                   intersect=False)
    #print name,select
    #sel=select.findType(Atom)
    if not hasattr(g,"obj"): #if no mesh have to create it for evey atms
        name=mol.name+"_cpk"
        #print name
        mesh=createBaseSphere(name="base_cpk",quality=quality,cpkRad=cpkRad,
                              scale=scaleFactor,parent=mol.geomContainer.masterGeom.obj)
        ob=instancesAtomsSphere(name,mol.allAtoms,mesh,sc,scale=scaleFactor,
                                Res=quality,join=0,dialog=dialog)
        addObjToGeom([ob,mesh],g)
        for i,o in enumerate(ob):
            #            if dialog != None :
            #                dialog.bc[c4d.gui.BFM_STATUSBAR_PROGRESS] = j/len(coords)
            #                #dialog.bc[c4d.gui.BFM_STATUSBAR_PROGRESSFULLSIZE] = True
            #                dialog.set(dialog._progess,float(i/len(ob)))#dialog.bc)
            #                getCurrentScene().Message(c4d.MULTIMSG_UP)
            #                c4d.draw_views(c4d.DA_ONLY_ACTIVE_VIEW|c4d.DA_NO_THREAD|c4d.DA_NO_ANIMATION)
            # Choose the parent object depending on the requested hierarchy mode.
            parent=mol.geomContainer.masterGeom.obj
            hierarchy=parseObjectName(o)
            if hierarchy != "" :
                if useTree == 'perRes' :
                    parent = getObject(mol.geomContainer.masterGeom.res_obj[hierarchy[2]])
                elif useTree == 'perAtom' :
                    parent = getObject(o.GetName().split("_")[1])
                else :
                    parent = getObject(mol.geomContainer.masterGeom.chains_obj[hierarchy[1]+"_cpk"])
            addObjectToScene(sc,o,parent=parent)
            toggleDisplay(o,False) #True per default
    #elif hasattr(g,"obj")  and display :
        #updateSphereMesh(g,quality=quality,cpkRad=cpkRad,scale=scaleFactor)
        #if needRedraw : updateSphereObj(g)
    #if hasattr(g,"obj"):
    else :
        # Mesh already exists: rescale the shared base sphere only.
        updateSphereMesh(g,quality=quality,cpkRad=cpkRad,scale=scaleFactor)
    atoms=sel#findType(Atom) already done
    for atms in atoms:
        nameo = "S_"+atms.full_name()
        o=getObject(nameo)#Blender.Object.Get (nameo)
        if o != None :
            toggleDisplay(o,display)
            if needRedraw : updateObjectPos(o,atms.coords)
def getStickProperties(coord1,coord2):
    """Compute the length and placement matrix for a cylinder (stick) joining two coordinates.

    @param coord1: (x, y, z) sequence for the first endpoint.
    @param coord2: (x, y, z) sequence for the second endpoint.
    @return: (laenge, mx) where laenge is the Euclidean distance between the
        endpoints and mx is a c4d.Matrix positioned at the midpoint with its
        axes forming an orthonormal frame aligned along the stick.  Note the
        c4d vectors are built in (z, y, x) component order, matching the
        callers in this module.
    """
    x1, y1, z1 = float(coord1[0]), float(coord1[1]), float(coord1[2])
    x2, y2, z2 = float(coord2[0]), float(coord2[1]), float(coord2[2])
    laenge = math.sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2))
    # Fix: the original also computed unused angles wsz/wz here; besides being
    # dead code, acos((x1-x2)/laenge) raised ZeroDivisionError for coincident
    # endpoints.  Removed.
    # Midpoint of the two endpoints (components deliberately ordered z, y, x).
    offset = c4d.Vector(float(z1 + z2) / 2, float(y1 + y2) / 2, float(x1 + x2) / 2)
    # v_2: normalized direction of the stick.
    v_2 = c4d.Vector(float(z1 - z2), float(y1 - y2), float(x1 - x2))
    v_2.Normalize()
    # Build the remaining two axes of an orthonormal frame via cross products
    # with an arbitrary non-parallel seed vector.
    v_1 = c4d.Vector(float(1.), float(0.), float(2.))
    v_3 = c4d.Vector.Cross(v_1, v_2)
    v_3.Normalize()
    v_1 = c4d.Vector.Cross(v_2, v_3)
    v_1.Normalize()
    mx = c4d.Matrix(offset, v_1, v_2, v_3)
    return laenge, mx
def instancesCylinder(name,points,faces,radii,mesh,colors,scene,parent=None):
cyls=[]
mat = None
if len(colors) == 1:
mat = retrieveColorMat(colors[0])
if mat == None:
mat = addMaterial('mat_'+name,colors[0])
for i in range(len(faces)):
laenge,mx=getStickProperties(points[faces[i][0]],points[faces[i][1]])
cyls.append(c4d.BaseObject(INSTANCE))
cyls[i][1001]=mesh
cyls[i].SetName(name+str(i))
#orient and scale
if DEBUG : print name+str(i)
cyls[i].SetMg(mx)
cyls[i][905]=c4d.Vector(float(radii[i]),float(radii[i]),float(radii[i]))
cyls[i][905,1001]=float(laenge)
texture = cyls[i].MakeTag(c4d.Ttexture)
if mat == None : mat = addMaterial("matcyl"+str(i),colors[i])
texture[1010] = mat#mat[bl.retrieveColorName(sphColors[i])]
addObjectToScene(scene,cyls[i],parent=parent)
if DEBUG : print "ok"
return cyls
def updateTubeMesh(geom,cradius=1.0,quality=0):
    """Rescale the shared base cylinder of *geom* to a new stick radius.

    The base cylinder is authored with radius 0.2 (see Tube), so the x/z scale
    factor is cradius / 0.2; the height (y) scale stays at 1.
    """
    cylinder = geom.mesh.GetDown()  # first child: the base cylinder object
    factor = cradius * 1 / 0.2
    cylinder[905] = c4d.Vector(float(factor), 1., float(factor))
    cylinder.Message(c4d.MSG_UPDATE)
def updateTubeObjs(g,bicyl=False):
    """Re-orient every stick object of geometry *g* from its current vertices/faces.

    NOTE(review): known issue when updating from a selection (per the original
    comment) -- faces are assumed to line up one-to-one with g.obj.
    """
    if not hasattr(g, 'obj'):
        return
    points = g.getVertices()
    faces = g.getFaces()
    for index, stick in enumerate(g.obj):
        begin, end = faces[index][0], faces[index][1]
        laenge, mx = getStickProperties(points[begin], points[end])
        stick.SetMl(mx)
        stick[905, 1001] = float(laenge)
        # Express the position relative to the accumulated parent translation.
        parent_offset = getPosUntilRoot(stick)
        stick.SetPos(stick.GetPos() - parent_offset)
def updateTubeObj(atm1,atm2,bicyl=False):
    """Update the stick object(s) joining two atoms after a coordinate change.

    With bicyl=True two half-sticks (named from each atom's full name) are
    updated; otherwise a single cylinder spans the whole bond.
    """
    start = numpy.array(atm1.coords)
    end = numpy.array(atm2.coords)
    if bicyl:
        midpoint = start + (end - start) / 2.
        half_one = getObject("T_" + atm1.full_name() + "_" + atm2.name)
        updateOneSctick(half_one, start, midpoint)
        half_two = getObject("T_" + atm2.full_name() + "_" + atm1.name)
        updateOneSctick(half_two, midpoint, end)
    else:
        name = "T_" + atm1.name + str(atm1.number) + "_" + atm2.name + str(atm2.number)
        updateOneSctick(getObject(name), start, end)
def updateOneSctick(o,coord1,coord2):
    """Move and orient one stick instance *o* so it spans coord1 -> coord2."""
    laenge, mx = getStickProperties(coord1, coord2)
    o.SetMl(mx)
    o[905, 1001] = float(laenge)
    # Express the position relative to the accumulated parent translation.
    parent_offset = getPosUntilRoot(o)
    o.SetPos(o.GetPos() - parent_offset)
def changeR(txt):
    """Shorten a residue label like 'ALA123' by mapping its 3-char name.

    The first three characters of *txt* are the residue name, the rest the
    residue number.  Known names are mapped through
    ResidueSetSelector.r_keyD (presumably three-letter -> one-letter codes --
    TODO confirm); unknown names fall back to their second non-space character
    (e.g. ' DG' -> 'G'), matching the historical behavior.
    """
    rname, rnum = txt[0:3], txt[3:]
    key_table = ResidueSetSelector.r_keyD
    # Fix: membership test directly on the dict instead of on .keys()
    # (the original built an intermediate key list on every call in Python 2).
    if rname in key_table:
        return key_table[rname] + rnum
    rname = rname.replace(" ", "")
    return rname[1] + rnum
def biStick(atm1,atm2,hiera,instance):
    """Create the two half-cylinders representing one bond, one half per atom.

    Each half is a c4d instance of *instance* (the shared cylinder shape),
    named "T_<mol>_<chain>_<res>_<atom>_<other-atom>", textured with a
    material named after the atom's first element letter (created on demand),
    and parented via findatmParentHierarchie.  Returns the list of the two
    created instance objects.
    """
    #again name problem.....
    #need to add the molecule name
    mol=atm1.getParentOfType(Protein)
    stick=[]
    c0=numpy.array(atm1.coords)
    c1=numpy.array(atm2.coords)
    vect = c1 - c0
    n1=atm1.full_name().split(":")
    n2=atm2.full_name().split(":")
    # Build unique per-half names; residue labels are shortened with changeR.
    name1="T_"+mol.name+"_"+n1[1]+"_"+changeR(n1[2])+"_"+n1[3]+"_"+atm2.name
    name2="T_"+mol.name+"_"+n2[1]+"_"+changeR(n2[2])+"_"+n2[3]+"_"+atm1.name
    #    name1="T_"+n1[1]+"_"+n1[2]+"_"+n1[3]+"_"+atm2.name
    #    name2="T_"+n2[1]+"_"+n2[2]+"_"+n2[3]+"_"+atm1.name
    #name1="T_"+atm1.full_name()+"_"+atm2.name
    #name2="T_"+atm2.full_name()+"_"+atm1.name
    # First half: from atom1 to the bond midpoint.
    laenge,mx=getStickProperties(c0,(c0+(vect/2.)))
    stick.append(c4d.BaseObject(INSTANCE))
    stick[0][1001]=instance
    stick[0].SetMg(mx)
    stick[0][905,1001]=float(laenge)
    stick[0].SetName(name1)
    texture=stick[0].MakeTag(c4d.Ttexture)
    mat=getCurrentScene().SearchMaterial(atm1.name[0])
    if mat == None :
        mat = addMaterial(atm1.name[0],[0.,0.,0.])
    texture[1010]=mat
    # Second half: from the bond midpoint to atom2.
    laenge,mx=getStickProperties((c0+(vect/2.)),c1)
    stick.append(c4d.BaseObject(INSTANCE))
    stick[1][1001]=instance
    stick[1].SetMg(mx)
    stick[1][905,1001]=float(laenge)
    stick[1].SetName(name2)
    texture=stick[1].MakeTag(c4d.Ttexture)
    mat=getCurrentScene().SearchMaterial(atm2.name[0])
    if mat == None :
        mat = addMaterial(atm2.name[0],[0.,0.,0.])
    texture[1010]=mat
    #parent=getObject(mol.geomContainer.masterGeom.chains_obj[hierarchy[1]+"_balls"])
    parent = findatmParentHierarchie(atm1,'B',hiera)
    addObjectToScene(getCurrentScene(),stick[0],parent=parent)
    addObjectToScene(getCurrentScene(),stick[1],parent=parent)
    return stick
def Tube(set,atms,points,faces,doc,mat=None,res=32,size=0.25,sc=1.,join=0,
         instance=None,hiera = 'perRes',bicyl=False,pb=False):
    """Build stick (bond) geometry for the bonds of atom set *set*.

    When *instance* is None, a hidden master empty plus a base cylinder
    (radius 0.2, height 1, *res* rotation segments) are created once for the
    molecule; each bond is then represented by instances of that cylinder.
    Only the bicylinder mode (two half-sticks per bond, colored per atom via
    biStick) is implemented; the single-cylinder branch is a no-op.

    @return: [list of created stick instances, the shared instance shape]
    """
    sticks=[]
    bonds, atnobnd = set.bonds
    if instance == None:
        mol = atms[0].top
        parent=newEmpty(mol.name+"_b_sticks")
        addObjectToScene(getCurrentScene(),parent,parent=mol.geomContainer.masterGeom.obj)
        toggleDisplay(parent,False)
        instance=newEmpty(mol.name+"_b_sticks_shape")
        addObjectToScene(getCurrentScene(),instance,parent=parent)
        cyl=c4d.BaseObject(CYLINDER)
        cyl.SetName(mol.name+"_b_sticks_o")
        cyl[5000]= 0.2 #radius (rescaled later by updateTubeMesh)
        cyl[5005]= 1. #length
        cyl[5008]= res #subdivision
        cyl.MakeTag(c4d.Tphong)
        addObjectToScene(getCurrentScene(),cyl,parent=instance)
    for i,bond in enumerate(bonds):
        if bicyl :
            sticks.extend(biStick(bond.atom1,bond.atom2,hiera,instance))
        else :
            #have to make one cylinder / bonds
            #and put the gradiant mat on it
            pass
        if pb :
            progressBar(i/len(bonds)," sticks ")
    if pb :
        resetProgressBar(0)
    return [sticks,instance]
def oldTube(set,atms,points,faces,doc,mat=None,res=32,size=0.25,sc=1.,join=0,instance=None,hiera = 'perRes'):
    """Create one cylinder (or instance object) per bond to draw a
    stick/tube representation of a molecule in the c4d scene.

    Args:
        set: AtomSet-like object exposing a ``bonds`` property
            (NOTE(review): parameter name shadows the builtin ``set``;
            kept for backward compatibility with callers).
        atms: atoms indexed by the vertex ids used in ``faces``.
        points: 3D coordinates, indexed through ``faces``.
        faces: pairs of indices into ``points``/``atms``, one per bond.
        doc: target c4d document/scene.
        mat: dict of stick materials keyed by two-letter atom-name pairs;
            created via create_sticks_materials() when None.
        res: cylinder rotation-segment resolution.
        size: cylinder radius.
        sc: length divisor (legacy note: 1 for CA trace, 2 for regular).
        join: unused; the join code path at the bottom is commented out.
        instance: when given, c4d INSTANCE objects referencing it are
            created instead of fresh cylinders.
        hiera: parenting scheme - 'perRes', 'perAtom', or anything else
            for the per-chain fallback.

    Returns:
        [stick]: one-element list wrapping the list of created objects.
    """
    bonds, atnobnd = set.bonds
    backbone = ['N', 'CA', 'C', 'O']
    stick=[]
    tube=[]  # NOTE(review): never populated or returned; vestigial
    #size=size*2.
    #coord1=x[0].atms[x[0].atms.CApos()].xyz() #x.xyz()[i].split()
    #coord2=x[1].atms[x[1].atms.CApos()].xyz() #x.xyz()[i+1].split()
    #print len(points)
    #print len(faces)
    #print len(atms)
    # Bond 0 is built separately (it also resolves mol and the materials);
    # remaining bonds are handled in the loop below.
    atm1=bonds[0].atom1#[faces[0][0]]
    atm2=bonds[0].atom2#[faces[0][1]]
    #name="T_"+atm1.name+str(atm1.number)+"_"+atm2.name+str(atm2.number)
    name="T_"+atm1.full_name()+"_"+atm2.name
    mol=atm1.getParentOfType(Protein)
    laenge,mx=getStickProperties(points[faces[0][0]],points[faces[0][1]])
    if mat == None : mat=create_sticks_materials()
    if instance == None :
        stick.append(c4d.BaseObject(CYLINDER))#(res, size, laenge/sc) #1. CAtrace, 0.25 regular |sc=1 CATrace, 2 regular
        stick[0].SetMg(mx)
        stick[0][5005]=laenge/sc#height (bond length)
        stick[0][5000]=size#radius
        stick[0][5008]=res#resolution
        stick[0][5006]=2#height segments
    else :
        stick.append(c4d.BaseObject(INSTANCE))
        stick[0][1001]=instance
        stick[0].SetMg(mx)
        stick[0][905,1001]=float(laenge)  # presumably Y-scale from bond length - confirm against c4d ids
    texture=stick[0].MakeTag(c4d.Ttexture)
    #print atms[faces[0][0]].name[0]+atms[faces[0][1]].name[0]
    name1=atms[faces[0][0]].name[0]
    name2=atms[faces[0][1]].name[0]
    # unknown element symbols fall back to the generic "A" material key
    if name1 not in AtmRadi.keys(): name1="A"
    if name2 not in AtmRadi.keys(): name2="A"
    texture[1010]=mat[name1+name2]
    stick[0].SetName(name)
    #stick[0].SetPos(c4d.Vector(float(z1+z2)/2,float(y1+y2)/2,float(x1+x2)/2))
    #stick[0].set_rot(c4d.Vector(float(wz),float(0),float(wsz)))
    #stick[0][904,1000] = wz #RY/RH
    #stick[0][904,1002] = wsz #RZ/RB
    stick[0].MakeTag(c4d.Tphong)
    hierarchy=parseObjectName("B_"+atm1.full_name())
    #parent=getObject(mol.geomContainer.masterGeom.chains_obj[hierarchy[1]+"_balls"])
    if hiera == 'perRes' :
        parent = getObject(mol.geomContainer.masterGeom.res_obj[hierarchy[2]])
    elif hiera == 'perAtom' :
        if atm1.name in backbone :
            parent = getObject(atm1.full_name()+"_bond")
        else :
            parent = getObject(atm1.full_name()+"_sbond")
    else :
        parent=getObject(mol.geomContainer.masterGeom.chains_obj[hierarchy[1]+"_balls"])
    addObjectToScene(doc,stick[0],parent=parent)
    # NOTE(review): the loop uses SetMl (local matrix) whereas bond 0 used
    # SetMg (global matrix) - confirm the asymmetry is intended.
    for i in range(1,len(faces)):
        atm1=bonds[i].atom1#[faces[i][0]]
        atm2=bonds[i].atom2#[faces[i][1]]
        #name="T_"+atm1.name+str(atm1.number)+"_"+atm2.name+str(atm2.number)
        name="T_"+atm1.full_name()+"_"+atm2.name
        laenge,mx=getStickProperties(points[faces[i][0]],points[faces[i][1]])
        if instance == None :
            stick.append(c4d.BaseObject(CYLINDER))#(res, size, laenge/sc) #1. CAtrace, 0.25 regular |sc=1 CATrace, 2 regular
            stick[i].SetMl(mx)
            stick[i][5005]=laenge/sc#height (bond length; id 5005 is set from laenge throughout this file)
            stick[i][5000]=size#radius
            stick[i][5008]=res#resolution rotation segment
            stick[i][5006]=2#height segments
        else :
            stick.append(c4d.BaseObject(INSTANCE))
            stick[i][1001]=instance
            stick[i].SetMl(mx)
            stick[i][905,1001]=float(laenge)
        texture=stick[i].MakeTag(c4d.Ttexture)
        #print i,i+1
        name1=atms[faces[i][0]].name[0]
        name2=atms[faces[i][1]].name[0]
        if name1 not in AtmRadi.keys(): name1="A"
        if name2 not in AtmRadi.keys(): name2="A"
        # NOTE(review): both branches assign the same material, so the
        # i < len(atms) guard is vestigial.
        if i < len(atms) :
            #print name1+name2
            texture[1010]=mat[name1+name2]
        else :
            texture[1010]=mat[name1+name2]
        stick[i].SetName(name)
        #stick[i].SetPos(c4d.Vector(float(z1+z2)/2,float(y1+y2)/2,float(x1+x2)/2))
        #stick[i].set_rot(c4d.Vector(float(wz),float(0.),float(wsz)))
        stick[i].SetMl(mx)
        stick[i].MakeTag(c4d.Tphong)
        hierarchy=parseObjectName("B_"+atm1.full_name())
        #parent=getObject(mol.geomContainer.masterGeom.chains_obj[hierarchy[1]+"_balls"])
        if hiera == 'perRes' :
            parent = getObject(mol.geomContainer.masterGeom.res_obj[hierarchy[2]])
        elif hiera == 'perAtom' :
            if atm1.name in backbone :
                parent = getObject(atm1.full_name()+"_bond")
            else :
                parent = getObject(atm1.full_name()+"_sbond")
        else :
            parent=getObject(mol.geomContainer.masterGeom.chains_obj[hierarchy[1]+"_balls"])
        addObjectToScene(doc,stick[i],parent=parent)
    #if join==1 :
    #    stick[0].join(stick[1:])
    #    for ind in range(1,len(stick)):
    #obj[0].join([obj[ind]])
    #        scn.unlink(stick[ind])
    #obj[0].setName(name)
    return [stick]
def c4dv(points):
    """Build a c4d.Vector from an x/y/z triple, swapping to the reversed
    (z, y, x) component order used throughout this module."""
    a, b, c = (float(points[axis]) for axis in (2, 1, 0))
    return c4d.Vector(a, b, c)
def vc4d(v):
    """Convert a c4d.Vector back into a plain [z, y, x] list
    (inverse companion of c4dv)."""
    components = (v.z, v.y, v.x)
    return list(components)
def getCoordinateMatrix(pos,direction):
    """Build an orthonormal c4d.Matrix at `pos` whose second axis points
    along `direction`.

    NOTE(review): Normalize() mutates the caller's `direction` vector in
    place - confirm callers do not rely on its original length.
    NOTE(review): degenerate (zero cross product) when `direction` is
    parallel to the world X axis used as the seed vector.
    """
    offset=pos
    v_2=direction
    v_2.Normalize()
    # seed with world X, then derive an orthogonal basis via two cross products
    v_1=c4d.Vector(float(1.),float(0.),float(0.))
    v_3=c4d.Vector.Cross(v_1,v_2)
    v_3.Normalize()
    v_1=c4d.Vector.Cross(v_2,v_3)
    v_1.Normalize()
    #from mglutil.math import rotax
    #pmx=rotax.rotVectToVect([1.,0.,0.], [float(z1-z2),float(y1-y2),float(x1-x2)], i=None)
    return c4d.Matrix(offset,v_1, v_2, v_3)
def getCoordinateMatrixBis(pos,v1,v2):
    """Build a c4d.Matrix from a position and two direction vectors; the
    third axis is the normalized cross product of the first two."""
    origin = c4dv(pos)
    axis1 = c4dv(v1)
    axis2 = c4dv(v2)
    axis3 = c4d.Vector.Cross(axis1, axis2)
    axis3.Normalize()
    return c4d.Matrix(origin, axis1, axis2, axis3)
def loftnurbs(name,mat=None):
    """Create a loft-NURBS generator named `name` with phong and texture
    tags; builds a dedicated 'mat_<name>' material unless `mat` is given."""
    nurbs = c4d.BaseObject(LOFTNURBS)
    nurbs[1008] = 0  # disable adaptive UV
    nurbs.SetName(name)
    nurbs.MakeTag(c4d.Tphong)
    tex_tag = nurbs.MakeTag(c4d.Ttexture)
    tex_tag[1004] = 6  # UVW mapping
    if mat is None:
        mat = create_loft_material(name='mat_' + name)
    tex_tag[1010] = mat
    return nurbs
def sweepnurbs(name,mat=None):
    """Create a sweep-NURBS generator named `name` with phong and texture
    tags; builds a dedicated 'mat_<name>' material unless `mat` is given."""
    nurbs = c4d.BaseObject(SWEEPNURBS)
    nurbs.SetName(name)
    nurbs.MakeTag(c4d.Tphong)
    tex_tag = nurbs.MakeTag(c4d.Ttexture)
    if mat is None:
        mat = create_loft_material(name='mat_' + name)
    tex_tag[1010] = mat
    return nurbs
def addShapeToNurb(loft,shape,position=-1):
    """Insert `shape` after the child of `loft` at index `position`
    (default -1: after the last existing profile)."""
    children = loft.GetChilds()
    shape.insert_after(children[position])
#def createShapes2D()
# sh=c4d.BaseObject(dshape)
def spline(name, points,close=0,type=1,scene=None,parent=None):
    """Create a named c4d spline through `points`.

    Args:
        name: object name.
        points: sequence of coordinate triples (passed through c4dv).
        close: 1 to close the spline, 0 to leave it open.
        type: spline interpolation type id (parameter name kept for
            callers although it shadows the builtin).
        scene: if given, the spline is linked into this document.
        parent: optional parent object used when linking.

    Returns:
        (spline_object, None) tuple.
    """
    curve = c4d.BaseObject(c4d.Ospline)
    curve[1000] = type
    curve[1002] = close
    curve.SetName(name)
    curve.ResizeObject(int(len(points)))
    for idx, pt in enumerate(points):
        curve.SetPoint(idx, c4dv(pt))
    if scene is not None:
        addObjectToScene(scene, curve, parent=parent)
    return curve, None
def update_spline(name,new_points):
    """Re-point the spline called `name` in the current scene.

    Returns:
        False when no object of that name exists, True on success.
    """
    target = getCurrentScene().SearchObject(name)
    if target is None:
        return False
    target.ResizeObject(int(len(new_points)))
    for idx, pt in enumerate(new_points):
        target.SetPoint(idx, c4dv(pt))
    return True
def createShapes2Dspline(doc=None,parent=None):
    """Create the 2D profile splines used to extrude secondary-structure
    ribbons (helix, sheet, strand, coil, turn).

    NOTE(review): get_real_spline() is not a documented c4d BaseObject
    method - presumably a helper patched in elsewhere; confirm.

    Returns:
        (shape2D, objs): dict mapping 4-letter secondary-structure keys
        ('Heli', 'Shee', 'Stra', 'Coil', 'Turn') to spline objects, plus
        the list of every created object (primitives and splines).
    """
    circle=c4d.BaseObject(CIRCLE)
    circle[2012]=float(0.3)  # radius (same id as in Circle() below)
    circle[2300]=1
    if doc : addObjectToScene(doc,circle,parent=parent )
    rectangle=c4d.BaseObject(RECTANGLE)
    rectangle[2060]=float(2.2)  # presumably width - confirm id
    rectangle[2061]=float(0.7)  # presumably height - confirm id
    rectangle[2300]=1
    if doc : addObjectToScene(doc,rectangle,parent=parent )
    fourside=c4d.BaseObject(FOURSIDE)
    fourside[2121]=float(2.5)
    fourside[2122]=float(0.9)
    fourside[2300]=1
    if doc : addObjectToScene(doc,fourside,parent=parent )
    shape2D={}
    pts=[[0,0,0],[0,1,0],[0,1,1],[0,0,1]]  # legacy control points; only used by the commented-out spline() calls
    #helixshape
    helixshape=fourside.get_real_spline()#spline('helix',pts,close=1,type=2)#AKIMA
    helixshape.SetName('helix')
    shape2D['Heli']=helixshape
    #sheetshape
    sheetshape=rectangle.get_real_spline()#spline('sheet',pts,close=1,type=0)#LINEAR
    sheetshape.SetName('sheet')
    shape2D['Shee']=sheetshape
    #strandshape
    strandshape=sheetshape.GetClone()
    strandshape.SetName('strand')
    shape2D['Stra']=strandshape
    #coilshape
    coilshape=circle.get_real_spline()#spline('coil',pts,close=1,type=4)#BEZIER
    coilshape.SetName('coil')
    shape2D['Coil']=coilshape
    #turnshape
    turnshape=coilshape.GetClone()
    turnshape.SetName('turn')
    shape2D['Turn']=turnshape
    if doc :
        for o in shape2D.values() :
            addObjectToScene(doc,o,parent=parent )
    return shape2D,[circle,rectangle,fourside,helixshape,sheetshape,strandshape,coilshape,turnshape]
def Circle(name, rad=1.):
    """Create a 2D circle spline primitive named `name` with radius `rad`."""
    shape = c4d.BaseObject(CIRCLE)
    shape.SetName(name)
    shape[2012] = float(rad)  # radius
    shape[2300] = 0
    return shape
def createShapes2D(doc=None,parent=None):
if doc is None :
doc | |
<filename>Alignment/OfflineValidation/python/TkAlAllInOneTool/dataset.py
from __future__ import print_function
from __future__ import absolute_import
# idea stolen from:
# http://cmssw.cvs.cern.ch/cgi-bin/cmssw.cgi/CMSSW/
# PhysicsTools/PatAlgos/python/tools/cmsswVersionTools.py
from builtins import range
import bisect
import datetime
import json
import os
import re
import sys
import Utilities.General.cmssw_das_client as das_client
from FWCore.PythonUtilities.LumiList import LumiList
from .helperFunctions import cache
from .TkAlExceptions import AllInOneError
class Dataset(object):
    def __init__( self, datasetName, dasLimit = 0, tryPredefinedFirst = True,
                  cmssw = os.environ["CMSSW_BASE"], cmsswrelease = os.environ["CMSSW_RELEASE_BASE"],
                  magneticfield = None, dasinstance = None):
        """Set up a Dataset, resolving whether a predefined _cff.py exists
        for it or whether the information must be fetched from DAS.

        NOTE(review): the os.environ[...] defaults are evaluated once at
        class-definition time, so importing this module raises KeyError when
        CMSSW_BASE / CMSSW_RELEASE_BASE are unset.
        """
        self.__name = datasetName
        self.__origName = datasetName
        self.__dasLimit = dasLimit
        self.__dasinstance = dasinstance
        self.__cmssw = cmssw
        self.__cmsswrelease = cmsswrelease
        self.__firstusedrun = None
        self.__lastusedrun = None
        self.__parentDataset = None
        # check, if dataset name matches CMS dataset naming scheme
        if re.match( r'/.+/.+/.+', self.__name ):
            self.__official = True
            fileName = "Dataset" + self.__name.replace("/","_") + "_cff.py"
        else:
            self.__official = False
            fileName = self.__name + "_cff.py"
        # three candidate locations for a predefined dataset snippet:
        # user area (built), user area (source, not built), release area
        searchPath1 = os.path.join( self.__cmssw, "python",
                                    "Alignment", "OfflineValidation",
                                    fileName )
        searchPath2 = os.path.join( self.__cmssw, "src",
                                    "Alignment", "OfflineValidation",
                                    "python", fileName )
        searchPath3 = os.path.join( self.__cmsswrelease,
                                    "python", "Alignment",
                                    "OfflineValidation", fileName )
        if self.__official and not tryPredefinedFirst:
            self.__predefined = False
        elif os.path.exists( searchPath1 ):
            self.__predefined = True
            self.__filename = searchPath1
        elif os.path.exists( searchPath2 ):
            # snippet exists in src but has not been built with scram yet
            msg = ("The predefined dataset '%s' does exist in '%s', but "
                   "you need to run 'scram b' first."
                   %( self.__name, searchPath2 ))
            if self.__official:
                print(msg)
                print("Getting the data from DAS again. To go faster next time, run scram b.")
            else:
                raise AllInOneError( msg )
        elif os.path.exists( searchPath3 ):
            self.__predefined = True
            self.__filename = searchPath3
        elif self.__official:
            self.__predefined = False
        else:
            msg = ("The predefined dataset '%s' does not exist. Please "
                   "create it first or check for typos."%( self.__name ))
            raise AllInOneError( msg )
        if self.__predefined and self.__official:
            # predefined official datasets are referred to by their snippet name
            self.__name = "Dataset" + self.__name.replace("/","_")
        if magneticfield is not None:
            try:
                magneticfield = float(magneticfield)
            except ValueError:
                raise AllInOneError("Bad magneticfield {} which can't be converted to float".format(magneticfield))
        self.__inputMagneticField = magneticfield
        self.__dataType = self.__getDataType()
        self.__magneticField = self.__getMagneticField()
def __chunks( self, theList, n ):
""" Yield successive n-sized chunks from theList.
"""
for i in range( 0, len( theList ), n ):
yield theList[i:i+n]
__source_template= ("%(header)s"
"%(importCms)s"
"import FWCore.PythonUtilities.LumiList as LumiList\n\n"
"%(goodLumiSecStr)s"
"readFiles = cms.untracked.vstring()\n"
"secFiles = cms.untracked.vstring()\n"
"%(process)ssource = cms.Source(\"PoolSource\",\n"
"%(lumiStr)s"
"%(tab)s secondaryFileNames ="
"secFiles,\n"
"%(tab)s fileNames = readFiles\n"
")\n"
"%(files)s\n"
"%(lumiSecExtend)s\n"
"%(process)smaxEvents = cms.untracked.PSet( "
"input = cms.untracked.int32(int(%(nEvents)s)) )\n"
"%(skipEventsString)s\n")
__dummy_source_template = ("readFiles = cms.untracked.vstring()\n"
"secFiles = cms.untracked.vstring()\n"
"%(process)ssource = cms.Source(\"PoolSource\",\n"
"%(tab)s secondaryFileNames ="
"secFiles,\n"
"%(tab)s fileNames = readFiles\n"
")\n"
"readFiles.extend(['dummy_File.root'])\n"
"%(process)smaxEvents = cms.untracked.PSet( "
"input = cms.untracked.int32(int(%(nEvents)s)) )\n"
"%(skipEventsString)s\n")
    def __lumiSelectionSnippet( self, jsonPath = None, firstRun = None, lastRun = None ):
        """Build the lumi-selection part of the cff snippet.

        Returns the 'lumiSecs.extend([...])' fragment (or raw cff contents
        when jsonPath is itself a cff snippet), and as a side effect sets
        self.__firstusedrun / self.__lastusedrun.

        Args:
            jsonPath: JSON lumi mask file (or a CMSSW cff snippet).
            firstRun/lastRun: optional inclusive run-range bounds.
        """
        lumiSecExtend = ""
        if firstRun or lastRun or jsonPath:
            if not jsonPath:
                # no JSON: select whole runs in [firstRun, lastRun]
                selectedRunList = self.__getRunList()
                if firstRun:
                    selectedRunList = [ run for run in selectedRunList \
                                        if self.__findInJson(run, "run_number") >= firstRun ]
                if lastRun:
                    selectedRunList = [ run for run in selectedRunList \
                                        if self.__findInJson(run, "run_number") <= lastRun ]
                lumiList = [ str( self.__findInJson(run, "run_number") ) + ":1-" \
                             + str( self.__findInJson(run, "run_number") ) + ":max" \
                             for run in selectedRunList ]
                # 255 entries per vstring extend call (CMSSW argument limit convention)
                splitLumiList = list( self.__chunks( lumiList, 255 ) )
            else:
                theLumiList = None
                try:
                    theLumiList = LumiList ( filename = jsonPath )
                except ValueError:
                    # not parseable as a JSON lumi mask; handled below
                    pass
                if theLumiList is not None:
                    # proper JSON file: drop runs outside the requested range
                    allRuns = theLumiList.getRuns()
                    runsToRemove = []
                    for run in allRuns:
                        if firstRun and int( run ) < firstRun:
                            runsToRemove.append( run )
                        if lastRun and int( run ) > lastRun:
                            runsToRemove.append( run )
                    theLumiList.removeRuns( runsToRemove )
                    splitLumiList = list( self.__chunks(
                        theLumiList.getCMSSWString().split(','), 255 ) )
                    if not (splitLumiList and splitLumiList[0] and splitLumiList[0][0]):
                        splitLumiList = None
                else:
                    # maybe the "json" is actually a cff lumi-selection snippet
                    with open(jsonPath) as f:
                        jsoncontents = f.read()
                        if "process.source.lumisToProcess" in jsoncontents:
                            msg = "%s is not a json file, but it seems to be a CMSSW lumi selection cff snippet. Trying to use it" % jsonPath
                            if firstRun or lastRun:
                                msg += ("\n (after applying firstRun and/or lastRun)")
                            msg += ".\nPlease note that, depending on the format of this file, it may not work as expected."
                            msg += "\nCheck your config file to make sure that it worked properly."
                            print(msg)
                            runlist = self.__getRunList()
                            if firstRun or lastRun:
                                self.__firstusedrun = -1
                                self.__lastusedrun = -1
                                # rewrite every run:lumi range through the force-run-range filter
                                jsoncontents = re.sub(r"\d+:(\d+|max)(-\d+:(\d+|max))?", self.getForceRunRangeFunction(firstRun, lastRun), jsoncontents)
                                # drop entries emptied by the filter
                                jsoncontents = (jsoncontents.replace("'',\n","").replace("''\n","")
                                                .replace('"",\n','').replace('""\n',''))
                                self.__firstusedrun = max(self.__firstusedrun, int(self.__findInJson(runlist[0],"run_number")))
                                self.__lastusedrun = min(self.__lastusedrun, int(self.__findInJson(runlist[-1],"run_number")))
                                if self.__lastusedrun < self.__firstusedrun:
                                    jsoncontents = None
                            else:
                                self.__firstusedrun = int(self.__findInJson(runlist[0],"run_number"))
                                self.__lastusedrun = int(self.__findInJson(runlist[-1],"run_number"))
                            lumiSecExtend = jsoncontents
                            splitLumiList = None
                        else:
                            raise AllInOneError("%s is not a valid json file!" % jsonPath)
            if splitLumiList and splitLumiList[0] and splitLumiList[0][0]:
                # emit one lumiSecs.extend(...) call per 255-entry chunk
                lumiSecStr = [ "',\n'".join( lumis ) \
                               for lumis in splitLumiList ]
                lumiSecStr = [ "lumiSecs.extend( [\n'" + lumis + "'\n] )" \
                               for lumis in lumiSecStr ]
                lumiSecExtend = "\n".join( lumiSecStr )
                runlist = self.__getRunList()
                self.__firstusedrun = max(int(splitLumiList[0][0].split(":")[0]), int(self.__findInJson(runlist[0],"run_number")))
                self.__lastusedrun = min(int(splitLumiList[-1][-1].split(":")[0]), int(self.__findInJson(runlist[-1],"run_number")))
            elif lumiSecExtend:
                # cff-snippet path already filled lumiSecExtend
                pass
            else:
                msg = "You are trying to run a validation without any runs!  Check that:"
                if firstRun or lastRun:
                    msg += "\n - firstRun/begin and lastRun/end are correct for this dataset, and there are runs in between containing data"
                if jsonPath:
                    msg += "\n - your JSON file is correct for this dataset, and the runs contain data"
                if (firstRun or lastRun) and jsonPath:
                    msg += "\n - firstRun/begin and lastRun/end are consistent with your JSON file"
                raise AllInOneError(msg)
        else:
            if self.__inputMagneticField is not None:
                pass #never need self.__firstusedrun or self.__lastusedrun
            else:
                runlist = self.__getRunList()
                self.__firstusedrun = int(self.__findInJson(self.__getRunList()[0],"run_number"))
                self.__lastusedrun = int(self.__findInJson(self.__getRunList()[-1],"run_number"))
        return lumiSecExtend
    def __fileListSnippet(self, crab=False, parent=False, firstRun=None, lastRun=None, forcerunselection=False):
        """Build the 'readFiles.extend(...)' (and, with parent=True, the
        'secFiles.extend(...)') part of the cff snippet.

        For crab jobs the list is left empty - crab supplies the input
        files itself.
        """
        if crab:
            files = ""
        else:
            # 255 file names per extend call, matching __lumiSelectionSnippet
            splitFileList = list( self.__chunks( self.fileList(firstRun=firstRun, lastRun=lastRun, forcerunselection=forcerunselection), 255 ) )
            if not splitFileList:
                raise AllInOneError("No files found for dataset {}. Check the spelling, or maybe specify another das instance?".format(self.__name))
            fileStr = [ "',\n'".join( files ) for files in splitFileList ]
            fileStr = [ "readFiles.extend( [\n'" + files + "'\n] )" \
                        for files in fileStr ]
            files = "\n".join( fileStr )
            if parent:
                splitParentFileList = list( self.__chunks( self.fileList(parent=True, firstRun=firstRun, lastRun=lastRun, forcerunselection=forcerunselection), 255 ) )
                parentFileStr = [ "',\n'".join( parentFiles ) for parentFiles in splitParentFileList ]
                parentFileStr = [ "secFiles.extend( [\n'" + parentFiles + "'\n] )" \
                                  for parentFiles in parentFileStr ]
                parentFiles = "\n".join( parentFileStr )
                files += "\n\n" + parentFiles
        return files
    def __createSnippet( self, jsonPath = None, begin = None, end = None,
                         firstRun = None, lastRun = None, repMap = None,
                         crab = False, parent = False ):
        """Assemble the full dataset cff snippet from the lumi-selection and
        file-list fragments, interpolated into the class templates.

        begin/end (dates) and firstRun/lastRun are mutually exclusive ways
        to specify the run range; mixing them raises AllInOneError.
        """
        if firstRun:
            firstRun = int( firstRun )
        if lastRun:
            lastRun = int( lastRun )
        if ( begin and firstRun ) or ( end and lastRun ):
            # the int(bool(...)) products switch the message parts on/off
            msg = ( "The Usage of "
                    + "'begin' & 'firstRun' " * int( bool( begin and
                                                           firstRun ) )
                    + "and " * int( bool( ( begin and firstRun ) and
                                          ( end and lastRun ) ) )
                    + "'end' & 'lastRun' " * int( bool( end and lastRun ) )
                    + "is ambigous." )
            raise AllInOneError( msg )
        if begin or end:
            # convert date limits into run-number limits
            ( firstRun, lastRun ) = self.convertTimeToRun(
                begin = begin, end = end, firstRun = firstRun,
                lastRun = lastRun )
        if ( firstRun and lastRun ) and ( firstRun > lastRun ):
            msg = ( "The lower time/runrange limit ('begin'/'firstRun') "
                    "chosen is greater than the upper time/runrange limit "
                    "('end'/'lastRun').")
            raise AllInOneError( msg )
        lumiSecExtend = self.__lumiSelectionSnippet(jsonPath=jsonPath, firstRun=firstRun, lastRun=lastRun)
        lumiStr = goodLumiSecStr = ""
        if lumiSecExtend:
            goodLumiSecStr = "lumiSecs = cms.untracked.VLuminosityBlockRange()\n"
            lumiStr = "                    lumisToProcess = lumiSecs,\n"
        files = self.__fileListSnippet(crab=crab, parent=parent, firstRun=firstRun, lastRun=lastRun, forcerunselection=False)
        theMap = repMap
        theMap["files"] = files
        theMap["json"] = jsonPath
        theMap["lumiStr"] = lumiStr
        theMap["goodLumiSecStr"] = goodLumiSecStr%( theMap )
        theMap["lumiSecExtend"] = lumiSecExtend
        if crab:
            dataset_snippet = self.__dummy_source_template%( theMap )
        else:
            dataset_snippet = self.__source_template%( theMap )
        return dataset_snippet
def __find_lt( self, a, x ):
'Find rightmost value less than x'
i = bisect.bisect_left( a, x )
if i:
return i-1
raise ValueError
def __find_ge( self, a, x):
'Find leftmost item greater than or equal to x'
i = bisect.bisect_left( a, x )
if i != len( a ):
return i
raise ValueError
def __findInJson(self, jsondict, strings):
if isinstance(strings, str):
strings = [ strings ]
| |
<reponame>raphaelsulzer/convolutional_occupancy_networks<gh_stars>0
import os
import glob
import random
import sys
from PIL import Image
import numpy as np
import trimesh
from src.data.core import Field
from src.utils import binvox_rw
from src.common import coord2index, normalize_coord
class IndexField(Field):
    '''Trivial field that simply hands back the data-point index.'''

    def load(self, model_path, idx, category):
        '''Return `idx` unchanged.

        Args:
            model_path (str): path to model (unused)
            idx (int): ID of data point
            category (int): index of category (unused)
        '''
        return idx

    def check_complete(self, files):
        '''Always complete; this field requires no files.

        Args:
            files: files
        '''
        return True
# 3D Fields
class PatchPointsField(Field):
    ''' Patch Point Field.
    It provides the field to load point data. This is used for the points
    randomly sampled in the bounding volume of the 3D shape and then split to patches.
    Args:
        file_name (str): file name
        transform (list): list of transformations which will be applied to the points tensor
        unpackbits (bool): whether occupancies are stored as packed bits
        multi_files (callable): number of files
    '''
    def __init__(self, file_name, transform=None, unpackbits=False, multi_files=None):
        self.file_name = file_name
        self.transform = transform
        self.unpackbits = unpackbits
        self.multi_files = multi_files
    def load(self, model_path, idx, vol):
        ''' Loads the data point, cropped to the query volume in `vol`.
        Args:
            model_path (str): path to model
            idx (int): ID of data point
            vol (dict): precomputed volume info; expects keys 'query_vol',
                'input_vol' (min/max corner pairs) and 'plane_type'
        '''
        if self.multi_files is None:
            file_path = os.path.join(model_path, self.file_name)
        else:
            # pick one of the pre-split files at random
            num = np.random.randint(self.multi_files)
            file_path = os.path.join(model_path, self.file_name, '%s_%02d.npz' % (self.file_name, num))
        points_dict = np.load(file_path)
        points = points_dict['points']
        # Break symmetry if given in float16 (quantized grid positions):
        if points.dtype == np.float16:
            points = points.astype(np.float32)
            points += 1e-4 * np.random.randn(*points.shape)
        occupancies = points_dict['occupancies']
        if self.unpackbits:
            occupancies = np.unpackbits(occupancies)[:points.shape[0]]
        occupancies = occupancies.astype(np.float32)
        # acquire the crop: keep only points inside the axis-aligned query volume
        ind_list = []
        for i in range(3):
            ind_list.append((points[:, i] >= vol['query_vol'][0][i])
                            & (points[:, i] <= vol['query_vol'][1][i]))
        ind = ind_list[0] & ind_list[1] & ind_list[2]
        data = {None: points[ind],
                'occ': occupancies[ind],
                }
        if self.transform is not None:
            data = self.transform(data)
        # calculate normalized coordinate w.r.t. defined query volume
        p_n = {}
        for key in vol['plane_type']:
            # projected coordinates normalized to the range of [0, 1]
            p_n[key] = normalize_coord(data[None].copy(), vol['input_vol'], plane=key)
        data['normalized'] = p_n
        return data
class PointsField(Field):
    ''' Point Field.

    Loads query points (with occupancy labels) sampled in the bounding
    volume of a 3D shape.

    Args:
        file_name (str): file name
        transform (list): list of transformations which will be applied to the points tensor
        unpackbits (bool): whether occupancies are stored as packed bits
        multi_files (callable): number of files
    '''
    def __init__(self, file_name, transform=None, unpackbits=False, multi_files=None):
        self.file_name = file_name
        self.transform = transform
        self.unpackbits = unpackbits
        self.multi_files = multi_files

    def load(self, model_path, idx, category):
        ''' Load one data point.

        Args:
            model_path (str): path to model
            idx (int): ID of data point
            category (int): index of category
        '''
        if self.multi_files is None:
            file_path = os.path.join(model_path, self.file_name)
        else:
            chosen = np.random.randint(self.multi_files)
            file_path = os.path.join(
                model_path, self.file_name,
                '%s_%02d.npz' % (self.file_name, chosen))

        npz = np.load(file_path)
        pts = npz['points']
        # float16 points are quantized; promote and jitter to break symmetry
        if pts.dtype == np.float16:
            pts = pts.astype(np.float32)
            pts += 1e-4 * np.random.randn(*pts.shape)

        occ = npz['occupancies']
        if self.unpackbits:
            occ = np.unpackbits(occ)[:pts.shape[0]]
        occ = occ.astype(np.float32)

        data = {None: pts, 'occ': occ}
        if self.transform is not None:
            data = self.transform(data)
        return data
class VoxelsField(Field):
    ''' Voxel field class.

    Loads binvox voxel grids as float32 arrays.

    Args:
        file_name (str): file name
        transform (list): list of transformations applied to data points
    '''
    def __init__(self, file_name, transform=None):
        self.file_name = file_name
        self.transform = transform

    def load(self, model_path, idx, category):
        ''' Load the voxel grid of one data point.

        Args:
            model_path (str): path to model
            idx (int): ID of data point
            category (int): index of category
        '''
        with open(os.path.join(model_path, self.file_name), 'rb') as f:
            grid = binvox_rw.read_as_3d_array(f)
        grid = grid.data.astype(np.float32)
        if self.transform is not None:
            grid = self.transform(grid)
        return grid

    def check_complete(self, files):
        ''' Check whether the voxel file is present.

        Args:
            files: files
        '''
        return self.file_name in files
class PatchPointCloudField(Field):
    ''' Patch point cloud field.
    It provides the field used for patched point cloud data. These are the points
    randomly sampled on the mesh and then partitioned.
    Args:
        file_name (str): file name
        transform (list): list of transformations applied to data points
        transform_add_noise: NOTE(review): accepted but never stored or
            used - confirm whether it should be applied in load()
        multi_files (callable): number of files
    '''
    def __init__(self, file_name, transform=None, transform_add_noise=None, multi_files=None):
        self.file_name = file_name
        self.transform = transform
        self.multi_files = multi_files
    def load(self, model_path, idx, vol):
        ''' Loads the data point and masks points outside the input volume.
        Args:
            model_path (str): path to model
            idx (int): ID of data point
            vol (dict): precomputed volume info; expects keys 'input_vol',
                'plane_type' and 'reso'
        '''
        if self.multi_files is None:
            file_path = os.path.join(model_path, self.file_name)
        else:
            num = np.random.randint(self.multi_files)
            file_path = os.path.join(model_path, self.file_name, '%s_%02d.npz' % (self.file_name, num))
        pointcloud_dict = np.load(file_path)
        points = pointcloud_dict['points'].astype(np.float32)
        normals = pointcloud_dict['normals'].astype(np.float32)
        # add noise globally
        if self.transform is not None:
            data = {None: points,
                    'normals': normals}
            data = self.transform(data)
            points = data[None]
            # sensors = data['sensors']
            # normals = data['normals']
        # acquire the crop index
        ind_list = []
        for i in range(3):
            ind_list.append((points[:, i] >= vol['input_vol'][0][i])
                            & (points[:, i] <= vol['input_vol'][1][i]))
        mask = ind_list[0] & ind_list[1] & ind_list[2]# points inside the input volume
        mask = ~mask # True means outside the boundary!!
        data['mask'] = mask
        points[mask] = 0.0  # zero out-of-volume points in place
        # sensors[mask] = 0.0
        # calculate index of each point w.r.t. defined resolution
        index = {}
        for key in vol['plane_type']:
            index[key] = coord2index(points.copy(), vol['input_vol'], reso=vol['reso'], plane=key)
            # out-of-volume points get a sentinel index one past the grid
            if key == 'grid':
                index[key][:, mask] = vol['reso']**3
            else:
                index[key][:, mask] = vol['reso']**2
        data['ind'] = index
        return data
    def check_complete(self, files):
        ''' Check if field is complete.
        Args:
            files: files
        '''
        complete = (self.file_name in files)
        return complete
class PointCloudField(Field):
    ''' Point cloud field.
    It provides the field used for point cloud data. These are the points
    randomly sampled on the mesh, optionally augmented with per-point
    sensor (scanner position) information in one of several encodings.
    # TODO: for me: for the PointCloudField I can use my own sampling, simply load the ply files and save it as npz
    Args:
        file_name (str): file name
        transform (list): list of transformations applied to data points
        sensor_field (str): one of 'sensor_pos', 'sensor_vec',
            'sensor_vec+', 'sensor_vec_norm', 'sensor_angle'; any other
            value aborts the process in load()
        multi_files (callable): number of files
    '''
    def __init__(self, file_name, transform=None, sensor_field=None, multi_files=None):
        self.file_name = file_name
        self.transform = transform
        self.multi_files = multi_files
        self.sensor_field = sensor_field
    def add_auxiliary_points(self, pointcloud_dict):
        # NOTE(review): unimplemented stub; never called inside this class
        pass
    def load(self, model_path, idx, category):
        ''' Loads the data point.
        Args:
            model_path (str): path to model
            idx (int): ID of data point
            category (int): index of category
        '''
        if self.multi_files is None:
            file_path = os.path.join(model_path, self.file_name)
        else:
            num = np.random.randint(self.multi_files)
            file_path = os.path.join(model_path, self.file_name, '%s_%02d.npz' % (self.file_name, num))
        pointcloud_dict = np.load(file_path)
        points = pointcloud_dict['points'].astype(np.float32)
        normals = pointcloud_dict['normals'].astype(np.float32)
        sensors = pointcloud_dict['sensors'].astype(np.float32)
        if(self.sensor_field == 'sensor_pos'):
            # keep the raw sensor positions
            pass
        elif(self.sensor_field == 'sensor_vec'):
            # point-to-sensor displacement vectors
            sensors = sensors - points
        elif(self.sensor_field == 'sensor_vec+'):
            # add additional points between sensors & points, tagged in a
            # 4th coordinate: 0 = surface point, -1 = stepped away from the
            # sensor (presumably behind the surface), +1 = towards the sensor
            sensors = sensors - points
            new_points0 = points - sensors*0.01
            new_points1 = points + sensors*0.01
            new_points2 = points + sensors*0.05
            new_points3 = points + sensors*0.1
            new_points4 = points + sensors*0.5
            points=np.concatenate((points, np.zeros(shape=(points.shape[0], 1),dtype=np.float32)), axis=1)
            new_points0=np.concatenate((new_points0, -1*np.ones(shape=(points.shape[0],1),dtype=np.float32)), axis=1)
            new_points1=np.concatenate((new_points1, np.ones(shape=(points.shape[0],1),dtype=np.float32)), axis=1)
            new_points2=np.concatenate((new_points2, np.ones(shape=(points.shape[0],1),dtype=np.float32)), axis=1)
            new_points3=np.concatenate((new_points3, np.ones(shape=(points.shape[0],1),dtype=np.float32)), axis=1)
            new_points4=np.concatenate((new_points4, np.ones(shape=(points.shape[0],1),dtype=np.float32)), axis=1)
            # normalize the sensor vecs
            sensors = sensors / np.linalg.norm(sensors, axis=1)[:, np.newaxis]
            # each auxiliary set reuses the same sensors/normals rows (6x)
            points = np.concatenate((points, new_points0, new_points1, new_points2, new_points3, new_points4))
            sensors = np.concatenate((sensors,sensors,sensors,sensors,sensors,sensors))
            normals = np.concatenate((normals,normals,normals,normals,normals,normals))
        elif(self.sensor_field == 'sensor_vec_norm'):
            # unit point-to-sensor direction
            sensors = sensors - points
            sensors = sensors / np.linalg.norm(sensors,axis=1)[:,np.newaxis]
        elif(self.sensor_field == 'sensor_angle'):
            # encode direction as (azimuthal cos, elevation sin, distance)
            dist = np.linalg.norm(sensors - points,axis=1)
            dx = sensors[:,0]-points[:,0]
            dy = sensors[:,1]-points[:,1]
            dz = sensors[:,2]-points[:,2]
            d_xy = (dx ** 2 + dy ** 2) ** 0.5
            sensors = np.stack((dx/d_xy,dz/dist,dist),axis=1)
        else:
            # NOTE(review): hard exit on bad config; consider raising instead
            print("ERROR: not a valid sensor information format!")
            sys.exit(1)
        data = {
            None: points,
            'normals': normals,
            'sensors': sensors,
        }
        if self.transform is not None:
            data = self.transform(data)
        return data
    def check_complete(self, files):
        ''' Check if field is complete.
        Args:
            files: files
        '''
        complete = (self.file_name in files)
        return complete
class PartialPointCloudField(Field):
''' Partial Point cloud field.
It provides the field used for partial point cloud data. These are the points
randomly sampled on the mesh and a bounding box with random size is applied.
Args:
file_name (str): file name
transform (list): list of transformations applied to data points
multi_files (callable): number of files
part_ratio (float): max ratio for the remaining part
'''
| |
<reponame>Taye310/Shift-Net_pytorch<gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
# For original shift
from models.shift_net.InnerShiftTriple import InnerShiftTriple
from models.shift_net.InnerCos import InnerCos
# For res shift
from models.res_shift_net.innerResShiftTriple import InnerResShiftTriple
# For patch patch shift
from models.patch_soft_shift.innerPatchSoftShiftTriple import InnerPatchSoftShiftTriple
# For res patch patch shift
from models.res_patch_soft_shift.innerResPatchSoftShiftTriple import InnerResPatchSoftShiftTriple
from .unet import UnetSkipConnectionBlock
from .modules import *
################################### *************************** #####################################
################################### Shift_net #####################################
################################### *************************** #####################################
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGeneratorShiftTriple(nn.Module):
    """U-Net generator with a shift-triple block spliced into the decoder
    three levels from the output (layer_to_last=3)."""
    def __init__(self, input_nc, output_nc, num_downs, opt, innerCos_list, shift_list, mask_global, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_spectral_norm=False):
        """Build the U-Net from the innermost block outward.

        Args:
            input_nc/output_nc: channel counts at the network boundary.
            num_downs: number of downsamplings (e.g. 7 turns 128x128 into 1x1).
            opt: options object consumed by the shift/cos layers.
            innerCos_list, shift_list: output lists; the constructed
                InnerCos/InnerShiftTriple layers are appended to them so the
                trainer can reach inside the model.
            mask_global: inpainting mask shared with the shift layers.
        """
        super(UnetGeneratorShiftTriple, self).__init__()
        # construct unet structure, starting at the bottleneck
        unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer,
                                             innermost=True, use_spectral_norm=use_spectral_norm)
        print(unet_block)  # NOTE(review): debug print left in the constructor
        for i in range(num_downs - 5):  # the inner layer count is 3 (spatial size 512*512), if unet_256.
            unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block,
                                                 norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
        unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block,
                                             norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
        # the shift-triple block replaces the ordinary skip block at this level
        unet_shift_block = UnetSkipConnectionShiftBlock(ngf * 2, ngf * 4, opt, innerCos_list, shift_list,
                                                        mask_global, input_nc=None, \
                                                        submodule=unet_block,
                                                        norm_layer=norm_layer, use_spectral_norm=use_spectral_norm, layer_to_last=3) # passing in unet_shift_block
        unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_shift_block,
                                             norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
        unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True,
                                             norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
        self.model = unet_block
    def forward(self, input):
        """Run the full U-Net on `input`."""
        return self.model(input)
# Mention: the TripleBlock differs in `upconv` defination.
# 'cos' means that we add a `innerCos` layer in the block.
class UnetSkipConnectionShiftBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, opt, innerCos_list, shift_list, mask_global, input_nc, \
submodule=None, shift_layer=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d,
use_spectral_norm=False, layer_to_last=3):
super(UnetSkipConnectionShiftBlock, self).__init__()
self.outermost = outermost
if input_nc is None:
input_nc = outer_nc
downconv = spectral_norm(nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1), use_spectral_norm)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
# As the downconv layer is outer_nc in and inner_nc out.
# So the shift define like this:
shift = InnerShiftTriple(opt.shift_sz, opt.stride, opt.mask_thred,
opt.triple_weight, layer_to_last=layer_to_last)
shift.set_mask(mask_global)
shift_list.append(shift)
# Add latent constraint
# Then add the constraint to the constrain layer list!
innerCos = InnerCos(strength=opt.strength, skip=opt.skip, layer_to_last=layer_to_last)
innerCos.set_mask(mask_global) # Here we need to set mask for innerCos layer too.
innerCos_list.append(innerCos)
# Different position only has differences in `upconv`
# for the outermost, the special is `tanh`
if outermost:
upconv = spectral_norm(nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1), use_spectral_norm)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
# for the innermost, the special is `inner_nc` instead of `inner_nc*2`
elif innermost:
upconv = spectral_norm(nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1), use_spectral_norm)
down = [downrelu, downconv] # for the innermost, no submodule, and delete the bn
up = [uprelu, upconv, upnorm]
model = down + up
# else, the normal
else:
# shift triple differs in here. It is `*3` not `*2`.
upconv = spectral_norm(nn.ConvTranspose2d(inner_nc * 3, outer_nc,
kernel_size=4, stride=2,
padding=1), use_spectral_norm)
down = [downrelu, downconv, downnorm]
# shift should be placed after uprelu
# NB: innerCos are placed before shift. So need to add the latent gredient to
# to former part.
up = [uprelu, innerCos, shift, upconv, upnorm]
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost: # if it is the outermost, directly pass the input in.
return self.model(x)
else:
x_latter = self.model(x)
_, _, h, w = x.size()
if h != x_latter.size(2) or w != x_latter.size(3):
x_latter = F.interpolate(x_latter, (h, w), mode='bilinear')
return torch.cat([x_latter, x], 1) # cat in the C channel
################################### *************************** #####################################
################################### Res Shift_net #####################################
################################### *************************** #####################################
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class ResUnetGeneratorShiftTriple(nn.Module):
    """U-Net generator whose block 3 layers from the last is a residual shift block.

    |num_downs|: number of downsamplings; e.g. with num_downs == 7 a 128x128
    image becomes 1x1 at the bottleneck.

    The InnerCos and shift layers created while building the net are appended
    to `innerCos_list` and `shift_list` (mutated in place) so the training
    loop can drive them later.
    """

    def __init__(self, input_nc, output_nc, num_downs, opt, innerCos_list, shift_list, mask_global, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_spectral_norm=False):
        super(ResUnetGeneratorShiftTriple, self).__init__()
        # Construct the U-Net structure from the innermost block outwards.
        unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer,
                                             innermost=True, use_spectral_norm=use_spectral_norm)
        # The inner layers number is 3 (spatial size: 512*512), if unet_256.
        for i in range(num_downs - 5):
            unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block,
                                                 norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
        unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block,
                                             norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
        # The residual shift block sits 3 layers from the last (layer_to_last=3).
        unet_shift_block = ResUnetSkipConnectionBlock(ngf * 2, ngf * 4, opt, innerCos_list, shift_list,
                                                      mask_global, input_nc=None,
                                                      submodule=unet_block,
                                                      norm_layer=norm_layer, use_spectral_norm=use_spectral_norm, layer_to_last=3)
        unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_shift_block,
                                             norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
        unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True,
                                             norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
        self.model = unet_block

    def forward(self, input):
        """Apply the full generator U-Net to `input`."""
        return self.model(input)
# Mention: the TripleBlock differs in `upconv` defination.
# 'cos' means that we add a `innerCos` layer in the block.
class ResUnetSkipConnectionBlock(nn.Module):
    """U-Net skip-connection block carrying a residual shift layer.

    Builds the down/up paths around `submodule` and registers the created
    InnerResShiftTriple in `shift_list` and the InnerCos constraint in
    `innerCos_list` — both lists are mutated in place for the caller.
    """

    def __init__(self, outer_nc, inner_nc, opt, innerCos_list, shift_list, mask_global, input_nc, \
                 submodule=None, shift_layer=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d,
                 use_spectral_norm=False, layer_to_last=3):
        super(ResUnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        if input_nc is None:
            input_nc = outer_nc
        downconv = spectral_norm(nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                                           stride=2, padding=1), use_spectral_norm)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)

        # As the downconv layer is outer_nc in and inner_nc out.
        # So the shift define like this:
        shift = InnerResShiftTriple(inner_nc, opt.shift_sz, opt.stride, opt.mask_thred,
                                    opt.triple_weight, layer_to_last=layer_to_last)
        shift.set_mask(mask_global)  # the mask must be set before the layer is used
        shift_list.append(shift)

        # Add latent constraint
        # Then add the constraint to the constrain layer list!
        innerCos = InnerCos(strength=opt.strength, skip=opt.skip, layer_to_last=layer_to_last)
        innerCos.set_mask(mask_global)  # Here we need to set mask for innerCos layer too.
        innerCos_list.append(innerCos)

        # Different position only has differences in `upconv`
        # for the outermost, the special is `tanh`
        if outermost:
            upconv = spectral_norm(nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                                      kernel_size=4, stride=2,
                                                      padding=1), use_spectral_norm)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        # for the innermost, the special is `inner_nc` instead of `inner_nc*2`
        elif innermost:
            upconv = spectral_norm(nn.ConvTranspose2d(inner_nc, outer_nc,
                                                      kernel_size=4, stride=2,
                                                      padding=1), use_spectral_norm)
            down = [downrelu, downconv]  # for the innermost, no submodule, and delete the bn
            up = [uprelu, upconv, upnorm]
            model = down + up
        # else, the normal
        else:
            # Res shift differs with other shift here. It is `*2` not `*3`.
            upconv = spectral_norm(nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                                      kernel_size=4, stride=2,
                                                      padding=1), use_spectral_norm)
            down = [downrelu, downconv, downnorm]
            # shift should be placed after uprelu
            # NB: innerCos are placed before shift. So need to add the latent gredient to
            # to former part.
            up = [uprelu, innerCos, shift, upconv, upnorm]
            model = down + [submodule] + up

        self.model = nn.Sequential(*model)

    def forward(self, x):
        """Forward pass; inner blocks concatenate input with output (skip link)."""
        if self.outermost:  # if it is the outermost, directly pass the input in.
            return self.model(x)
        else:
            x_latter = self.model(x)
            _, _, h, w = x.size()
            # Fix any spatial mismatch introduced by conv/deconv rounding.
            if h != x_latter.size(2) or w != x_latter.size(3):
                x_latter = F.interpolate(x_latter, (h, w), mode='bilinear')
            return torch.cat([x_latter, x], 1)  # cat in the C channel
################################### *************************** #####################################
################################### patch soft shift_net #####################################
################################### *************************** #####################################
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class PatchSoftUnetGeneratorShiftTriple(nn.Module):
    """U-Net generator whose block 3 layers from the last is a patch-soft shift block.

    |num_downs|: number of downsamplings; e.g. with num_downs == 7 a 128x128
    image becomes 1x1 at the bottleneck.

    The InnerCos and shift layers created while building the net are appended
    to `innerCos_list` and `shift_list` (mutated in place) so the training
    loop can drive them later.
    """

    def __init__(self, input_nc, output_nc, num_downs, opt, innerCos_list, shift_list, mask_global, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_spectral_norm=False):
        super(PatchSoftUnetGeneratorShiftTriple, self).__init__()
        # Construct the U-Net structure from the innermost block outwards.
        unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer,
                                             innermost=True, use_spectral_norm=use_spectral_norm)
        # The inner layers number is 3 (spatial size: 512*512), if unet_256.
        for i in range(num_downs - 5):
            unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block,
                                                 norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
        unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block,
                                             norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
        # The patch-soft shift block sits 3 layers from the last (layer_to_last=3).
        unet_shift_block = PatchSoftUnetSkipConnectionShiftTriple(ngf * 2, ngf * 4, opt, innerCos_list, shift_list,
                                                                  mask_global, input_nc=None,
                                                                  submodule=unet_block,
                                                                  norm_layer=norm_layer, use_spectral_norm=use_spectral_norm, layer_to_last=3)
        unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_shift_block,
                                             norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
        unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True,
                                             norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
        self.model = unet_block

    def forward(self, input):
        """Apply the full generator U-Net to `input`."""
        return self.model(input)
# Mention: the TripleBlock differs in `upconv` defination.
# 'cos' means that we add a `innerCos` layer in the block.
class PatchSoftUnetSkipConnectionShiftTriple(nn.Module):
def __init__(self, outer_nc, inner_nc, opt, innerCos_list, shift_list, mask_global, input_nc, \
submodule=None, shift_layer=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d,
use_spectral_norm=False, layer_to_last=3):
super(PatchSoftUnetSkipConnectionShiftTriple, self).__init__()
self.outermost = outermost
if input_nc is None:
input_nc = outer_nc
downconv = spectral_norm(nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1), use_spectral_norm)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
# As the downconv | |
# Repository: bryanchriswhite/agents-aea
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the strategy class of the confirmation aw1 skill."""
import logging
from pathlib import Path
from typing import cast
from unittest.mock import patch
from aea.crypto.ledger_apis import LedgerApis
from aea.helpers.transaction.base import Terms
from aea.test_tools.test_skill import BaseSkillTestCase
from packages.fetchai.skills.confirmation_aw1.registration_db import RegistrationDB
from packages.fetchai.skills.confirmation_aw1.strategy import (
DEVELOPER_ONLY_REQUIRED_KEYS,
PUBLIC_ID,
REQUIRED_KEYS,
Strategy,
)
from tests.conftest import ROOT_DIR
class TestStrategy(BaseSkillTestCase):
"""Test Strategy of confirmation aw1."""
path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "confirmation_aw1")
    @classmethod
    def setup(cls):
        """Setup the test class."""
        super().setup()
        cls.token_denomination = "atestfet"
        cls.token_dispense_amount = 100000
        # Realistic staking-contract address used as a fixture value.
        cls.fetchai_staking_contract_address = (
            "0x351bac612b50e87b46e4b10a282f632d41397de2"
        )
        cls.override_staking_check = False
        cls.awx_aeas = []
        cls.strategy = Strategy(
            token_denomination=cls.token_denomination,
            token_dispense_amount=cls.token_dispense_amount,
            fetchai_staking_contract_address=cls.fetchai_staking_contract_address,
            override_staking_check=cls.override_staking_check,
            awx_aeas=cls.awx_aeas,
            name="strategy",
            skill_context=cls._skill.skill_context,
        )
        cls.address = "some_address"
        # Registration payload shared by most tests below.
        cls.info = {
            "ethereum_address": "some_value",
            "signature_of_ethereum_address": "some_signature_of_ethereum_address",
            "signature_of_fetchai_address": "some_signature_of_fetchai_address",
            "developer_handle": "some_developer_handle",
            "tweet": "some_tweet",
        }
        cls.logger = cls._skill.skill_context.logger
        cls.db = cast(RegistrationDB, cls._skill.skill_context.registration_db)
    def test__init__(self):
        """Test the __init__ of Strategy class."""
        # Defaults set by Strategy.__init__: no registration activity yet.
        assert self.strategy._is_ready_to_register is False
        assert self.strategy._is_registered is False
        assert self.strategy.is_registration_pending is False
        assert self.strategy.signature_of_ethereum_address is None
        assert self.strategy._ledger_id == self.skill.skill_context.default_ledger_id
        assert self.strategy._max_tx_fee == 100
        assert self.strategy._contract_ledger_id == "ethereum"
        assert self.strategy._contract_callable == "get_stake"
        assert self.strategy._contract_id == str(PUBLIC_ID)
        assert self.strategy._in_process_registrations == {}
    def test_properties(self):
        """Test the properties of Strategy class."""
        # Each public property mirrors its private attribute / setup value.
        assert self.strategy.contract_id == self.strategy._contract_id
        assert self.strategy.contract_address == self.fetchai_staking_contract_address
        assert self.strategy.contract_ledger_id == self.strategy._contract_ledger_id
        assert self.strategy.contract_callable == self.strategy._contract_callable
        assert self.strategy.awx_aeas == self.awx_aeas
        assert self.strategy.all_registered_aeas == []
    def test_lock_registration_temporarily(self):
        """Test the lock_registration_temporarily method of the Strategy class."""
        # before
        assert self.address not in self.strategy._in_process_registrations

        # operation
        self.strategy.lock_registration_temporarily(self.address, self.info)

        # after: the address is held in-process with its registration info.
        assert self.strategy._in_process_registrations[self.address] == self.info
    def test_finalize_registration_i(self):
        """Test the finalize_registration method of the Strategy class where NOT developer_only_mode."""
        # setup
        self.strategy.developer_handle_only = False
        self.strategy.lock_registration_temporarily(self.address, self.info)

        # operation
        with patch.object(self.db, "set_registered") as mock_set:
            with patch.object(self.logger, "log") as mock_logger:
                self.strategy.finalize_registration(self.address)

        # after: the lock is released and the full record is persisted to the DB.
        assert self.address not in self.strategy._in_process_registrations

        mock_logger.assert_any_call(
            logging.INFO,
            f"finalizing registration for address={self.address}, info={self.info}",
        )

        mock_set.assert_any_call(
            address=self.address,
            ethereum_address=self.info["ethereum_address"],
            ethereum_signature=self.info["signature_of_ethereum_address"],
            fetchai_signature=self.info["signature_of_fetchai_address"],
            developer_handle=self.info["developer_handle"],
            tweet=self.info.get("tweet", ""),
        )
    def test_finalize_registration_ii(self):
        """Test the finalize_registration method of the Strategy class where IS developer_only_mode."""
        # setup
        self.strategy.developer_handle_only = True
        self.strategy.lock_registration_temporarily(self.address, self.info)

        # operation
        with patch.object(self.db, "set_registered_developer_only") as mock_set:
            with patch.object(self.logger, "log") as mock_logger:
                self.strategy.finalize_registration(self.address)

        # after: in developer-only mode just address + handle are persisted.
        assert self.address not in self.strategy._in_process_registrations

        mock_logger.assert_any_call(
            logging.INFO,
            f"finalizing registration for address={self.address}, info={self.info}",
        )

        mock_set.assert_any_call(
            address=self.address, developer_handle=self.info["developer_handle"],
        )
    def test_unlock_registration(self):
        """Test the unlock_registration method of the Strategy class."""
        # setup
        self.strategy.lock_registration_temporarily(self.address, self.info)

        # before
        assert self.address in self.strategy._in_process_registrations

        # operation
        with patch.object(self.logger, "log") as mock_logger:
            self.strategy.unlock_registration(self.address)

        # after: the in-process lock is dropped and the failure is logged.
        assert self.address not in self.strategy._in_process_registrations

        mock_logger.assert_any_call(
            logging.INFO,
            f"registration info did not pass staking checks = {self.info}",
        )
def test_get_developer_handle(self):
"""Test the get_developer_handle method of the Strategy class."""
# operation
with patch.object(self.db, "get_developer_handle") as mock_get:
self.strategy.get_developer_handle(self.address)
# after
mock_get.assert_any_call(self.address)
    def test_valid_registration_succeeds(self):
        """Test the valid_registration method of the Strategy class which succeeds."""
        # setup
        registration_info = {
            "ethereum_address": "some_ethereum_address",
            "fetchai_address": self.address,
            "signature_of_ethereum_address": "some_signature_of_ethereum_address",
            "signature_of_fetchai_address": "some_signature_of_fetchai_address",
            "developer_handle": "some_developer_handle",
        }

        # operation: both signature checks pass and the address is not yet registered.
        with patch.object(
            self.strategy, "_valid_signature", return_value=True
        ) as mock_valid:
            with patch.object(self.db, "is_registered", return_value=False) as mock_is:
                is_valid, code, msg = self.strategy.valid_registration(
                    registration_info, self.address
                )

        # after
        mock_is.assert_called_once()
        mock_valid.assert_called()
        assert is_valid
        assert code == 0
        assert msg == "all good!"
    def test_valid_registration_fails_i(self):
        """Test the valid_registration method of the Strategy class which fails because some key is missing."""
        # setup: "developer_handle" is deliberately omitted from the payload.
        incorrect_registration_info = {
            "ethereum_address": "some_ethereum_address",
            "fetchai_address": self.address,
            "signature_of_ethereum_address": "some_signature_of_ethereum_address",
            "signature_of_fetchai_address": "some_signature_of_fetchai_address",
        }

        # operation
        is_valid, code, msg = self.strategy.valid_registration(
            incorrect_registration_info, self.address
        )

        # after
        assert not is_valid
        assert code == 1
        assert msg == f"missing keys in registration info, required: {REQUIRED_KEYS}!"
def test_valid_registration_fails_ii(self):
"""Test the valid_registration method of the Strategy class which fails because addresses do not match."""
# setup
different_addres = "some_other_address"
incorrect_registration_info = {
"ethereum_address": "some_ethereum_address",
"fetchai_address": different_addres,
"signature_of_ethereum_address": "some_signature_of_ethereum_address",
"signature_of_fetchai_address": "some_signature_of_fetchai_address",
"developer_handle": "some_developer_handle",
}
# operation
is_valid, code, msg = self.strategy.valid_registration(
incorrect_registration_info, self.address
)
# after
assert not is_valid
assert code == 1
assert msg == "fetchai address of agent and registration info do not match!"
    def test_valid_registration_fails_iii(self):
        """Test the valid_registration method of the Strategy class which fails because _valid_signature returns False for first call."""
        # setup
        incorrect_registration_info = {
            "ethereum_address": "some_ethereum_address",
            "fetchai_address": self.address,
            "signature_of_ethereum_address": "some_signature_of_ethereum_address",
            "signature_of_fetchai_address": "some_signature_of_fetchai_address",
            "developer_handle": "some_developer_handle",
        }

        # operation: first signature check (fetchai) fails immediately.
        with patch.object(
            self.strategy, "_valid_signature", return_value=False
        ) as mock_valid:
            is_valid, code, msg = self.strategy.valid_registration(
                incorrect_registration_info, self.address
            )

        # after
        mock_valid.assert_called_once()
        assert not is_valid
        assert code == 1
        assert msg == "fetchai address and signature do not match!"
    def test_valid_registration_fails_iv(self):
        """Test the valid_registration method of the Strategy class which fails because _valid_signature returns False for second call."""
        # setup
        incorrect_registration_info = {
            "ethereum_address": "some_ethereum_address",
            "fetchai_address": self.address,
            "signature_of_ethereum_address": "some_signature_of_ethereum_address",
            "signature_of_fetchai_address": "some_signature_of_fetchai_address",
            "developer_handle": "some_developer_handle",
        }

        # operation: fetchai signature passes, ethereum signature fails.
        with patch.object(
            self.strategy, "_valid_signature", side_effect=[True, False]
        ) as mock_valid:
            is_valid, code, msg = self.strategy.valid_registration(
                incorrect_registration_info, self.address
            )

        # after
        mock_valid.assert_called()
        assert not is_valid
        assert code == 1
        assert msg == "ethereum address and signature do not match!"
    def test_valid_registration_fails_v(self):
        """Test the valid_registration method of the Strategy class which fails because no developer_handle was provided."""
        # setup: developer_handle is present but empty, which must be rejected.
        incorrect_registration_info = {
            "ethereum_address": "some_ethereum_address",
            "fetchai_address": self.address,
            "signature_of_ethereum_address": "some_signature_of_ethereum_address",
            "signature_of_fetchai_address": "some_signature_of_fetchai_address",
            "developer_handle": "",
        }

        # operation
        is_valid, code, msg = self.strategy.valid_registration(
            incorrect_registration_info, self.address
        )

        # after
        assert not is_valid
        assert code == 1
        assert msg == "missing developer_handle!"
    def test_valid_registration_fails_vi(self):
        """Test the valid_registration method of the Strategy class which fails because agent registration is in progress."""
        # setup
        registration_info = {
            "ethereum_address": "some_ethereum_address",
            "fetchai_address": self.address,
            "signature_of_ethereum_address": "some_signature_of_ethereum_address",
            "signature_of_fetchai_address": "some_signature_of_fetchai_address",
            "developer_handle": "some_developer_handle",
        }
        # Lock the address first so the re-registration attempt is refused.
        self.strategy.lock_registration_temporarily(self.address, self.info)

        # operation
        is_valid, code, msg = self.strategy.valid_registration(
            registration_info, self.address
        )

        # after
        assert not is_valid
        assert code == 1
        assert msg == "registration in process for this address!"
    def test_valid_registration_fails_vii(self):
        """Test the valid_registration method of the Strategy class which fails because agent already registered."""
        # setup
        registration_info = {
            "ethereum_address": "some_ethereum_address",
            "fetchai_address": self.address,
            "signature_of_ethereum_address": "some_signature_of_ethereum_address",
            "signature_of_fetchai_address": "some_signature_of_fetchai_address",
            "developer_handle": "some_developer_handle",
        }

        # operation: the DB reports this address as already registered.
        with patch.object(self.db, "is_registered", return_value=True) as mock_is:
            is_valid, code, msg = self.strategy.valid_registration(
                registration_info, self.address
            )

        # after
        mock_is.assert_called_once()
        assert not is_valid
        assert code == 1
        assert msg == "already registered!"
    def test_valid_registration_fails_developer_only_mode_i(self):
        """Test the valid_registration method of the Strategy class in developer_only_mode which fails because some key is missing."""
        # setup: developer-only mode uses a reduced required-key set; omit the handle.
        self.strategy.developer_handle_only = True
        incorrect_registration_info = {
            "fetchai_address": self.address,
        }

        # operation
        is_valid, code, msg = self.strategy.valid_registration(
            incorrect_registration_info, self.address
        )

        # after
        assert not is_valid
        assert code == 1
        assert (
            msg
            == f"missing keys in registration info, required: {DEVELOPER_ONLY_REQUIRED_KEYS}!"
        )
def test_valid_registration_fails_developer_only_mode_ii(self):
"""Test the valid_registration method of the Strategy class which fails because addresses do not match."""
# setup
self.strategy.developer_handle_only = True
different_addres = "some_other_address"
incorrect_registration_info = {
"fetchai_address": different_addres,
"developer_handle": "some_developer_handle",
}
# operation
is_valid, code, msg = self.strategy.valid_registration(
incorrect_registration_info, self.address
)
# after
assert not is_valid
assert code == 1
assert msg == "fetchai address of agent and registration info do not match!"
    def test__valid_signature_i(self):
        """Test the _valid_signature method of the Strategy class where result is True."""
        # setup
        expected_signer = "some_expected_signer"
        signature = "some_signature"
        message_str = "some_message_str"
        ledger_id = "some_ledger_id"

        # operation: recover_message returns a tuple of recovered addresses;
        # validity means the expected signer is among them.
        with patch.object(
            LedgerApis, "recover_message", return_value=(expected_signer,)
        ) as mock_recover:
            is_valid = self.strategy._valid_signature(
                expected_signer, signature, message_str, ledger_id
            )

        # after
        mock_recover.assert_called_once()
        assert is_valid
    def test__valid_signature_ii(self):
        """Test the _valid_signature method of the Strategy class where result is False."""
        # setup
        expected_signer = "some_expected_signer"
        signature = "some_signature"
        message_str = "some_message_str"
        ledger_id = "some_ledger_id"

        # operation: the recovered signer differs from the expected one.
        with patch.object(
            LedgerApis, "recover_message", return_value=("some_other_signer",)
        ) as mock_recover:
            is_valid = self.strategy._valid_signature(
                expected_signer, signature, message_str, ledger_id
            )

        # after
        mock_recover.assert_called_once()
        assert not is_valid
def test__valid_signature_iii(self):
"""Test the _valid_signature method of the Strategy class where an exception is raised."""
# setup
expected_signer = "some_expected_signer"
signature = "some_signature"
message_str = "some_message_str"
ledger_id = "some_ledger_id"
exception_message = "some_exception_message"
# operation
| |
# Repository: phrb/orio_experiments
# ZestyParser 0.8.1 -- Parses in Python zestily
# Copyright (C) 2006-2007 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
@group Basic Tokens: Raw,RE,RawToken,Token,TakeToken
@group Complex Tokens: CompositeToken,TokenSequence,TokenSeries
@group Special Tokens: Defer,Default,Lookahead,Negative
@group TokenSequence Flags: Omit,Skip,Only
@version: 0.8.1
@author: <NAME>
@copyright: Copyright 2006-2007 <NAME>. Released under the MIT license (see LICENSE.txt).
@contact: <EMAIL>
@var EmptyToken: A L{Default} instance initialized with the empty string.
@var EOF: A token which matches (and returns C{None}) if the parser is at the end of its L{data <ZestyParser.data>} sequence.
In ZestyParser, a token object must, at minimum, be a callable taking a L{ZestyParser <Parser.ZestyParser>} instance and its current L{cursor <ZestyParser.cursor>} as parameters. It can do whatever it needs with the parser's L{data <ZestyParser.data>} and L{cursor <ZestyParser.cursor>} properties before returning. It may raise L{NotMatched} to indicate to the L{ZestyParser <Parser.ZestyParser>} instance that it failed to match; it may also raise L{ParseError} to indicate, for instance, that it began matching successfully but encountered an unrecoverable error.
The L{Tokens} module contains a variety of predefined token classes (instances of which are callable) and other valid token objects which should cover most parsing situations.
"""
import re, copy, types, warnings
from Parser import NotMatched, ParseError
# Public API of this module.
__all__ = (
    "Placeholder",
    "AbstractToken",
    "TokenWrapper",
    "RE",
    "Raw",
    "Token",
    "RawToken",
    "CompositeToken",
    "TokenSequence",
    "TakeToken",
    "TokenSeries",
    "EmptyToken",
    "Default",
    "Skip",
    "Omit",
    "Only",
    "Defer",
    "Lookahead",
    "Negative",
    "EOF",
    "Whitespace",
    "Const",
    "Inf",
)

# Recursion guards used when walking possibly self-referential token graphs:
# rstack for __repr__-style traversals, replstack for placeholder replacement.
rstack = []
replstack = []

# Sentinel meaning "unbounded" for TokenSeries min/max repetition counts.
Inf = -1
def count_args(callable):
    """Return the number of positional parameters ``callable`` accepts,
    excluding the implicit ``self`` for methods and callable instances.

    NOTE: relies on Python 2-only introspection (``func_code``, ``im_func``,
    ``types.ClassType`` / ``types.InstanceType``); not portable to Python 3.
    """
    t = type(callable)
    if t is types.FunctionType:
        return callable.func_code.co_argcount
    elif t is types.ClassType:
        # Old-style class: count its __init__ parameters minus self.
        return callable.__init__.im_func.func_code.co_argcount - 1
    elif t is types.InstanceType:
        # Instance with a __call__ method: minus self.
        return callable.__call__.im_func.func_code.co_argcount - 1
    elif t is types.MethodType:
        # Bound/unbound method: minus self.
        return callable.im_func.func_code.co_argcount - 1
    # assume it's some builtin that only takes the data itself as a parameter
    return 1
class Placeholder:
    """A slot in a token graph to be filled in later via ``_replace``.

    A keyless placeholder (``key is None``) is filled positionally from the
    ``vals`` list; a keyed placeholder is looked up in the ``kwvals`` mapping.
    """

    def __init__(self, key=None):
        self.key = key

    def __eq__(self, key):
        # NOTE: equality compares against the *key*, not another placeholder;
        # this is what makes ``subj == None`` below mean "keyless placeholder".
        return self.key == key

    def __hash__(self):
        return hash(self.key)

    def __call__(self, key=None):
        # Placeholder instances double as factories for further placeholders.
        return Placeholder(key)

    def _single_replace(cls, subj, vals, kwvals):
        """Resolve one value: fill it if it is a placeholder, else recurse
        into it if it is a token, else return it unchanged."""
        if isinstance(subj, cls):
            if vals and subj == None:  # keyless: consume the next positional value
                return vals.pop(0)
            elif subj in kwvals and subj != None:  # keyed: look up by key
                return kwvals[subj]
        elif isinstance(subj, AbstractToken):
            # Recurse so nested placeholders inside composite tokens get filled.
            subj._replace(vals, kwvals)
        return subj

    _single_replace = classmethod(_single_replace)

    def _list_replace(cls, subj, vals, kwvals):
        """Resolve every element of the list ``subj`` in place."""
        for i, v in enumerate(subj):
            subj[i] = cls._single_replace(v, vals, kwvals)

    _list_replace = classmethod(_list_replace)

    def __repr__(self):
        return "<Placeholder %r>" % self.key
class ListReplacing:
    """Mixin for tokens whose ``desc`` is a list: fills placeholders in place."""

    def _replace(self, vals, kwvals):
        # replstack guards against infinite recursion on cyclic token graphs.
        if self not in replstack:
            replstack.append(self)
            Placeholder._list_replace(self.desc, vals, kwvals)
            replstack.pop()
class SingleReplacing:
    """Mixin for tokens whose ``desc`` is a single value: fills placeholders."""

    def _replace(self, vals, kwvals):
        # replstack guards against infinite recursion on cyclic token graphs.
        if self not in replstack:
            replstack.append(self)
            self.desc = Placeholder._single_replace(self.desc, vals, kwvals)
            replstack.pop()
class AbstractToken(object):
"""
Base class from which most tokens defined in this module derive. Subclassing this is not required for writing tokens, since they can be any callable with certain semantics, but this class provides several useful services for creating reusable token classes, such as callback support and convenient operator overloading.
@ivar desc: The generic "description" variable which stores the "essence" of any given instance. Subclasses use this as needed.
@ivar callback: An optional callable which, if not None, will be called whenever an instance matches successfully. It may take one, two, or three parameters, depending on its needs. If one, it will be passed whatever data the token matched (i.e. whatever it would normally have returned upon being called). If two, it will be passed the L{ZestyParser <Parser.ZestyParser>} instance and the data. If three, it will be passed the parser, the data, and the what the parser's cursor was when this token started matching. Callbacks may raise L{NotMatched} or L{ParseError} with the usual behaviour. They should also return a value, which will be returned to the calling L{ZestyParser <Parser.ZestyParser>} instance.
@ivar to: An optional callable which, if not None, will be called in the same manner as a callback (after any callback and before returning to the parser instance), but will be passed only one argument: the data matched (or returned by the callback, if any). Its main purpose is to allow you to concisely do things like C{Token('[0-9]+', group=0, to=int)} -- the builtin callable C{int} will be passed the text matched by the regex, so the token will ultimately return an integer instead of a string or a regex match object. You can also use this property with L{AHT} types, for more complex multi-stage parsing. See the C{n3.py} and C{n3rdflib.py} examples for a demonstration of this. (In previous versions, this was passed to the initializer as C{as}, but this is deprecated because C{as} will become a reserved word in Python 2.6. Change your code to use {to}.)
"""
name = None
failMessage = None
callback = None
to = None
# 'as' is deprecated in favor of 'to' since it's becoming a reserved word
    def __init__(self, desc, callback=None, to=None, name=None):
        # NOTE: assigning callback/to below triggers __setattr__ -> _poke,
        # which (re)builds this token's parse pipeline; desc is set first so
        # _poke sees a fully-described token.
        self.desc = desc
        self.callback = callback
        self.to = to
        self.name = name
    def __repr__(self):
        # e.g. "Token myname"; falls back to str(self) when the token is unnamed.
        return "%s %s" % (self.__class__.__name__, (self.name or str(self)))
    def __str__(self):
        # Subclasses override this with a human-readable description; the base
        # class contributes nothing, so __repr__ shows just the class name.
        return ""
    def _make_callbackrun(self, func, callback):
        """Wrap ``func`` so ``callback`` is invoked with 1, 2, or 3 arguments
        ((data) / (parser, data) / (parser, data, origCursor)) depending on
        the callback's arity as reported by count_args."""
        argcount = count_args(callback)
        if argcount == 1:
            def f(parser, origCursor):
                return callback(func(parser, origCursor))
        elif argcount == 2:
            def f(parser, origCursor):
                return callback(parser, func(parser, origCursor))
        elif argcount == 3:
            def f(parser, origCursor):
                return callback(parser, func(parser, origCursor), origCursor)
        # NOTE(review): any other arity leaves ``f`` unbound and raises here;
        # presumably unreachable for supported callbacks — confirm.
        return f
    def _make_torun(self, func):
        """Wrap ``func`` so its result is post-processed by ``self.to``."""
        def f(parser, origCursor):
            # ``self.to`` is looked up at call time, so later reassignment applies.
            return self.to(func(parser, origCursor))
        return f
    def _make_failcheck(self, func):
        """Wrap ``func`` to upgrade NotMatched into ParseError when this token
        has a failMessage configured; otherwise the NotMatched propagates."""
        def f(parser, origCursor):
            try:
                data = func(parser, origCursor)
                return data
            except NotMatched:
                if self.failMessage is True:
                    # True is a sentinel meaning "auto-generate from str(self)".
                    raise ParseError(parser, "Expected %s" % str(self))
                elif self.failMessage:
                    raise ParseError(parser, self.failMessage)
                else:
                    raise
        return f
    def _poke(self):
        """(Re)build ``self.parse`` — the token's effective entry point — by
        stacking the callback / to / failMessage wrappers around __call__."""
        c = self.__call__
        if self.callback:
            c = self._make_callbackrun(c, self.callback)
        if self.to:
            c = self._make_torun(c)
        if self.failMessage:
            c = self._make_failcheck(c)
        if c is self.__call__ and not isinstance(self, Defer):
            # No wrappers needed: drop the instance attribute (set then delete
            # so the delete never fails) and let lookups fall through.
            self.parse = None
            del self.parse
        else:
            self.parse = c
    def __copy__(self):
        # Bypass __init__, copy all state, rebuild the parse pipeline for the
        # new object, and shallow-copy desc so it can be mutated independently.
        n = self.__class__.__new__(self.__class__)
        n.__dict__.update(self.__dict__)
        n._poke()
        n.desc = copy.copy(self.desc)
        return n
    def __setattr__(self, name, value):
        super(AbstractToken, self).__setattr__(name, value)
        # Changing any pipeline-affecting attribute recompiles ``parse``.
        if name in ("callback", "failMessage", "to"):
            self._poke()
    def __add__(self, other):
        """Allows you to construct L{TokenSequence}s with the + operator."""
        return TokenSequence([self, other])
    def __sub__(self, other):
        """Allows you to construct L{TokenSequence}s with the - operator, automatically padded with L{Whitespace}.

        I realize it's a bit weird to use the - operator for this, but the main motivation is giving it the same precedence as +. Still, you can read it as a sort of "blank" (which is what the left and right tokens are being joined by), instead of "minus"."""
        return TokenSequence([self, Whitespace, other])
    def __or__(self, other):
        """Allows you to construct L{CompositeToken}s with the | operator."""
        return CompositeToken([self, other])
def __mul__(self, val):
"""Allows you to construct L{TokenSeries} with the * operator. Operand can be:
- int (a series of exactly this many)
- (int, ) (a series of at least this many)
- (x:int, y:int) a series of x to y
The constant Inf can be used in some of these -- * Inf yields a 0--infinity series, and * (x, Inf) yields an x--infinity series.
"""
t = TokenSeries(self)
if isinstance(val, int):
t.min = t.max = val
elif isinstance(val, tuple):
if len(val) == 2:
t.min, t.max = val
elif len(val) == 1:
t.min = val
return t
__rmul__ = __mul__
def __rshift__(self, callback):
"""
Convenience |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.