text stringlengths 957 885k |
|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from gi.repository import Notify
from PySide import QtCore, QtGui, QtDeclarative
import subprocess
import os
import datetime
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
# Unity launcher integration is optional: fall back to a no-op stub when the
# gi Unity bindings are unavailable (e.g. non-Unity desktops).
try:
    from gi.repository import Unity
    LAUNCHER = Unity.LauncherEntry.get_for_desktop_id("zeegaree.desktop")
except ImportError:
    class NullLauncher:
        # Stand-in that silently ignores launcher property updates.
        def set_property(self, prop_name, enabled): pass
    LAUNCHER = NullLauncher()
# Icon assets resolved relative to this source file.
NOTIFICATION_ICON = os.path.join(os.path.dirname(__file__), "./images/z_128.png")
TRAY_ICON = os.path.join(os.path.dirname(__file__), "./images/mono_color_32.png")
# User's home directory (available for save paths).
HOME = os.path.expanduser("~")
def _get_path(app_id):
return '/' + app_id.replace('.', '/')
def listen_for_activation(app_id, window):
    """
    Listen for 'activate' events. If one is sent, activate 'window'.
    """
    class MyDBUSService(dbus.service.Object):
        def __init__(self, window):
            # Keep a window reference, claim the bus name, and export this
            # object at the path derived from app_id.
            self.window = window
            bus_name = dbus.service.BusName(app_id, bus=dbus.SessionBus())
            dbus.service.Object.__init__(self, bus_name, _get_path(app_id))

        @dbus.service.method(app_id)
        def activate(self):
            # NOTE(review): raises the window via the module-level trayIcon
            # rather than self.window; presumably equivalent since
            # onShowMainWindow() shows the main view -- confirm.
            trayIcon.onShowMainWindow()

    DBusGMainLoop(set_as_default=True)
    # Service object assumed to be kept alive by its D-Bus registration
    # after this local name goes out of scope -- TODO confirm.
    _myservice = MyDBUSService(window)
def activate_if_already_running(app_id):
    """
    Activate the existing window if it's already running. Return True if found
    an existing window, and False otherwise.
    """
    # Use a private connection: closing the shared SessionBus singleton (as
    # the original did) would invalidate it for later dbus.SessionBus()
    # callers such as listen_for_activation().
    bus = dbus.SessionBus(private=True)
    try:
        programinstance = bus.get_object(app_id, _get_path(app_id))
        activate = programinstance.get_dbus_method('activate', app_id)
    except dbus.exceptions.DBusException:
        # Nobody owns the name: no running instance.
        return False
    else:
        print("A running process was found. Activating it.")
        activate()
        return True
    finally:
        # Safe to close a private connection; runs on every exit path.
        bus.close()
class Notification(QtCore.QObject):
    """Desktop notification for Timer and Pomodoro events.

    Shows an urgent libnotify popup and plays a sound through paplay.
    """

    @QtCore.Slot(str, str, str)
    def somethingFinished(self, subject, body, soundfile):
        """Pop up *subject*/*body* and play *soundfile* (path relative to this file)."""
        sound_path = os.path.join(os.path.dirname(__file__), soundfile)
        Notify.init("Timer finished")
        popup = Notify.Notification.new(subject, body, NOTIFICATION_ICON)
        popup.set_urgency(Notify.Urgency.CRITICAL)
        popup.show()
        subprocess.Popen(["paplay", sound_path])
class Ticking(QtCore.QObject):
    """Plays the ticking sound heard while a work session runs."""

    @QtCore.Slot(str)
    def tickTick(self, soundfile):
        """Play *soundfile* (path relative to this file) via paplay."""
        sound_path = os.path.join(os.path.dirname(__file__), soundfile)
        subprocess.Popen(["paplay", sound_path])
class Launcher(QtCore.QObject):
    """Unity launcher icon integration: urgency, badge count, progress bar."""

    @QtCore.Slot(str)
    def setUrgent(self, value):
        """Setting urgent state of icon in Unity launcher"""
        LAUNCHER.set_property("urgent", value)

    @QtCore.Slot(int, str)
    def getPomodoroCount(self, value, booleen):
        """Display number of minutes of pomodoro in Unity Launcher"""
        LAUNCHER.set_property("count", value)
        LAUNCHER.set_property("count_visible", booleen)

    @QtCore.Slot(float)
    def getTimerProgress(self, value):
        """Display timer progress; the bar is hidden when progress is zero."""
        LAUNCHER.set_property("progress", value)
        # Single boolean expression replaces the original if/else.
        LAUNCHER.set_property("progress_visible", value > 0)
class SaveClass(QtGui.QMainWindow):
    """Save Lap times and/or Split times from Stopwatch to a text file."""

    @QtCore.Slot(str, str)
    def getSomethingToSave(self, file_title, text_to_write):
        """Prompt for a destination file and write *text_to_write* to it.

        The suggested name combines *file_title* with the locale-formatted
        current date and time.
        """
        now = datetime.datetime.now()
        file_name = file_title + " " + now.strftime("%x %X") + ".txt"
        fileName, filtr = QtGui.QFileDialog.getSaveFileName(
            self,
            "Save " + file_title,
            file_name,
            "Text Files (*.txt);;All Files (*)")
        if fileName:
            # Context manager guarantees the handle is closed even if the
            # write raises (original left the file open on error).
            with open(fileName, "w") as laptimefile:
                laptimefile.write(text_to_write)
class SystemTrayIcon(QtGui.QSystemTrayIcon):
    """Tray icon whose context menu mirrors the stopwatch, timer and
    Work & Play controls of the QML UI.

    NOTE(review): the on*FromQML slots and iconActivated operate on the
    module-level ``trayIcon`` instance (and the ``view``/``rootObject``/
    ``app`` globals created in ``__main__``) rather than ``self`` --
    equivalent only while a single instance exists; confirm.
    """
    def __init__(self, icon, parent=None):
        QtGui.QSystemTrayIcon.__init__(self, icon, parent)
        menu = QtGui.QMenu(parent)
        self.setContextMenu(menu)
        # Show/Hide pair: exactly one of the two is visible at any time.
        self.showMainWindow = QtGui.QAction("Show", self, triggered=self.onShowMainWindow)
        self.showMainWindow.setVisible(False)
        self.hideMainWindow = QtGui.QAction("Hide", self)
        self.hideMainWindow.setVisible(True)
        self.hideMainWindow.triggered.connect(self.onHideMainWindow)
        # Stopwatch section: disabled title entry plus state-dependent actions.
        self.stopwatchMenuTitle = QtGui.QAction("Stopwatch", self)
        self.stopwatchMenuTitle.setDisabled(True)
        self.stopwatchMenuStart = QtGui.QAction("Start stopwatch", self, triggered=self.onStopwatchStart)
        self.stopwatchMenuPause = QtGui.QAction("Pause stopwatch", self, triggered=self.onStopwatchPause)
        self.stopwatchMenuPause.setVisible(False)
        # Resume reuses the Start handler (start also resumes).
        self.stopwatchMenuResume = QtGui.QAction("Resume stopwatch", self, triggered=self.onStopwatchStart)
        self.stopwatchMenuResume.setVisible(False)
        self.stopwatchMenuLap = QtGui.QAction("Lap", self, triggered=self.onStopwatchLap)
        self.stopwatchMenuLap.setVisible(False)
        self.stopwatchMenuSplit = QtGui.QAction("Split", self, triggered=self.onStopwatchSplit)
        self.stopwatchMenuSplit.setVisible(False)
        self.stopwatchMenuReset = QtGui.QAction("Reset stopwatch", self, triggered=self.onStopwatchReset)
        self.stopwatchMenuReset.setVisible(False)
        # Timer section.
        self.timerMenuTitle = QtGui.QAction("Timer", self)
        self.timerMenuTitle.setDisabled(True)
        self.timerMenuSet = QtGui.QAction("Set timer", self, triggered=self.onTimerSet)
        self.timerMenuStop = QtGui.QAction("Stop timer", self, triggered=self.onTimerStop)
        self.timerMenuStop.setVisible(False)
        self.timerMenuReset = QtGui.QAction("Reset timer", self, triggered=self.onTimerReset)
        self.timerMenuReset.setVisible(False)
        # Work & Play (pomodoro) section.
        self.workplayMenuTitle = QtGui.QAction("Work && Play", self)
        self.workplayMenuTitle.setDisabled(True)
        # Read-only entry that shows the remaining time.
        self.workplayMenuTime = QtGui.QAction("", self)
        self.workplayMenuTime.setDisabled(True)
        self.workplayMenuTime.setVisible(False)
        self.workplayMenuStart = QtGui.QAction("Start Work", self, triggered=self.onWorkplayStart)
        self.workplayMenuPause = QtGui.QAction("Pause Work", self, triggered=self.onWorkplayPause)
        self.workplayMenuPause.setVisible(False)
        self.workplayMenuResume = QtGui.QAction("Resume Work", self, triggered=self.onWorkplayResume)
        self.workplayMenuResume.setVisible(False)
        self.workplayMenuStartNextWork = QtGui.QAction("Start next Work unit", self, triggered=self.onWorkplayStartNextWork)
        self.workplayMenuStartNextWork.setVisible(False)
        self.workplayMenuStartNextBreak = QtGui.QAction("Start Break time", self, triggered=self.onWorkplayStartNextBreak)
        self.workplayMenuStartNextBreak.setVisible(False)
        self.workplayMenuStop = QtGui.QAction("Stop Work && Play", self, triggered=self.onWorkplayStop)
        self.workplayMenuStop.setVisible(False)
        # Assemble the context menu in display order.
        menu.addAction(self.showMainWindow)
        menu.addAction(self.hideMainWindow)
        menu.addSeparator()
        menu.addAction(self.stopwatchMenuTitle)
        menu.addAction(self.stopwatchMenuStart)
        menu.addAction(self.stopwatchMenuPause)
        menu.addAction(self.stopwatchMenuResume)
        menu.addAction(self.stopwatchMenuLap)
        menu.addAction(self.stopwatchMenuSplit)
        menu.addAction(self.stopwatchMenuReset)
        menu.addSeparator()
        menu.addAction(self.timerMenuTitle)
        menu.addAction(self.timerMenuSet)
        menu.addAction(self.timerMenuStop)
        menu.addAction(self.timerMenuReset)
        menu.addSeparator()
        menu.addAction(self.workplayMenuTitle)
        menu.addAction(self.workplayMenuTime)
        menu.addAction(self.workplayMenuStart)
        menu.addAction(self.workplayMenuPause)
        menu.addAction(self.workplayMenuResume)
        menu.addAction(self.workplayMenuStartNextWork)
        menu.addAction(self.workplayMenuStartNextBreak)
        menu.addAction(self.workplayMenuStop)
        menu.addSeparator()
        menu.addAction(QtGui.QAction("Quit", self, triggered=app.exit))

    def iconActivated(self, reason):
        # Left-click (Trigger) on the tray icon re-shows the main window.
        if reason == QtGui.QSystemTrayIcon.Trigger:
            trayIcon.showMainWindow.setVisible(False)
            trayIcon.hideMainWindow.setVisible(True)
            view.show()
            view.activateWindow()

    def onShowMainWindow(self):
        # Show and raise the main window; flip the Show/Hide actions.
        self.showMainWindow.setVisible(False)
        self.hideMainWindow.setVisible(True)
        view.show()
        view.activateWindow()
        view.raise_()

    def onHideMainWindow(self):
        # Hide the main window; flip the Show/Hide actions.
        self.hideMainWindow.setVisible(False)
        self.showMainWindow.setVisible(True)
        view.hide()

    def onStopwatchStart(self):
        # Start (or resume) the stopwatch and expose running-state actions.
        self.stopwatchMenuStart.setVisible(False)
        self.stopwatchMenuPause.setVisible(True)
        self.stopwatchMenuLap.setVisible(True)
        self.stopwatchMenuSplit.setVisible(True)
        self.stopwatchMenuReset.setVisible(True)
        rootObject.startStopwatch()

    @QtCore.Slot()
    def onStopwatchStartFromQML(self):
        # QML started the stopwatch: sync menu visibility only.
        trayIcon.stopwatchMenuStart.setVisible(False)
        trayIcon.stopwatchMenuResume.setVisible(False)
        trayIcon.stopwatchMenuPause.setVisible(True)
        trayIcon.stopwatchMenuLap.setVisible(True)
        trayIcon.stopwatchMenuSplit.setVisible(True)
        trayIcon.stopwatchMenuReset.setVisible(True)

    def onStopwatchPause(self):
        self.stopwatchMenuPause.setVisible(False)
        self.stopwatchMenuResume.setVisible(True)
        rootObject.pauseStopwatch()

    @QtCore.Slot()
    def onStopwatchPauseFromQML(self):
        trayIcon.stopwatchMenuPause.setVisible(False)
        trayIcon.stopwatchMenuResume.setVisible(True)

    def onStopwatchReset(self):
        # Back to the idle state: only Start is visible.
        self.stopwatchMenuStart.setVisible(True)
        self.stopwatchMenuPause.setVisible(False)
        self.stopwatchMenuResume.setVisible(False)
        self.stopwatchMenuLap.setVisible(False)
        self.stopwatchMenuSplit.setVisible(False)
        self.stopwatchMenuReset.setVisible(False)
        rootObject.resetStopwatch()

    @QtCore.Slot()
    def onStopwatchResetFromQML(self):
        trayIcon.stopwatchMenuStart.setVisible(True)
        trayIcon.stopwatchMenuPause.setVisible(False)
        trayIcon.stopwatchMenuResume.setVisible(False)
        trayIcon.stopwatchMenuLap.setVisible(False)
        trayIcon.stopwatchMenuSplit.setVisible(False)
        trayIcon.stopwatchMenuReset.setVisible(False)

    def onStopwatchLap(self):
        rootObject.getLap()

    def onStopwatchSplit(self):
        rootObject.getSplit()

    def onTimerSet(self):
        # Bring the window up and switch the QML view to the timer page.
        self.showMainWindow.setVisible(False)
        self.hideMainWindow.setVisible(True)
        view.show()
        view.activateWindow()
        view.raise_()
        rootObject.showTimer()

    @QtCore.Slot()
    def onTimerStartFromQML(self):
        trayIcon.timerMenuSet.setVisible(False)
        trayIcon.timerMenuStop.setVisible(True)
        trayIcon.timerMenuReset.setVisible(True)

    @QtCore.Slot()
    def onTimerStopFromQML(self):
        trayIcon.timerMenuSet.setVisible(True)
        trayIcon.timerMenuStop.setVisible(False)
        trayIcon.timerMenuReset.setVisible(False)

    def onTimerStop(self):
        self.timerMenuSet.setVisible(True)
        self.timerMenuStop.setVisible(False)
        self.timerMenuReset.setVisible(False)
        rootObject.stopTimer()

    def onTimerReset(self):
        self.timerMenuSet.setVisible(True)
        self.timerMenuStop.setVisible(False)
        self.timerMenuReset.setVisible(False)
        rootObject.resetTimer()

    @QtCore.Slot()
    def onWorkplayStartFromQML(self):
        trayIcon.workplayMenuStart.setVisible(False)
        trayIcon.workplayMenuPause.setVisible(True)
        trayIcon.workplayMenuResume.setVisible(False)
        trayIcon.workplayMenuStartNextWork.setVisible(False)
        trayIcon.workplayMenuStartNextBreak.setVisible(False)
        trayIcon.workplayMenuStop.setVisible(True)

    @QtCore.Slot(bool, str)
    def onWorkplayTimeChangefromQML(self, boolin, time):
        # Mirror the remaining Work & Play time in the menu and window title.
        trayIcon.workplayMenuTime.setVisible(boolin)
        trayIcon.workplayMenuTime.setText(time)
        view.setWindowTitle("Zeegaree | " + time)

    @QtCore.Slot()
    def onWorkplayPauseFromQML(self):
        trayIcon.workplayMenuStart.setVisible(False)
        trayIcon.workplayMenuPause.setVisible(False)
        trayIcon.workplayMenuResume.setVisible(True)
        trayIcon.workplayMenuStartNextWork.setVisible(False)
        trayIcon.workplayMenuStartNextBreak.setVisible(False)

    @QtCore.Slot()
    def onWorkplayStopFromQML(self):
        # Session fully stopped: reset menu and window title.
        trayIcon.workplayMenuStart.setVisible(True)
        trayIcon.workplayMenuPause.setVisible(False)
        trayIcon.workplayMenuResume.setVisible(False)
        trayIcon.workplayMenuTime.setVisible(False)
        trayIcon.workplayMenuStartNextWork.setVisible(False)
        trayIcon.workplayMenuStartNextBreak.setVisible(False)
        trayIcon.workplayMenuStop.setVisible(False)
        view.setWindowTitle("Zeegaree")

    @QtCore.Slot()
    def onWorkplayBreakFromQML(self):
        # Break running: offer to start the next work unit.
        trayIcon.workplayMenuStart.setVisible(False)
        trayIcon.workplayMenuPause.setVisible(False)
        trayIcon.workplayMenuResume.setVisible(False)
        trayIcon.workplayMenuStartNextWork.setVisible(True)
        trayIcon.workplayMenuStartNextBreak.setVisible(False)
        trayIcon.workplayMenuStop.setVisible(True)

    @QtCore.Slot()
    def onWorkplayBreakWarnFromQML(self):
        # Work unit over: offer to start the break.
        trayIcon.workplayMenuStart.setVisible(False)
        trayIcon.workplayMenuPause.setVisible(False)
        trayIcon.workplayMenuResume.setVisible(False)
        trayIcon.workplayMenuStartNextWork.setVisible(False)
        trayIcon.workplayMenuStartNextBreak.setVisible(True)
        trayIcon.workplayMenuStop.setVisible(True)

    def onWorkplayStart(self):
        rootObject.startWorkplay()

    def onWorkplayPause(self):
        rootObject.pauseWorkplay()

    def onWorkplayResume(self):
        rootObject.resumeWorkplay()

    def onWorkplayStartNextWork(self):
        rootObject.startNextWorkWorkplay()

    def onWorkplayStartNextBreak(self):
        rootObject.startNextBreakWorkplay()

    def onWorkplayStop(self):
        rootObject.stopWorkplay()
        view.setWindowTitle("Zeegaree")
class MainWindow(QtDeclarative.QDeclarativeView):
    def closeEvent(self, event):
        """ Don't quit app if user set so """
        # Guard clause: let the default close proceed unless the user opted
        # into hide-on-close.
        if rootObject.checkHideOnClose() != True:
            return
        event.ignore()
        self.hide()
        trayIcon.onHideMainWindow()
        trayIcon.showMessage('Zeegaree', 'Running in the background.')
if __name__ == '__main__':
    import sys

    APP_ID = 'com.mivoligo.zeegaree'
    # Single-instance guard: if another copy owns the D-Bus name, raise its
    # window and exit instead of starting a second UI.
    activated = activate_if_already_running(APP_ID)
    if activated:
        sys.exit(0)
    app = QtGui.QApplication(sys.argv)
    view = MainWindow()
    view.setSource(QtCore.QUrl(os.path.join(os.path.dirname(__file__), 'main.qml')))
    # Fit root object to be able to resize window
    view.setResizeMode(view.SizeRootObjectToView)
    view.setMinimumSize(QtCore.QSize(580, 480))
    view.setWindowIcon(QtGui.QIcon(NOTIFICATION_ICON))
    view.setWindowTitle("Zeegaree")
    # Get the root object of the user interface
    rootObject = view.rootObject()
    icon = QtGui.QIcon(TRAY_ICON)
    trayIcon = SystemTrayIcon(icon)
    notification = Notification()
    ticking = Ticking()
    launcher = Launcher()
    somethingtosave = SaveClass()
    # Expose Python helpers to QML.  Reuse the existing tray icon and main
    # window: the original built a second SystemTrayIcon (passing a
    # QSystemTrayIcon where the QIcon argument is expected) and a second,
    # never-shown MainWindow.
    context = view.rootContext()
    context.setContextProperty("notification", notification)
    context.setContextProperty("ticking", ticking)
    context.setContextProperty("launcher", launcher)
    context.setContextProperty("somethingtosave", somethingtosave)
    context.setContextProperty("trayicon", trayIcon)
    context.setContextProperty("mainwindow", view)
    trayIcon.show()
    trayIcon.activated.connect(trayIcon.iconActivated)
    view.show()
    listen_for_activation(APP_ID, view)
    sys.exit(app.exec_())
|
from __future__ import print_function
import numpy as np
import nlcpy as vp
import numba
from math import *
import time
# Backend identifiers accepted by stencil_xya()'s `lib` argument.
nb = 'numba'
vp_naive = 'nlcpy_naive'
vp_sca = 'nlcpy_sca'
@numba.stencil
def numba_kernel_1(din):
    # Radius-1 cross stencil (5 points) over the last two axes.
    return (din[0, 0, -1] +
            din[0, 0, 0] +
            din[0, 0, 1] +
            din[0, -1, 0] +
            din[0, 1, 0]
            )
@numba.stencil
def numba_kernel_2(din):
    # Radius-2 cross stencil (9 points) over the last two axes.
    return (din[0, 0, -2] +
            din[0, 0, -1] +
            din[0, 0, 0] +
            din[0, 0, 1] +
            din[0, 0, 2] +
            din[0, -2, 0] +
            din[0, -1, 0] +
            din[0, 2, 0] +
            din[0, 1, 0]
            )
@numba.stencil
def numba_kernel_3(din):
    # Radius-3 cross stencil (13 points) over the last two axes.
    return (din[0, 0, -3] +
            din[0, 0, -2] +
            din[0, 0, -1] +
            din[0, 0, 0] +
            din[0, 0, 1] +
            din[0, 0, 2] +
            din[0, 0, 3] +
            din[0, -3, 0] +
            din[0, -2, 0] +
            din[0, -1, 0] +
            din[0, 3, 0] +
            din[0, 2, 0] +
            din[0, 1, 0]
            )
@numba.stencil
def numba_kernel_4(din):
    # Radius-4 cross stencil (17 points) over the last two axes.
    return (din[0, 0, -4] +
            din[0, 0, -3] +
            din[0, 0, -2] +
            din[0, 0, -1] +
            din[0, 0, 0] +
            din[0, 0, 1] +
            din[0, 0, 2] +
            din[0, 0, 3] +
            din[0, 0, 4] +
            din[0, -4, 0] +
            din[0, -3, 0] +
            din[0, -2, 0] +
            din[0, -1, 0] +
            din[0, 4, 0] +
            din[0, 3, 0] +
            din[0, 2, 0] +
            din[0, 1, 0]
            )
@numba.njit
def numba_launcher(din, dout, N, I=1):
    # Run the radius-N stencil I times, writing into dout.
    # N outside 1..4 is a silent no-op.
    for _ in range(I):
        if N == 1:
            numba_kernel_1(din, out=dout)
        elif N == 2:
            numba_kernel_2(din, out=dout)
        elif N == 3:
            numba_kernel_3(din, out=dout)
        elif N == 4:
            numba_kernel_4(din, out=dout)
def numba_impl(din, dout, N, I=1):
    """Run the radius-N numba stencil I times and return elapsed seconds.

    A single warmup call triggers JIT compilation so the timed region
    measures execution only.
    """
    # warmup
    numba_launcher(din, dout, N, I=1)
    # perf_counter is monotonic, unlike time.time, so the measured interval
    # cannot be skewed by system clock adjustments.
    s = time.perf_counter()
    numba_launcher(din, dout, N, I=I)
    e = time.perf_counter()
    return e - s
def nlcpy_naive_impl(din, dout, N, I=1):
    """Time I iterations of a radius-N cross stencil using nlcpy slicing.

    Returns elapsed seconds; flushes the nlcpy request queue before and
    after so deferred execution is included in the measurement.
    """
    # Same offset list serves both axes (the original built two identical lists).
    offsets = list(range(-N, N + 1))
    vp.request.flush()
    # Monotonic clock: time.time can jump under clock adjustments.
    s = time.perf_counter()
    for _ in range(I):
        dout_v = dout[:, N:-N, N:-N]
        dout_v[...] = 0
        for lx in offsets:
            for ly in offsets:
                # Keep only the cross: points where lx == 0 or ly == 0.
                if lx != 0 and ly != 0:
                    continue
                dout_v += din[:, N+ly:din.shape[-2]-N+ly, N+lx:din.shape[-1]-N+lx]
    vp.request.flush()
    e = time.perf_counter()
    return e - s
def nlcpy_sca_impl(din, dout, N, I=1):
    """Time I executions of a radius-N cross stencil built with nlcpy SCA.

    Kernel construction happens outside the timed region; only execute()
    calls (plus the final flush) are measured.  Returns elapsed seconds.
    """
    offsets = list(range(-N, N + 1))
    sin, sout = vp.sca.create_descriptor((din, dout))
    d = vp.sca.empty_description()
    for lx in offsets:
        for ly in offsets:
            # Keep only the cross: points where lx == 0 or ly == 0.
            if lx != 0 and ly != 0:
                continue
            d += sin[0, ly, lx]
    kern = vp.sca.create_kernel(d, sout[0, 0, 0])
    vp.request.flush()
    # Monotonic clock: time.time can jump under clock adjustments.
    s = time.perf_counter()
    for _ in range(I):
        kern.execute()
    vp.request.flush()
    e = time.perf_counter()
    return e - s
def stencil_xya(din, dout, N, I=1, xp=np, lib=nb):
    """Dispatch the radius-N stencil benchmark to the backend named by *lib*.

    Returns the elapsed seconds reported by the chosen implementation.
    Raises ValueError for an unknown *lib* (the original fell through to an
    unbound-local NameError on `rt`).
    """
    # Compare by value: the original used `is`, which only works for these
    # strings because of CPython interning.
    if lib == nb:
        return numba_impl(din, dout, N, I)
    if lib == vp_naive:
        return nlcpy_naive_impl(din, dout, N, I)
    if lib == vp_sca:
        return nlcpy_sca_impl(din, dout, N, I)
    raise ValueError("unknown stencil backend: {!r}".format(lib))
|
import json
import logging
import os
import shutil
import sys
import tarfile
import numpy as np
from qtpy import QtCore
import llspy
import llspy.gui.exceptions as err
from llspy.gui.helpers import byteArrayToString, newWorkerThread, shortname
logger = logging.getLogger(__name__) # set root logger
# Locate the bundled cudaDeconv binary once at import time; None when it is
# unavailable so the GUI can still run without deconvolution support.
try:
    _CUDABIN = llspy.cudabinwrapper.get_bundled_binary()
except llspy.cudabinwrapper.CUDAbinException:
    _CUDABIN = None
class SubprocessWorker(QtCore.QObject):
    """This worker class encapsulates a QProcess (subprocess) for a given binary.

    It is intended to be subclassed, with new signals added as necessary and
    methods overwritten. The work() method is where the subprocess gets
    started; procReadyRead and procErrorRead define what to do with the
    stdout and stderr outputs.
    """

    processStarted = QtCore.Signal()
    finished = QtCore.Signal()

    def __init__(self, binary, args, env=None, wid=1, **kwargs):
        """
        Args:
            binary: executable name or path; resolved with llspy.util.which().
            args: argument list passed to the process.
            env: optional mapping of environment variables for the process.
            wid: numeric worker id used in log messages.

        Raises:
            err.MissingBinaryError: if the binary cannot be resolved.
        """
        super().__init__()
        self.id = int(wid)
        self.binary = llspy.util.which(binary)
        # Test the *resolved* path: which() returns a falsy value when the
        # binary is missing or not executable.  (The original tested the raw
        # `binary` argument, which is always truthy for a non-empty name, so
        # the error below could never fire for a missing binary.)
        if not self.binary:
            raise err.MissingBinaryError(
                f"Binary not found or not executable: {binary}"
            )
        self.args = args
        self.env = env
        self.polling_interval = 100  # ms between event-loop polls
        self.name = "Subprocess"
        self.__abort = False
        self._logger = logging.getLogger("llspy.worker." + type(self).__name__)
        self.process = QtCore.QProcess(self)
        self.process.readyReadStandardOutput.connect(self.procReadyRead)
        self.process.readyReadStandardError.connect(self.procErrorRead)

    @QtCore.Slot()
    def work(self):
        """
        this worker method does work that takes a long time. During this time,
        the thread's event loop is blocked, except if the application's
        processEvents() is called: this gives every thread (incl. main) a
        chance to process events, which means processing signals received
        from GUI (such as abort).
        """
        logger.debug(f"Subprocess {self.name} START")
        self._logger.info(
            "~" * 20 + "\nRunning {} thread_{} with args: "
            "\n{}\n".format(self.binary, self.id, " ".join(self.args)) + "\n"
        )
        self.process.finished.connect(self.onFinished)
        self.process.finished.connect(
            lambda: logger.debug(f"Subprocess {self.name} FINISH")
        )
        # set environmental variables (for instance, to chose GPU)
        if self.env is not None:
            sysenv = QtCore.QProcessEnvironment.systemEnvironment()
            for k, v in self.env.items():
                sysenv.insert(k, str(v))
                logger.debug(f"Setting Environment Variable: {k} = {v}")
            self.process.setProcessEnvironment(sysenv)
        self.process.start(self.binary, self.args)
        self.processStarted.emit()
        # Poll the event loop so queued signals (e.g. abort) are delivered
        # while the subprocess runs.
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.check_events)
        self.timer.start(self.polling_interval)

    def check_events(self):
        """Pump the event loop; kill the process if an abort was requested."""
        QtCore.QCoreApplication.instance().processEvents()
        if self.__abort:
            # self.process.terminate() # didn't work on Windows
            self.process.kill()
            # note that "step" value will not necessarily be same for every thread
            self._logger.info(f"aborting {self.name} #{self.id}")
            self.process.waitForFinished()

    @QtCore.Slot()
    def procReadyRead(self):
        """Forward subprocess stdout to the worker logger."""
        line = byteArrayToString(self.process.readAllStandardOutput())
        if line != "":
            # NOTE(review): logging.Logger has no update() method; this
            # relies on a custom logger class being installed elsewhere in
            # llspy -- confirm, otherwise this raises AttributeError.
            self._logger.update(line.rstrip())

    @QtCore.Slot()
    def procErrorRead(self):
        """Forward subprocess stderr to the worker logger as errors."""
        self._logger.error(f"Error in subprocess: {self.name}")
        line = byteArrayToString(self.process.readAllStandardError())
        if line != "":
            self._logger.error(line.rstrip())

    @QtCore.Slot(int, QtCore.QProcess.ExitStatus)
    def onFinished(self, exitCode, exitStatus):
        """Log the exit status and emit finished()."""
        statusmsg = {0: "exited normally", 1: "crashed"}
        self._logger.info(
            "{} #{} {} with exit code: {}".format(
                self.name, self.id, statusmsg[exitStatus], exitCode
            )
        )
        self.finished.emit()

    @QtCore.Slot()
    def abort(self):
        """Request termination; honored on the next check_events() poll."""
        self._logger.info(f"{self.name} #{self.id} notified to abort")
        self.__abort = True
class CudaDeconvWorker(SubprocessWorker):
    """Subprocess worker dedicated to the bundled cudaDeconv binary."""

    file_finished = QtCore.Signal()
    finished = QtCore.Signal(int)  # worker id

    def __init__(self, args, env=None, **kwargs):
        super().__init__(_CUDABIN, args, env, **kwargs)
        self.name = "CudaDeconv"

    def procReadyRead(self):
        """Parse cudaDeconv stdout line by line, emitting progress signals."""
        while self.process.canReadLine():
            raw = self.process.readLine()
            text = byteArrayToString(raw)
            if ">>>file_finished" in text:
                self.file_finished.emit()
            elif "Iteration" in text:
                self._logger.info(f"CUDAworker {self.id}: " + text.rstrip())
            else:
                self._logger.info(text.rstrip())

    @QtCore.Slot(int, QtCore.QProcess.ExitStatus)
    def onFinished(self, exitCode, exitStatus):
        """Log the exit status and emit finished(worker_id)."""
        statusmsg = {0: "exited normally", 1: "crashed"}
        outcome = statusmsg[exitStatus]
        self._logger.info(
            f"{self.name} #{self.id} {outcome} with exit code: {exitCode}"
        )
        self.finished.emit(self.id)
class CompressionWorker(SubprocessWorker):
    """Worker that compresses or decompresses an LLS data folder using
    pigz (Windows) or lbzip2 (elsewhere)."""

    status_update = QtCore.Signal(str, int)

    def __init__(self, path, mode="compress", binary=None, wid=1, **kwargs):
        """
        Args:
            path: directory to (de)compress.
            mode: "compress" or "decompress".
            binary: optional compression program name; auto-selected by
                platform when None.
            wid: numeric worker id.
        """
        if binary is None:
            binary = "pigz" if sys.platform.startswith("win32") else "lbzip2"
        binary = llspy.util.which(binary)
        if not binary:
            raise err.MissingBinaryError(
                f"No binary found for compression program: {binary}"
            )
        # Pass wid by keyword: the original passed it positionally into the
        # `env` slot of SubprocessWorker.__init__, leaving env set to an int
        # and the worker id stuck at its default.
        super().__init__(binary, [], wid=wid, **kwargs)
        self.path = path
        self.mode = mode
        self.name = "CompressionWorker"

    @QtCore.Slot()
    def work(self):
        """Assemble the argument list for the chosen mode, then launch the
        subprocess and start polling for abort events."""
        if self.mode == "decompress":
            self.status_update.emit(f"Decompressing {shortname(self.path)}...", 0)
            tar_compressed = llspy.util.find_filepattern(self.path, "*.tar*")
            tar_extension = os.path.splitext(tar_compressed)[1]
            if tar_extension not in llspy.compress.EXTENTIONS:
                self._logger.error("Unexpected uncompressed tar file found")
                raise err.LLSpyError(
                    "found a tar file, but don't know how to decompress"
                )
            # If the configured binary cannot handle this extension, fall
            # back to the first available program that can.
            if self.binary not in llspy.compress.EXTENTIONS[tar_extension]:
                for compbin in llspy.compress.EXTENTIONS[tar_extension]:
                    if llspy.util.which(compbin):
                        self.binary = llspy.util.which(compbin)
                        break
            if not self.binary:
                raise err.MissingBinaryError(
                    "No binary found for compression program: {}".format(
                        llspy.compress.EXTENTIONS[tar_extension]
                    )
                )
            self.args = ["-dv", tar_compressed]
            # After decompression, extract the resulting tar in place.
            self.process.finished.connect(
                lambda: self.untar(os.path.splitext(tar_compressed)[0])
            )
        elif self.mode == "compress":
            # Refuse to compress while a tarball coexists with raw tiffs.
            if llspy.util.find_filepattern(self.path, "*.tar*"):
                raise err.LLSpyError(
                    "There are both raw tiffs and a compressed file in "
                    "directory: {}".format(self.path),
                    "If you would like to compress this directory, "
                    "please either remove any existing *.tar files, or remove "
                    "the uncompressed tiff files. Alternatively, you can use "
                    "the Decompress Raw function to decompress the *.tar file. "
                    "This will overwrite any raw tiffs with matching names",
                )
            self.status_update.emit(f"Compressing {shortname(self.path)}...", 0)
            tarball = llspy.compress.tartiffs(self.path)
            self.args = ["-v", tarball]
            self.process.finished.connect(self.finished.emit)
        msg = "\nRunning {} thread_{} with args:\n{}\n".format(
            self.name, self.id, self.binary + " " + " ".join(self.args)
        )
        self._logger.info("~" * 20 + msg)
        self.process.start(self.binary, self.args)
        self.processStarted.emit()
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.check_events)
        self.timer.start(self.polling_interval)

    def untar(self, tarball, delete=True):
        """Extract *tarball* next to itself, optionally deleting it after.

        Emits finished() whether or not the tarball existed.
        """
        if not os.path.isfile(tarball):
            self.finished.emit()
            return
        # (original wrapped this in a no-op `except Exception: raise`)
        with tarfile.open(tarball) as tar:
            tar.extractall(path=os.path.dirname(tarball))
        if delete:
            os.remove(tarball)
        self.finished.emit()

    @QtCore.Slot()
    def procErrorRead(self):
        # for some reason, lbzip2 puts its verbose output in stderr
        line = byteArrayToString(self.process.readAllStandardError())
        if line != "":
            if "%" in line:
                self.result_string = line
            self._logger.info(line.rstrip())
# class CorrectionWorker(QtCore.QObject):
# """docstring for ImCorrector"""
# finished = QtCore.Signal()
# error = QtCore.Signal()
# def __init__(self, path, tRange, camparams, median, target):
# super(CorrectionWorker, self).__init__()
# self.path = path
# self.tRange = tRange
# self.camparams = camparams
# self.median = median
# self.target = target
# self.E = llspy.LLSdir(self.path)
# @QtCore.Slot()
# def work(self):
# try:
# self.E.correct_flash(trange=self.tRange, camparamsPath=self.camparams,
# median=self.median, target=self.target)
# except Exception:
# self.error.emit()
# raise
# self.finished.emit()
def divide_arg_queue(E, n_gpus, binary):
    """generate all the channel specific cudaDeconv arguments for this item."""
    argQueue = []

    def split(a, n):
        # Split sequence `a` into `n` near-equal contiguous chunks.
        k, m = divmod(len(a), n)
        return (a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n))

    P = E.localParams()
    cudaOpts = P.copy()
    n_time = len(P.tRange)
    for i, chan in enumerate(P.cRange):
        # generate channel and gpu specific options
        # (cudaOpts is mutated in place each iteration; assemble_args
        # snapshots it through **-unpacking at call time)
        cudaOpts["input-dir"] = str(E.path)
        cudaOpts["otf-file"] = P.otfs[i]
        cudaOpts["background"] = P.background[i] if not P.correctFlash else 0
        cudaOpts["wavelength"] = float(E.parameters.channels[chan]) / 1000
        # if the number of GPUs is less than or equal to
        # the number of timepoints being processed
        # then split the work across GPUs by processing each channel,
        # dividing time across the available GPUS
        if n_gpus <= n_time:
            tRanges = list(split(P.tRange, n_gpus))
            for tRange in tRanges:
                # filter by channel and trange
                if len(tRange) == E.parameters.nt:
                    # chunk covers every timepoint: match on channel only
                    cudaOpts["filename-pattern"] = f"_ch{chan}_"
                else:
                    cudaOpts["filename-pattern"] = "_ch{}.*_stack{}".format(
                        chan, llspy.util.pyrange_to_perlregex(tRange)
                    )
                argQueue.append(binary.assemble_args(**cudaOpts))
        # if there are more GPUs than timepoints available
        # (e.g. single stack of multiple channels)
        # then if there are enough gpus to cover all channels,
        # divide channels across gpus
        else:
            cudaOpts["filename-pattern"] = f"_ch{chan}_"
            argQueue.append(binary.assemble_args(**cudaOpts))
    return argQueue
class LLSitemWorker(QtCore.QObject):
sig_starting_item = QtCore.Signal(str, int) # item path, numfiles
status_update = QtCore.Signal(str) # update mainGUI status
progressUp = QtCore.Signal() # set progressbar value
progressValue = QtCore.Signal(int) # set progressbar value
progressMaxVal = QtCore.Signal(int) # set progressbar maximum
clockUpdate = QtCore.Signal(str) # set progressbar value
file_finished = QtCore.Signal() # worker id, filename
finished = QtCore.Signal()
sig_abort = QtCore.Signal()
error = QtCore.Signal()
skipped = QtCore.Signal(str)
def __init__(self, lls_dir, wid, opts, **kwargs):
super().__init__()
if isinstance(lls_dir, llspy.LLSdir):
self.E = lls_dir
self.path = str(self.E.path)
else:
self.path = str(lls_dir)
self.E = llspy.LLSdir(self.path)
self.__id = int(wid)
self.opts = opts
self.shortname = shortname(self.path)
self.aborted = False
self.__argQueue = [] # holds all argument lists that will be sent to threads
self.GPU_SET = QtCore.QCoreApplication.instance().gpuset
self.__CUDAthreads = {gpu: None for gpu in self.GPU_SET}
if not len(self.GPU_SET):
self.error.emit()
raise err.InvalidSettingsError("No GPUs selected. Check Config Tab")
self._logger = logging.getLogger("llspy.worker." + type(self).__name__)
@QtCore.Slot()
def work(self):
if self.E.is_compressed():
self.status_update.emit(f"Decompressing {self.E.basename}")
self.E.decompress()
if not self.E.ready_to_process:
if not self.E.has_lls_tiffs:
self._logger.warning(f"No TIFF files to process in {self.path}")
if not self.E.parameters.isReady():
self._logger.warning(f"Incomplete parameters for: {self.path}")
self.skipped.emit(self.path)
return
# this needs to go here instead of __init__ in case folder is compressed
try:
self.P = self.E.localParams(**self.opts)
except Exception:
self.error.emit()
self.finished.emit()
raise
# we process one folder at a time. Progress bar updates per Z stack
# so the maximum is the total number of timepoints * channels
self.nFiles = len(self.P.tRange) * len(self.P.cRange)
self._logger.info("#" * 50)
self._logger.info(f"Processing {self.E.basename}")
self._logger.info("#" * 50 + "\n")
self._logger.debug(f"Full path {self.path}")
self._logger.debug(f"Parameters {self.E.parameters}\n")
if self.P.correctFlash:
try:
self.status_update.emit(
f"Correcting Flash artifact on {self.E.basename}"
)
self.E.path = self.E.correct_flash(**self.P)
except llspy.llsdir.LLSpyError:
self.error.emit()
raise
# if not flash correcting but there is trimming/median filter requested
elif self.P.medianFilter or any(
[any(i) for i in (self.P.trimX, self.P.trimY, self.P.trimZ)]
):
self.E.path = self.E.median_and_trim(**self.P)
self.nFiles_done = 0
self.progressValue.emit(0)
self.progressMaxVal.emit(self.nFiles)
self.status_update.emit(f"Processing {self.E.basename}: (0 of {self.nFiles})")
# only call cudaDeconv if we need to deskew or deconvolve
if self.P.nIters > 0 or (self.P.deskew != 0 and self.P.saveDeskewedRaw):
try:
# check the binary path and create object
binary = llspy.cudabinwrapper.CUDAbin(_CUDABIN)
except Exception:
self.error.emit()
raise
self.__argQueue = divide_arg_queue(self.E, len(self.GPU_SET), binary)
# with the argQueue populated, we can now start the workers
if not len(self.__argQueue):
self._logger.error(
"No channel arguments to process in LLSitem: %s" % self.shortname
)
self._logger.debug(f"LLSitemWorker FINISH: {self.E.basename}")
self.finished.emit()
return
self.startCUDAWorkers()
else:
self.post_process()
self.timer = QtCore.QTime()
self.timer.restart()
def startCUDAWorkers(self):
# initialize the workers and threads
for gpu in self.GPU_SET:
# create new CUDAworker for every thread
# each CUDAworker will control one cudaDeconv process (which only gets
# one wavelength at a time)
# grab the next arguments from the queue
# THIS BREAKS the relationship between num_cuda_threads
# and self.__CUDAworkers_done...
# if len(self.__argQueue)== 0:
# return
if not len(self.__argQueue):
return
args = self.__argQueue.pop(0)
CUDAworker, thread = newWorkerThread(
CudaDeconvWorker,
args,
{"CUDA_VISIBLE_DEVICES": gpu},
wid=gpu,
workerConnect={
# get progress messages from CUDAworker and pass to parent
"file_finished": self.on_file_finished,
"finished": self.on_CUDAworker_done,
# any messages go straight to the log window
# 'error': self.errorstring # implement error signal?
},
)
# need to store worker too otherwise will be garbage collected
self.__CUDAthreads[gpu] = (thread, CUDAworker)
# connect mainGUI abort CUDAworker signal to the new CUDAworker
self.sig_abort.connect(CUDAworker.abort)
# start the thread
thread.start()
@QtCore.Slot()
def on_file_finished(self):
# update status bar
self.nFiles_done = self.nFiles_done + 1
self.status_update.emit(
"Processing {}: ({} of {})".format(
self.shortname, self.nFiles_done, self.nFiles
)
)
# update progress bar
self.progressUp.emit()
# update the countdown timer with estimate of remaining time
avgTimePerFile = int(self.timer.elapsed() / self.nFiles_done)
filesToGo = self.nFiles - self.nFiles_done + 1
remainingTime = filesToGo * avgTimePerFile
timeAsString = QtCore.QTime(0, 0).addMSecs(remainingTime).toString()
self.clockUpdate.emit(timeAsString)
@QtCore.Slot(int)
def on_CUDAworker_done(self, worker_id):
# a CUDAworker has finished... update the log and check if any are still going
logger.debug(f"CudaDeconv Worker on GPU {worker_id} finished")
thread, _ = self.__CUDAthreads[worker_id]
thread.quit()
thread.wait()
self.__CUDAthreads[worker_id] = None
# FIXME: this forces all GPUs to be done before ANY can continue
# ... only as fast as the slowest GPU
# could probably fix easily by delivering a value to startCudaWorkers
# instead of looping through gpu inside of it
if not any([v for v in self.__CUDAthreads.values()]):
# if there's still stuff left in the argQueue for this item, keep going
if self.aborted:
self.aborted = False
self.finished.emit()
elif len(self.__argQueue):
self.startCUDAWorkers()
# otherwise send the signal that this item is done
else:
self.post_process()
    def post_process(self):
        """Run the post-deconvolution steps enabled in the parameter set
        ``self.P`` — channel registration, MIP merging, corrected-folder
        cleanup, raw compression, and processing-log output — then emit
        ``finished``.
        """
        if self.P.doReg:
            self.status_update.emit(f"Doing Channel Registration: {self.E.basename}")
            try:
                self.E.register(
                    self.P.regRefWave,
                    self.P.regMode,
                    self.P.regCalibPath,
                    discard=self.P.deleteUnregistered,
                )
            except Exception:
                # Log, but re-raise: registration failure should abort the item.
                self._logger.error("REGISTRATION FAILED")
                raise
        if self.P.mergeMIPs:
            self.status_update.emit(f"Merging MIPs: {self.E.basename}")
            self.E.mergemips()
        else:
            for mipfile in self.E.path.glob("**/*comboMIP_*"):
                mipfile.unlink() # clean up any combo MIPs from previous runs
        # if self.P.mergeMIPsraw:
        #     if self.E.path.joinpath('Deskewed').is_dir():
        #         self.status_update.emit(
        #             'Merging raw MIPs: {}'.format(self.E.basename))
        #         self.E.mergemips('Deskewed')
        # if we did camera correction, move the resulting processed folders to
        # the parent folder, and optionally delete the corrected folder
        if self.P.moveCorrected and self.E.path.name == "Corrected":
            llspy.llsdir.move_corrected(str(self.E.path))
            self.E.path = self.E.path.parent
        if not self.P.keepCorrected:
            shutil.rmtree(str(self.E.path.joinpath("Corrected")), ignore_errors=True)
        if self.P.compressRaw:
            self.status_update.emit(f"Compressing Raw: {self.E.basename}")
            self.E.compress()
        if self.P.writeLog:
            outname = str(
                self.E.path.joinpath(f"{self.E.basename}_{llspy.config.__OUTPUTLOG__}")
            )
            try:
                with open(outname, "w") as outfile:
                    json.dump(self.P, outfile, cls=llspy.util.paramEncoder)
            except FileNotFoundError:
                # Best-effort: a missing target directory should not fail the item.
                self._logger.error("Could not write processing log file.")
        self.finished.emit()
@QtCore.Slot()
def abort(self):
self._logger.info(f"LLSworker #{self.__id} notified to abort")
if any([v for v in self.__CUDAthreads.values()]):
self.aborted = True
self.__argQueue = []
self.sig_abort.emit()
# self.processButton.setDisabled(True) # will be reenabled when workers done
else:
self.finished.emit()
class TimePointWorker(QtCore.QObject):
    """Background worker that renders a preview of the requested timepoints
    and channels from an LLS dataset and reports results via Qt signals.

    Signals:
        finished: emitted on every exit path of work() (success or failure).
        previewReady: (stack, dx, dzFinal, localParams) when a preview is made.
        updateCrop: (width, offset) when opts["cropMode"] == "auto".
    """
    finished = QtCore.Signal()
    previewReady = QtCore.Signal(np.ndarray, float, float, dict)
    updateCrop = QtCore.Signal(int, int)
    def __init__(self, lls_dir, tRange, cRange, opts, ditch_partial=True, **kwargs):
        """``lls_dir`` may be an llspy.LLSdir instance or a path string."""
        super().__init__()
        if isinstance(lls_dir, llspy.LLSdir):
            self.E = lls_dir
            self.path = str(self.E.path)
        else:
            # assume it's a string
            self.path = str(lls_dir)
            self.E = llspy.LLSdir(self.path, ditch_partial)
        self.tRange = tRange
        self.cRange = cRange
        self.opts = opts
        self._logger = logging.getLogger("llspy.worker." + type(self).__name__)
    @QtCore.Slot()
    def work(self):
        """Build the preview; always emit ``finished`` and re-raise errors so
        callers (and any logging hooks) still see them."""
        if not self.E.parameters.isReady():
            self.finished.emit()
            raise err.InvalidSettingsError(
                "Parameters are incomplete for this item. "
                "Please add any missing/higlighted parameters."
            )
        try:
            previewStack = self.E.preview(self.tRange, self.cRange, **self.opts)
            if previewStack is not None:
                self.previewReady.emit(
                    previewStack,
                    self.E.parameters.dx,
                    self.E.parameters.dzFinal,
                    self.E._localParams,
                )
                # TODO: this needs to be a signal, but shold only be emitted when the caller
                # was the preview button (not a watcher)
                if self.opts["cropMode"] == "auto":
                    wd = self.E.get_feature_width(
                        pad=self.opts["cropPad"], t=np.min(self.tRange)
                    )
                    self.updateCrop.emit(wd["width"], wd["offset"])
            else:
                raise err.InvalidSettingsError("No stacks to preview... check tRange")
        except Exception:
            self.finished.emit()
            raise
        self.finished.emit()
|
<reponame>EuphoriaYan/sales_pred
# -*- coding: utf-8 -*-
# @Time: 2021/3/13 15:47
# @Author: Euphoria
# @File: model.py
import os
import sys
import torch
from torch import nn
# A very simple three-layer MLP regressor.
class mlp(nn.Module):
    """Three-layer perceptron mapping ``in_feature`` inputs to one value.

    Layout: Linear -> Dropout(0.3) -> ReLU -> Linear -> Dropout(0.1) -> ReLU -> Linear.
    """
    def __init__(self, in_feature, **kwargs):
        super().__init__()
        self.in_feature = in_feature
        self.relu = nn.ReLU()
        self.linear1 = nn.Linear(in_feature, in_feature)
        self.dropout1 = nn.Dropout(p=0.3)
        self.linear2 = nn.Linear(in_feature, in_feature // 2)
        self.dropout2 = nn.Dropout(p=0.1)
        self.linear3 = nn.Linear(in_feature // 2, 1)
    def forward(self, input):
        hidden = self.relu(self.dropout1(self.linear1(input)))
        hidden = self.relu(self.dropout2(self.linear2(hidden)))
        return self.linear3(hidden)
# A very simple single-layer LSTM regressor.
class lstm(nn.Module):
    """Linear -> Dropout(0.1) -> ReLU -> single-layer LSTM -> Linear head
    producing one value per timestep (input is batch-first)."""
    def __init__(self, in_feature, bidirectional, **kwargs):
        super().__init__()
        self.in_feature = in_feature
        self.bidirectional = bool(bidirectional)
        self.relu = nn.ReLU()
        self.linear1 = nn.Linear(in_feature, in_feature)
        self.dropout1 = nn.Dropout(p=0.1)
        hidden_size = in_feature
        self.RNN = nn.LSTM(
            input_size=in_feature,
            hidden_size=hidden_size,
            batch_first=True,
            bidirectional=self.bidirectional,
            num_layers=1
        )
        # Bidirectional LSTMs concatenate both directions, doubling the width.
        head_width = hidden_size * 2 if bidirectional else hidden_size
        self.linear2 = nn.Linear(head_width, 1)
    def forward(self, input):
        projected = self.relu(self.dropout1(self.linear1(input)))
        outputs, _state = self.RNN(projected)
        return self.linear2(outputs)
# Single-layer LSTM with attention over the final hidden state. Per the
# original (Chinese) notes: linear1 may be removable — worth an ablation —
# and uni- vs bidirectional variants are worth comparing. An earlier scheme
# using standalone attention matrices was abandoned as problematic.
class lstm_attn(nn.Module):
    """LSTM whose per-timestep outputs are reweighted by dot-product
    attention against the final hidden state, then projected to one value
    per timestep by a linear head.
    """
    def __init__(self, in_feature, bidirectional, **kwargs):
        super().__init__()
        self.in_feature = in_feature
        self.bidirectional = True if bidirectional else False
        self.linear1 = nn.Linear(in_feature, in_feature)
        self.RNN = nn.LSTM(
            input_size=in_feature,
            hidden_size=in_feature,
            batch_first=True,
            bidirectional=self.bidirectional
        )
        # Head width matches the LSTM output width (doubled when bidirectional).
        if self.bidirectional:
            self.linear2 = nn.Linear(in_feature * 2, 1)
        else:
            self.linear2 = nn.Linear(in_feature, 1)
    def forward(self, input):
        """input: (batch, seq, in_feature) -> (batch, seq, 1)."""
        x = self.linear1(input)
        x, (hn, cn) = self.RNN(x)
        # hn is (num_directions, batch, hidden). The previous code reshaped
        # it with hn.view(-1, hidden * num_directions, 1), which interleaves
        # batch and direction elements incorrectly in the bidirectional case;
        # moving the batch axis first before flattening fixes that, and is a
        # no-op for the unidirectional case.
        batch_size = hn.size(1)
        hn = hn.transpose(0, 1).reshape(batch_size, -1, 1)
        # Attention score: dot product of each timestep output with hn.
        attn = torch.bmm(x, hn)
        attn_weights = torch.softmax(attn, dim=1)
        x = x * attn_weights
        return self.linear2(x)
if __name__ == '__main__':
    # This module only defines models; running it directly is a no-op.
    pass
|
<filename>bfd/datastore/logic.py
"""
Defines the logical operations that make use of the data layer.
Copyright (C) 2020 <NAME>.
"Commons Clause" License Condition v1.0:
The Software is provided to you by the Licensor under the License, as defined
below, subject to the following condition.
Without limiting other conditions in the License, the grant of rights under the
License will not include, and the License does not grant to you, the right to
Sell the Software.
For purposes of the foregoing, "Sell" means practicing any or all of the rights
granted to you under the License to provide to third parties, for a fee or
other consideration (including without limitation fees for hosting or
consulting/support services related to the Software), a product or service
whose value derives, entirely or substantially, from the functionality of the
Software. Any license notice or attribution required by the License must also
include this Commons Clause License Condition notice.
MIT License:
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import structlog # type: ignore
from typing import Sequence, Union, List, Dict, Set
from django.db.models import Q # type: ignore
from django.http import HttpRequest # type: ignore
from django.contrib.auth.models import User # type: ignore
from django.utils.translation import gettext_lazy as _ # type: ignore
from datastore import models
# from datastore.query import evaluate
logger = structlog.get_logger()
def create_namespace(
    user: User,
    name: str,
    description: str,
    admins: Union[Sequence[User], None] = None,
) -> models.Namespace:
    """
    Create a namespace administered by `user` plus any extra `admins`.

    Site admins may create namespaces with any name; a regular user may only
    create the namespace whose name matches their own (unique) username.
    Anyone else gets a PermissionError.
    """
    allowed = user.is_admin or user.username == name
    if not allowed:
        raise PermissionError(
            _("User doesn't have permission to create a new namespace.")
        )
    namespace = models.Namespace.objects.create_namespace(
        name=name, description=description, user=user
    )
    # The creator is always an admin; skip them if repeated in `admins`.
    administrators: List[User] = [user]
    if admins:
        administrators += [u for u in admins if u.id != user.id]
    namespace.admins.add(*administrators)
    logger.msg(
        "Create namespace.",
        user=user.username,
        namespace=name,
        description=description,
        admins=[admin.username for admin in administrators],
    )
    return namespace
def get_namespace(user: User, name: str) -> Dict:
    """
    Return a dictionary representation of the referenced Namespace as viewed by
    the referenced user (with associated privileges).

    Admin users see all attributes of all aspects of the namespace. Regular
    users see a limited set of attributes on only those aspects of the
    namespace for which they have privileges to see.

    Raises Namespace.DoesNotExist if no such namespace exists.
    """
    n = models.Namespace.objects.get(name=name)
    result = {
        "name": n.name,
        "description": n.description,
    }
    tags: List[Dict] = []
    if user.is_admin or n.admins.filter(pk=user.pk).exists():
        # Admin view: include audit metadata and complete tag records.
        result["created_by"] = n.created_by.username
        result["created_on"] = str(n.created_on)
        result["updated_by"] = n.updated_by.username
        result["updated_on"] = str(n.updated_on)
        result["admins"] = [admin.username for admin in n.admins.all()]
        for tag in n.tag_set.all():
            tags.append(
                {
                    "name": tag.name,
                    "description": tag.description,
                    "type_of": tag.get_type_of_display(),
                    "private": tag.private,
                    "users": [user.username for user in tag.users.all()],
                    "readers": [
                        reader.username for reader in tag.readers.all()
                    ],
                    "created_by": tag.created_by.username,
                    "created_on": str(tag.created_on),
                    "updated_by": tag.updated_by.username,
                    "updated_on": str(tag.updated_on),
                }
            )
    else:
        # Get all public tags, or tags where the user is a user or reader.
        # NOTE(review): the OR-join across users/readers can produce
        # duplicate rows for the same tag — confirm whether .distinct()
        # is needed here.
        query = n.tag_set.filter(
            Q(private=False) | Q(users__id=user.id) | Q(readers__id=user.id)
        )
        for tag in query:
            tags.append(
                {
                    "name": tag.name,
                    "description": tag.description,
                    "type_of": tag.get_type_of_display(),
                }
            )
    result["tags"] = tags
    return result
def update_namespace_description(
    user: User, name: str, description: str
) -> models.Namespace:
    """
    Replace the description of the namespace called `name`.

    Permitted for site admins and members of the namespace's "admins" group;
    anyone else gets a PermissionError.
    """
    namespace = models.Namespace.objects.get(name=name)
    if not (user.is_admin or namespace.admins.filter(pk=user.pk).exists()):
        raise PermissionError(
            _("User doesn't have permission to describe a namespace.")
        )
    namespace.description = description
    namespace.save()
    logger.msg(
        "Update namespace description.",
        user=user.username,
        namespace=name,
        description=description,
    )
    return namespace
def add_namespace_admins(
    user: User, name: str, admins: Sequence[User]
) -> models.Namespace:
    """
    Grant namespace-admin status to each user in `admins`.

    Permitted for site admins and members of the namespace's "admins" group;
    anyone else gets a PermissionError.
    """
    namespace = models.Namespace.objects.get(name=name)
    if not (user.is_admin or namespace.admins.filter(pk=user.pk).exists()):
        raise PermissionError(
            _("User doesn't have permission to add admins to namespace.")
        )
    namespace.admins.add(*admins)
    logger.msg(
        "Add namespace administrators.",
        user=user.username,
        namespace=name,
        admins=[a.username for a in admins],
    )
    return namespace
def remove_namespace_admins(
    user: User, name: str, admins: Sequence[User]
) -> models.Namespace:
    """
    Revoke namespace-admin status from each user in `admins`.

    Permitted for site admins and members of the namespace's "admins" group;
    anyone else gets a PermissionError.
    """
    namespace = models.Namespace.objects.get(name=name)
    if not (user.is_admin or namespace.admins.filter(pk=user.pk).exists()):
        raise PermissionError(
            _("User doesn't have permission to remove admins from namespace.")
        )
    namespace.admins.remove(*admins)
    logger.msg(
        "Remove namespace administrators.",
        user=user.username,
        namespace=name,
        admins=[a.username for a in admins],
    )
    return namespace
def create_tag(
    user: User,
    name: str,
    description: str,
    type_of: str,
    namespace: models.Namespace,
    private: bool,
    users: Union[Sequence[User], None] = None,
    readers: Union[Sequence[User], None] = None,
) -> models.Tag:
    """
    Create a new tag with the referenced name, description, type and namespace.

    Only users may use the resulting tag to annotate data onto objects.
    If the private flag is True then the readers should contain users who are
    exceptions to the private flag.

    Users may read and annotate with the tag. Readers may only read.

    Only site admins or regular users in the parent namespace's "admins" group
    may make this change.

    Any other user making such a request will result in a PermissionError
    being thrown.
    """
    if user.is_admin or namespace.admins.filter(pk=user.pk).exists():
        t = models.Tag.objects.create_tag(
            user=user,
            name=name,
            description=description,
            type_of=type_of,
            namespace=namespace,
            private=private,
        )
        # The creator is always a "user" of the tag; skip them if repeated.
        users_list: List[User] = [
            user,
        ]
        if users:
            users_list += [u for u in users if u.id != user.id]
        t.users.add(*users_list)
        if readers:
            t.readers.add(*readers)
        else:
            # Normalise to an empty list purely for the log message below.
            readers = []
        logger.msg(
            "Create tag.",
            user=user.username,
            name=name,
            description=description,
            type_of=t.get_type_of_display(),
            namespace=namespace.name,
            private=private,
            users=[u.username for u in users_list],
            readers=[r.username for r in readers],
        )
        return t
    else:
        raise PermissionError(
            _("User doesn't have permission to create a tag in the namespace.")
        )
def get_tag(user: User, name: str, namespace: str) -> Dict:
    """
    Return a dict describing the tag `namespace`/`name` as visible to `user`.

    Site admins and namespace admins get the full record including audit
    fields and user/reader lists; tag readers get only the public attributes;
    everyone else gets a PermissionError.
    """
    parent = models.Namespace.objects.get(name=namespace)
    tag = models.Tag.objects.get(name=name, namespace=parent)
    if user.is_admin or parent.admins.filter(pk=user.pk).exists():
        return {
            "name": tag.name,
            "namespace": parent.name,
            "description": tag.description,
            "path": tag.path,
            "type_of": tag.get_type_of_display(),
            "private": tag.private,
            "users": [u.username for u in tag.users.all()],
            "readers": [r.username for r in tag.readers.all()],
            "created_by": tag.created_by.username,
            "created_on": str(tag.created_on),
            "updated_by": tag.updated_by.username,
            "updated_on": str(tag.updated_on),
        }
    if not tag.is_reader(user):
        raise PermissionError(
            _("User doesn't have permission to view the tag.")
        )
    return {
        "name": tag.name,
        "namespace": parent.name,
        "description": tag.description,
        "path": tag.path,
        "type_of": tag.get_type_of_display(),
        "private": tag.private,
    }
def update_tag_description(
    user: User, name: str, namespace: str, description: str
) -> models.Tag:
    """
    Replace the description of the tag `namespace`/`name`.

    Permitted for site admins and admins of the parent namespace; anyone else
    gets a PermissionError.
    """
    parent = models.Namespace.objects.get(name=namespace)
    if not (user.is_admin or parent.admins.filter(pk=user.pk).exists()):
        raise PermissionError(
            _("User doesn't have permission to update the tag.")
        )
    tag = models.Tag.objects.get(name=name, namespace=parent)
    tag.description = description
    tag.save()
    logger.msg(
        "Update tag description.",
        user=user.username,
        tag=name,
        namespace=namespace,
        description=description,
    )
    return tag
def set_tag_private(
    user: User, name: str, namespace: str, private: bool
) -> models.Tag:
    """
    Set the private flag of the tag `namespace`/`name`.

    Permitted for site admins and admins of the parent namespace; anyone else
    gets a PermissionError.
    """
    parent = models.Namespace.objects.get(name=namespace)
    if not (user.is_admin or parent.admins.filter(pk=user.pk).exists()):
        raise PermissionError(
            _("User doesn't have permission to update the tag.")
        )
    tag = models.Tag.objects.get(name=name, namespace=parent)
    tag.private = private
    tag.save()
    logger.msg(
        "Update tag privacy.",
        user=user.username,
        tag=name,
        namespace=namespace,
        private=private,
    )
    return tag
def add_tag_users(
    user: User, name: str, namespace: str, users: Sequence[User]
) -> models.Tag:
    """
    Add `users` to the tag's users list (who can both annotate and read).

    Permitted for site admins and admins of the parent namespace; anyone else
    gets a PermissionError.
    """
    parent = models.Namespace.objects.get(name=namespace)
    if not (user.is_admin or parent.admins.filter(pk=user.pk).exists()):
        raise PermissionError(
            _("User doesn't have permission to add users to the tag.")
        )
    tag = models.Tag.objects.get(name=name, namespace=parent)
    tag.users.add(*users)
    logger.msg(
        "Add tag users.",
        user=user.username,
        tag=name,
        namespace=namespace,
        users=[u.username for u in users],
    )
    return tag
def remove_tag_users(
    user: User, name: str, namespace: str, users: Sequence[User]
) -> models.Tag:
    """
    Remove `users` from the tag's users list (who can both annotate and read).

    Permitted for site admins and admins of the parent namespace; anyone else
    gets a PermissionError.
    """
    parent = models.Namespace.objects.get(name=namespace)
    if not (user.is_admin or parent.admins.filter(pk=user.pk).exists()):
        raise PermissionError(
            _("User doesn't have permission to remove users from the tag.")
        )
    tag = models.Tag.objects.get(name=name, namespace=parent)
    tag.users.remove(*users)
    logger.msg(
        "Remove tag users.",
        user=user.username,
        tag=name,
        namespace=namespace,
        users=[u.username for u in users],
    )
    return tag
def add_tag_readers(
    user: User, name: str, namespace: str, readers: Sequence[User]
) -> models.Tag:
    """
    Add `readers` to the tag's readers list (who can read a private tag).

    Permitted for site admins and admins of the parent namespace; anyone else
    gets a PermissionError.
    """
    parent = models.Namespace.objects.get(name=namespace)
    if not (user.is_admin or parent.admins.filter(pk=user.pk).exists()):
        raise PermissionError(
            _("User doesn't have permission to add readers to the tag.")
        )
    tag = models.Tag.objects.get(name=name, namespace=parent)
    tag.readers.add(*readers)
    logger.msg(
        "Add tag readers.",
        user=user.username,
        tag=name,
        namespace=namespace,
        readers=[r.username for r in readers],
    )
    return tag
def remove_tag_readers(
    user: User, name: str, namespace: str, readers: Sequence[User]
) -> models.Tag:
    """
    Remove `readers` from the tag's readers list (who can read a private tag).

    Permitted for site admins and admins of the parent namespace; anyone else
    gets a PermissionError.
    """
    parent = models.Namespace.objects.get(name=namespace)
    if not (user.is_admin or parent.admins.filter(pk=user.pk).exists()):
        raise PermissionError(
            _("User doesn't have permission to remove readers from the tag.")
        )
    tag = models.Tag.objects.get(name=name, namespace=parent)
    tag.readers.remove(*readers)
    logger.msg(
        "Remove tag readers.",
        user=user.username,
        tag=name,
        namespace=namespace,
        readers=[r.username for r in readers],
    )
    return tag
def check_users_tags(user: models.User, tags: Set[str]) -> bool:
    """
    Given a list of namespace/tag tuples, return a boolean to indicate that the
    referenced user is allowed to use such tags to annotate values onto
    objects.
    """
    # Site admins always have privileges.
    if user.is_admin:
        return True
    # Count the number of tags that the user has permission to use.
    tag_matches = models.get_users_query(user, tags).count()
    # If the number of tag_matches is the same as the number of unique tags to
    # be checked, then the user MUST have permission to annotate with all the
    # referenced tags. (This checks the "users" privilege; for read access
    # see check_readers_tags.)
    return tag_matches == len(set(tags))
def check_readers_tags(user: models.User, tags: Set[str]) -> bool:
    """
    Given a list of namespace/tag tuples, return a boolean to indicate that the
    referenced user is allowed to use such tags to read the values annotated
    onto objects.
    """
    # Site admins can always read.
    if user.is_admin:
        return True
    # The user may read everything iff the number of readable matches equals
    # the number of distinct tags requested.
    readable_count = models.get_readers_query(user, tags).count()
    return readable_count == len(set(tags))
def set_object_tag_value(
    user: models.User,
    object_id: str,
    namespace: str,
    tag: str,
    value: HttpRequest,
):
    """
    Set the referenced namespace/tag on the specified object to the value
    contained within the incoming HttpRequest. Assumes the privileges of the
    referenced user.

    The type of the value in the HttpRequest is infered (and checked against)
    the type of the referenced tag.

    If the operation failed, an exception will be raised.
    """
    # TODO: unimplemented stub — currently a no-op returning None.
def set_object_tag_values(user: User, object_tag_values: Sequence[Dict]):
    """
    Set a number of unique namespace/tag values on each of the objects
    referenced in the sequence. Assumes the privileges of the referenced user.

    The object_tag_values should be of the form:

    [
        {
            "object_id": "my-object1",
            "values": {
                "namespace/tag": "a value to store",
                "namespace2/tag2": 123,
            },
        },
        {
            "object_id": "my-object2",
            "values": {
                "namespace3/ta3": "a unique value",
                "namespace2/tag2": 456,
            },
        },
        ... next object with other unique tag values...
    ]

    If the operation failed, an exception will be raised.
    """
    # TODO: unimplemented stub — currently a no-op returning None.
def set_object_tag_values_by_query(
    user: User, query: str, tag_values: Sequence[Dict]
):
    """
    Sets the same namespace/tag values to each of the objects matched by the
    BFQL query. Assumes the privileges of the referenced user.

    If the operation failed, an exception will be raised.
    """
    # TODO: unimplemented stub — currently a no-op returning None.
def get_object_tags(user: User, object_id: str) -> Sequence[str]:
    """
    Return all the tags associated with the referenced object that are visible
    to the referenced user.
    """
    # TODO: unimplemented stub — currently a no-op returning None.
def get_object_tag_value(user: User, object_id: str, namespace: str, tag: str):
    """
    Return the value associated with the namespace/tag pair on the referenced
    object given its visibility to the referenced user.
    """
    # TODO: unimplemented stub — currently a no-op returning None.
def get_object_values(user: User, object_id: str, tags: Sequence):
    """
    Get the values associated with the referenced tags on the referenced object
    that are visible to the referenced user.
    """
    # TODO: unimplemented stub — currently a no-op returning None.
def get_object_tag_values_by_query(user: User, query: str, tags: Sequence):
    """
    Get the values associated with the referenced tags on objects that match
    the BFQL query. Only tags/values visible to the referenced user will be
    returned.

    The tags sequence should be of the form:

    [
        "namespace1/tag1",
        "namespace2/tag2",
        ... etc...
    ]
    """
    # TODO: unimplemented stub — currently a no-op returning None.
def delete_tags_from_object(user: User, object_id: str, tags: Sequence):
    """
    Delete referenced tag-values from the referenced object.

    The tags sequence should be of the form:

    [
        "namespace1/tag1",
        "namespace2/tag2",
        ... etc...
    ]
    """
    # TODO: unimplemented stub — currently a no-op returning None.
def delete_object_tag_values_by_query(user: User, query: str, tags: Sequence):
    """
    Delete referenced tag-values from objects that match the BFQL query.

    The tags sequence should be of the form:

    [
        "namespace1/tag1",
        "namespace2/tag2",
        ... etc...
    ]
    """
    # TODO: unimplemented stub — currently a no-op returning None.
|
#!/usr/bin/env python3
""" This entry knows how to manipulate Python's module path to jump the queue.
"""
import sys
def use(abs_packages_dir):
    """Prepend abs_packages_dir to sys.path so packages installed there win
    over any other copies on the import path; returns the directory back.

    Usage examples :
        axs byname numpy_1.16.4_pip , use , python_api 'import numpy\\nprint(numpy.__version__)'
        axs np: byname numpy_1.16.4_pip , use , get np , ask_package_location
    """
    sys.path.insert(0, abs_packages_dir)
    return abs_packages_dir
def get_metadata_path(abs_packages_dir, package_name, metadata_filename='METADATA'):
    """Locate the METADATA file that gets installed alongside the pip package, see
            https://packaging.python.org/specifications/recording-installed-packages/

        Scans every *.dist-info directory under abs_packages_dir and matches
        package_name case-insensitively against either the distribution name
        or the importable name recorded in top_level.txt.
        Raises AssertionError if no matching distribution is found.

        Usage examples:
                axs byname generic_pip , get_metadata_path $HOME/CK-TOOLS/lib-python-onnx-compiler.python-3.6.8-precompiled-macos-64/build onnx
    """
    import os.path
    from glob import glob
    wanted = package_name.lower()
    distinfo_path_pattern = os.path.join( abs_packages_dir, '*.dist-info' )
    for distinfo_path in glob( distinfo_path_pattern ):
        # Distribution name is the "<name>" part of "<name>-<version>.dist-info".
        distinfo_name = os.path.basename( distinfo_path ).split( '-', 1 )[0]
        toplevel_path = os.path.join( distinfo_path, 'top_level.txt' )
        if os.path.isfile( toplevel_path ):
            # top_level.txt records the importable package name, which may
            # differ from the distribution name.
            with open( toplevel_path, 'r' ) as toplevel_fp:
                toplevel = toplevel_fp.readline().rstrip()
        else:
            toplevel = distinfo_name
        if wanted in (toplevel.lower(), distinfo_name.lower()):
            return os.path.join( distinfo_path, metadata_filename )
    # The original ended with `assert False, ...` followed by an unreachable
    # `return None`; under `python -O` the assert is stripped and the function
    # silently returned None. Raise explicitly instead (keeping AssertionError
    # so any existing callers that catch it still work).
    raise AssertionError(f"Could not find METADATA for package={package_name} when abs_packages_dir={abs_packages_dir}")
def get_metadata(abs_packages_dir, package_name, header_name=None):
    """Parse the pip-installed METADATA file with the stdlib email header parser, see
            https://packaging.python.org/specifications/core-metadata/

        With no --header_name, returns all header names; otherwise returns the
        list of values recorded for that header.

        Usage examples:
                axs byname generic_pip , get_metadata $HOME/CK-TOOLS/lib-python-onnx-compiler.python-3.6.8-precompiled-macos-64/build onnx Version
                axs byname numpy_1.16.4_pip , get_metadata
                axs byname numpy_1.16.4_pip , get_metadata --header_name=Platform
    """
    from email.parser import BytesHeaderParser
    from email.policy import default
    metadata_path = get_metadata_path(abs_packages_dir, package_name)
    with open(metadata_path, 'rb') as metadata_fp:
        headers = BytesHeaderParser(policy=default).parse(metadata_fp)
    if not header_name:
        return headers.keys()
    return [value for key, value in headers.items() if key == header_name]
def get_deps(abs_packages_dir, package_name):
    """Return the pip package's dependency list from its METADATA (the
        Requires-Dist headers), see:
            https://packaging.python.org/specifications/core-metadata/#requires-dist-multiple-use

        Usage examples:
                axs byname generic_pip , get_deps $HOME/CK-TOOLS/lib-python-onnx-compiler.python-3.6.8-precompiled-macos-64/build onnx
                axs byname numpy_1.16.4_pip , get_deps
    """
    return get_metadata(abs_packages_dir, package_name, header_name='Requires-Dist')
def ask_package_version(package_name):
    """Report the __version__ of whichever copy of the package currently wins
        the import path (either a use'd one or the default importable one).

        Usage examples :
                axs np: byname numpy_1.16.4_pip , use , get np , ask_package_version
                axs gen: byname generic_pip , use $HOME/CK-TOOLS/lib-python-onnx-compiler.python-3.6.8-precompiled-macos-64/build , get gen , ask_package_version onnx
                axs byname generic_pip , ask_package_version numpy
    """
    return getattr(__import__(package_name), '__version__')
def ask_package_location(package_name):
    """Report the __file__ of whichever copy of the package currently wins
        the import path (either a use'd one or the default importable one).

        Usage examples :
                axs np: byname numpy_1.16.4_pip , use , get np , ask_package_location
                axs byname generic_pip , ask_package_location numpy
    """
    return getattr(__import__(package_name), '__file__')
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.Principal import Principal
class SignTask(object):
    """Model for an Alipay "sign task" payload.

    Generated-style API model: each field is stored in a private attribute
    exposed through a property, with `to_alipay_dict`/`from_alipay_dict`
    handling (de)serialisation to the gateway's dict structure.
    """
    def __init__(self):
        self._biz_data = None
        self._biz_id = None
        self._cb_type = None
        self._cb_url = None
        self._cert_sign_type = None
        self._enter_type = None
        self._principal_list = None
        self._signer_type = None
        self._task_expire = None
    @property
    def biz_data(self):
        return self._biz_data
    @biz_data.setter
    def biz_data(self, value):
        self._biz_data = value
    @property
    def biz_id(self):
        return self._biz_id
    @biz_id.setter
    def biz_id(self, value):
        self._biz_id = value
    @property
    def cb_type(self):
        return self._cb_type
    @cb_type.setter
    def cb_type(self, value):
        self._cb_type = value
    @property
    def cb_url(self):
        return self._cb_url
    @cb_url.setter
    def cb_url(self, value):
        self._cb_url = value
    @property
    def cert_sign_type(self):
        return self._cert_sign_type
    @cert_sign_type.setter
    def cert_sign_type(self, value):
        self._cert_sign_type = value
    @property
    def enter_type(self):
        return self._enter_type
    @enter_type.setter
    def enter_type(self, value):
        self._enter_type = value
    @property
    def principal_list(self):
        return self._principal_list
    @principal_list.setter
    def principal_list(self, value):
        # Coerce each element to a Principal (dicts are converted). A non-list
        # value is ignored, leaving the previous value untouched.
        if isinstance(value, list):
            self._principal_list = list()
            for i in value:
                if isinstance(i, Principal):
                    self._principal_list.append(i)
                else:
                    self._principal_list.append(Principal.from_alipay_dict(i))
    @property
    def signer_type(self):
        return self._signer_type
    @signer_type.setter
    def signer_type(self, value):
        self._signer_type = value
    @property
    def task_expire(self):
        return self._task_expire
    @task_expire.setter
    def task_expire(self, value):
        self._task_expire = value
    def to_alipay_dict(self):
        # Serialise to a plain dict, recursing into any value that knows how
        # to serialise itself. NOTE(review): the truthiness checks mean falsy
        # values (0, '', []) are omitted from the payload — confirm intended.
        params = dict()
        if self.biz_data:
            if hasattr(self.biz_data, 'to_alipay_dict'):
                params['biz_data'] = self.biz_data.to_alipay_dict()
            else:
                params['biz_data'] = self.biz_data
        if self.biz_id:
            if hasattr(self.biz_id, 'to_alipay_dict'):
                params['biz_id'] = self.biz_id.to_alipay_dict()
            else:
                params['biz_id'] = self.biz_id
        if self.cb_type:
            if hasattr(self.cb_type, 'to_alipay_dict'):
                params['cb_type'] = self.cb_type.to_alipay_dict()
            else:
                params['cb_type'] = self.cb_type
        if self.cb_url:
            if hasattr(self.cb_url, 'to_alipay_dict'):
                params['cb_url'] = self.cb_url.to_alipay_dict()
            else:
                params['cb_url'] = self.cb_url
        if self.cert_sign_type:
            if hasattr(self.cert_sign_type, 'to_alipay_dict'):
                params['cert_sign_type'] = self.cert_sign_type.to_alipay_dict()
            else:
                params['cert_sign_type'] = self.cert_sign_type
        if self.enter_type:
            if hasattr(self.enter_type, 'to_alipay_dict'):
                params['enter_type'] = self.enter_type.to_alipay_dict()
            else:
                params['enter_type'] = self.enter_type
        if self.principal_list:
            # Lists are serialised element-by-element (in place) first.
            if isinstance(self.principal_list, list):
                for i in range(0, len(self.principal_list)):
                    element = self.principal_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.principal_list[i] = element.to_alipay_dict()
            if hasattr(self.principal_list, 'to_alipay_dict'):
                params['principal_list'] = self.principal_list.to_alipay_dict()
            else:
                params['principal_list'] = self.principal_list
        if self.signer_type:
            if hasattr(self.signer_type, 'to_alipay_dict'):
                params['signer_type'] = self.signer_type.to_alipay_dict()
            else:
                params['signer_type'] = self.signer_type
        if self.task_expire:
            if hasattr(self.task_expire, 'to_alipay_dict'):
                params['task_expire'] = self.task_expire.to_alipay_dict()
            else:
                params['task_expire'] = self.task_expire
        return params
    @staticmethod
    def from_alipay_dict(d):
        # Build a SignTask from a gateway dict; returns None for empty input.
        if not d:
            return None
        o = SignTask()
        if 'biz_data' in d:
            o.biz_data = d['biz_data']
        if 'biz_id' in d:
            o.biz_id = d['biz_id']
        if 'cb_type' in d:
            o.cb_type = d['cb_type']
        if 'cb_url' in d:
            o.cb_url = d['cb_url']
        if 'cert_sign_type' in d:
            o.cert_sign_type = d['cert_sign_type']
        if 'enter_type' in d:
            o.enter_type = d['enter_type']
        if 'principal_list' in d:
            o.principal_list = d['principal_list']
        if 'signer_type' in d:
            o.signer_type = d['signer_type']
        if 'task_expire' in d:
            o.task_expire = d['task_expire']
        return o
# <gh_stars>0
"""
Import as:
import core.signal_processing as csigna
"""
import collections
import functools
import logging
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import pywt
import helpers.dataframe as hdataf
import helpers.dbg as dbg
_LOG = logging.getLogger(__name__)
# #############################################################################
# Correlation helpers
# #############################################################################
def correlate_with_lag(
    df: pd.DataFrame, lag: Union[int, List[int]]
) -> pd.DataFrame:
    """
    Combine cols of `df` with their lags and compute the correlation matrix.

    :param df: dataframe of numeric values
    :param lag: number of lags to apply or list of number of lags
    :return: correlation matrix with `(1 + len(lag)) * df.columns` columns
    """
    dbg.dassert_isinstance(df, pd.DataFrame)
    # Normalize `lag` to a list of ints.
    if isinstance(lag, int):
        lags = [lag]
    elif isinstance(lag, list):
        lags = lag
    else:
        raise ValueError("Invalid `type(lag)`='%s'" % type(lag))
    def _shift_and_tag(shift_amount: int) -> pd.DataFrame:
        # Shift and suffix column names so lagged columns are identifiable.
        shifted = df.shift(shift_amount)
        shifted.columns = shifted.columns.astype(str) + f"_lag_{shift_amount}"
        return shifted
    pieces = [df] + [_shift_and_tag(curr) for curr in lags]
    return pd.concat(pieces, axis=1).corr()
def correlate_with_lagged_cumsum(
    df: pd.DataFrame,
    lag: int,
    y_vars: List[str],
    x_vars: Optional[List[str]] = None,
    nan_mode: Optional[str] = None,
) -> pd.DataFrame:
    """
    Compute correlation matrix of `df` cols and lagged cumulative sums.

    The flow is the following:
    - Compute cumulative sums of `y_vars` columns for `num_steps = lag`
    - Lag them so that `x_t` aligns with `y_{t+1} + ... + y{t+lag}`
    - Compute correlation of `df` columns (other than `y_vars`) and the
      lagged cumulative sums of `y_vars`

    This function can be applied to compute correlations between predictors and
    cumulative log returns.

    :param df: dataframe of numeric values
    :param lag: number of time points to shift the data by. Number of steps to
        compute rolling sum is `lag` too.
    :param y_vars: names of columns for which to compute cumulative sum
    :param x_vars: names of columns to correlate the `y_vars` with. If `None`,
        defaults to all columns except `y_vars`
    :param nan_mode: argument for hdataf.apply_nan_mode()
    :return: correlation matrix of `(len(x_vars), len(y_vars))` shape
    """
    dbg.dassert_isinstance(df, pd.DataFrame)
    dbg.dassert_isinstance(y_vars, list)
    # Default to correlating `y_vars` with every remaining column.
    x_vars = x_vars or df.columns.difference(y_vars).tolist()
    dbg.dassert_isinstance(x_vars, list)
    dbg.dassert_lte(
        1,
        len(x_vars),
        "There are no columns to compute the correlation of cumulative "
        "returns with. ",
    )
    # Restrict to the columns involved; copy so the helper's mutations do not
    # leak to the caller.
    df = df[x_vars + y_vars].copy()
    # The helper drops the original `y_vars` and appends their lagged cumsums.
    cumsum_df = _compute_lagged_cumsum(df, lag, y_vars=y_vars, nan_mode=nan_mode)
    corr_df = cumsum_df.corr()
    # Keep only the `(x_vars, lagged-cumsum)` block of the full matrix.
    y_cumsum_vars = cumsum_df.columns.difference(x_vars)
    return corr_df.loc[x_vars, y_cumsum_vars]
def _compute_lagged_cumsum(
    df: pd.DataFrame,
    lag: int,
    y_vars: Optional[List[str]] = None,
    nan_mode: Optional[str] = None,
) -> pd.DataFrame:
    """
    Compute lagged cumulative sum for selected columns.

    Align `x_t` with `y_{t+1} + ... + y{t+lag}`.

    :param df: dataframe of numeric values
    :param lag: number of time points to shift the data by. Number of steps to
        compute rolling sum is `lag`
    :param y_vars: names of columns for which to compute cumulative sum. If
        `None`, compute for all columns
    :param nan_mode: argument for hdataf.apply_nan_mode()
    :return: dataframe of `x_vars` columns merged with the lagged cumulative
        sum columns (the original `y_vars` columns are dropped)
    """
    dbg.dassert_isinstance(df, pd.DataFrame)
    y_vars = y_vars or df.columns.tolist()
    dbg.dassert_isinstance(y_vars, list)
    # `x_vars` may be empty when all columns are selected as `y_vars`.
    x_vars = df.columns.difference(y_vars)
    y = df[y_vars].copy()
    x = df[x_vars].copy()
    # Compute cumulative sum.
    y_cumsum = y.apply(accumulate, num_steps=lag, nan_mode=nan_mode)
    y_cumsum.rename(columns=lambda x: f"{x}_cumsum_{lag}", inplace=True)
    # Let's lag `y` so that `x_t` aligns with `y_{t+1} + ... + y{t+lag}`.
    y_cumsum_lagged = y_cumsum.shift(-lag)
    y_cumsum_lagged.rename(columns=lambda z: f"{z}_lag_{lag}", inplace=True)
    # Merge on the index so `x` and the lagged cumsums stay aligned.
    merged_df = x.merge(y_cumsum_lagged, left_index=True, right_index=True)
    return merged_df
def calculate_inverse(
    df: pd.DataFrame,
    p_moment: Optional[Any] = None,
    info: Optional[collections.OrderedDict] = None,
) -> pd.DataFrame:
    """
    Calculate an inverse matrix.

    :param df: matrix to invert
    :param p_moment: order of the matrix norm as in `np.linalg.cond`
    :param info: dict with info to add the condition number to
    """
    dbg.dassert_isinstance(df, pd.DataFrame)
    num_rows, num_cols = df.shape
    dbg.dassert_eq(
        num_rows, num_cols, "Only square matrices are invertible."
    )
    is_numeric = df.apply(
        lambda s: pd.to_numeric(s, errors="coerce").notnull()
    ).all(axis=None)
    dbg.dassert(is_numeric, "The matrix is not numeric.")
    dbg.dassert_ne(np.linalg.det(df), 0, "The matrix is non-invertible.")
    if info is not None:
        info["condition_number"] = np.linalg.cond(df, p_moment)
    inverse = np.linalg.inv(df)
    # Note the label swap: rows of the inverse correspond to `df`'s columns.
    return pd.DataFrame(inverse, df.columns, df.index)
def calculate_pseudoinverse(
    df: pd.DataFrame,
    rcond: Optional[float] = 1e-15,
    hermitian: Optional[bool] = False,
    p_moment: Optional[Any] = None,
    info: Optional[collections.OrderedDict] = None,
) -> pd.DataFrame:
    """
    Calculate a pseudoinverse matrix.

    :param df: matrix to pseudo-invert
    :param rcond: cutoff for small singular values as in `np.linalg.pinv`
    :param hermitian: if True, `df` is assumed to be Hermitian
    :param p_moment: order of the matrix norm as in `np.linalg.cond`
    :param info: dict with info to add the condition number to
    """
    dbg.dassert_isinstance(df, pd.DataFrame)
    is_numeric = df.apply(
        lambda s: pd.to_numeric(s, errors="coerce").notnull()
    ).all(axis=None)
    dbg.dassert(is_numeric, "The matrix is not numeric.")
    if info is not None:
        info["condition_number"] = np.linalg.cond(df, p_moment)
    pinv = np.linalg.pinv(df, rcond=rcond, hermitian=hermitian)
    # Label swap: rows of the pseudoinverse correspond to `df`'s columns.
    return pd.DataFrame(pinv, df.columns, df.index)
# #############################################################################
# Signal transformations
# #############################################################################
def squash(
    signal: Union[pd.DataFrame, pd.Series], scale: int = 1
) -> Union[pd.DataFrame, pd.Series]:
    """
    Apply squashing function to data.

    :param signal: data
    :param scale: Divide data by scale and multiply squashed output by scale.
        Rescaling approximately preserves behavior in a neighborhood of the
        origin where the squashing function is approximately linear.
    :return: squashed data
    """
    dbg.dassert_lt(0, scale)
    normalized = signal / scale
    return np.tanh(normalized) * scale
def accumulate(
    signal: Union[pd.DataFrame, pd.Series],
    num_steps: int,
    nan_mode: Optional[str] = None,
) -> Union[pd.DataFrame, pd.Series]:
    """
    Accumulate series for step.

    :param signal: time series or dataframe
    :param num_steps: number of steps to compute rolling sum for
    :param nan_mode: argument for hdataf.apply_nan_mode()
    :return: time series or dataframe accumulated
    """
    dbg.dassert_isinstance(num_steps, int)
    dbg.dassert_lte(
        1,
        num_steps,
        "`num_steps=0` returns all-zero dataframe. Passed in `num_steps`='%s'",
        num_steps,
    )
    nan_mode = nan_mode or "leave_unchanged"
    def _accumulate_srs(srs: pd.Series) -> pd.Series:
        # Clean NaNs per `nan_mode`, then take the rolling sum.
        cleaned = hdataf.apply_nan_mode(srs, mode=nan_mode)
        return cleaned.rolling(window=num_steps).sum()
    if isinstance(signal, pd.Series):
        return _accumulate_srs(signal)
    if isinstance(signal, pd.DataFrame):
        # Apply the same per-series transformation column-by-column.
        return signal.apply(_accumulate_srs, axis=0)
    raise ValueError(f"Invalid input type `{type(signal)}`")
def get_symmetric_equisized_bins(
    signal: pd.Series, bin_size: float, zero_in_bin_interior: bool = False
) -> np.ndarray:
    """
    Get bins of equal size, symmetric about zero, adapted to `signal`.

    :param signal: series whose finite min/max determine the bin range
    :param bin_size: width of bin
    :param zero_in_bin_interior: Determines whether `0` is a bin edge or not.
        If in interior, it is placed in the center of the bin.
    :return: array of bin boundaries
    """
    # NOTE: the return annotation was `np.array`, which is a function, not a
    # type; `np.ndarray` is the correct annotation.
    # Remove +/- inf for the purpose of calculating max/min.
    finite_signal = signal.replace([-np.inf, np.inf], np.nan).dropna()
    # Determine minimum and maximum bin boundaries based on values of `signal`.
    # Make them symmetric for simplicity.
    left = np.floor(finite_signal.min() / bin_size).astype(int) - 1
    right = np.ceil(finite_signal.max() / bin_size).astype(int) + 1
    bin_boundary = bin_size * np.maximum(np.abs(left), np.abs(right))
    if zero_in_bin_interior:
        right_start = bin_size / 2
    else:
        right_start = 0
    right_bins = np.arange(right_start, bin_boundary, bin_size)
    # Reflect `right_bins` to get `left_bins`.
    if zero_in_bin_interior:
        left_bins = -np.flip(right_bins)
    else:
        # Exclude `0` from the reflection so it appears only once.
        left_bins = -np.flip(right_bins[1:])
    # Combine `left_bins` and `right_bins` into one array.
    return np.append(left_bins, right_bins)
def digitize(signal: pd.Series, bins: np.ndarray, right: bool = False) -> pd.Series:
    """
    Digitize (i.e., discretize) `signal` into `bins`.

    - In the output, bins are referenced with integers and are such that `0`
      always belongs to bin `0`
    - The bin-referencing convention is optimized for studying signals centered
      at zero (e.g., returns, z-scored features, etc.)
    - For bins of equal size, the bin-referencing convention makes it easy to
      map back from the digitized signal to numerical ranges given
        - the bin number
        - the bin size

    :param signal: series of numeric values to discretize
    :param bins: array-like bin boundaries. Must include max and min `signal`
        values. (Annotation fixed from `np.array`, which is a function, not a
        type.)
    :param right: same as in `np.digitize`
    :return: digitized signal
    """
    # From https://docs.scipy.org/doc/numpy/reference/generated/numpy.digitize.html
    # (v 1.17):
    # > If values in x are beyond the bounds of bins, 0 or len(bins) is
    # > returned as appropriate.
    digitized = np.digitize(signal, bins, right)
    # Center so that `0` belongs to bin "0"
    bin_containing_zero = np.digitize([0], bins, right)
    digitized -= bin_containing_zero
    # Convert to pd.Series, since `np.digitize` only returns an np.array.
    digitized_srs = pd.Series(
        data=digitized, index=signal.index, name=signal.name
    )
    return digitized_srs
def _wrap(signal: pd.Series, num_cols: int) -> pd.DataFrame:
    """
    Convert a 1-d series into a 2-d dataframe left-to-right top-to-bottom.

    :param num_cols: number of columns to use for wrapping
    """
    dbg.dassert_isinstance(signal, pd.Series)
    dbg.dassert_lte(1, num_cols)
    flat = signal.values
    _LOG.debug("num values=%d", flat.size)
    # Number of rows needed to hold all values at `num_cols` per row.
    num_rows = int(np.ceil(flat.size / num_cols))
    _LOG.debug("num_rows=%d", num_rows)
    # Pad with NaNs so the element count matches `num_rows * num_cols`,
    # since numpy's `reshape` requires element counts to match exactly.
    pad_size = num_rows * num_cols - flat.size
    _LOG.debug("pad_size=%d", pad_size)
    padded = np.append(flat, np.full(pad_size, np.nan))
    return pd.DataFrame(padded.reshape(num_rows, num_cols))
def _unwrap(
    df: pd.DataFrame, idx: pd.Index, name: Optional[Any] = None
) -> pd.Series:
    """
    Undo `_wrap`.

    We allow `index.size` to be less than nrows * ncols of `df`, in which case
    values are truncated from the end of the unwrapped dataframe.

    :param idx: index of series provided to `_wrap` call
    """
    _LOG.debug("df.shape=%s", df.shape)
    flat = df.values.flatten()
    # Positive `pad_size` means `df` holds padding beyond the caller's index.
    pad_size = flat.size - idx.size
    _LOG.debug("pad_size=%d", pad_size)
    data = flat if pad_size <= 0 else flat[:-pad_size]
    return pd.Series(data=data, index=idx, name=name)
def skip_apply_func(
    signal: pd.DataFrame,
    skip_size: int,
    func: Callable[[pd.Series], pd.DataFrame],
    **kwargs: Any,
) -> pd.DataFrame:
    """
    Apply `func` to each col of `signal` after a wrap, then unwrap and merge.

    :param skip_size: num_cols used for wrapping each col of `signal`
    :param kwargs: forwarded to `func`
    """
    # Wrap -> apply -> unwrap each column; the dict preserves column order.
    unwrapped_cols = {
        col: _unwrap(
            func(_wrap(signal[col], skip_size), **kwargs), signal.index, col
        )
        for col in signal.columns
    }
    return pd.DataFrame.from_dict(unwrapped_cols)
# #############################################################################
# EMAs and derived kernels
# #############################################################################
def calculate_tau_from_com(com: float) -> float:
    """
    Transform center-of-mass (com) into tau parameter.

    This is the function inverse of `calculate_com_from_tau`.

    :param com: pandas-style center-of-mass; must be positive
    :return: tau parameter of the exponential kernel
    """
    # NOTE: the previous return annotation `Union[float, np.float]` evaluated
    # `np.float` at import time; the alias was removed in NumPy 1.24, so the
    # module failed to import under modern NumPy.
    dbg.dassert_lt(0, com)
    return 1.0 / np.log(1 + 1.0 / com)
def calculate_com_from_tau(tau: float) -> float:
    """
    Transform tau parameter into center-of-mass (com).

    We use the tau parameter for kernels (as in Dacorogna, et al), but for the
    compute_ema operator want to take advantage of pandas' implementation, which uses
    different parameterizations. We adopt `com` because
    - It is almost equal to `tau`
    - We have used it historically

    :param tau: parameter used in (continuous) compute_ema and compute_ema-derived kernels. For
        typical ranges it is approximately but not exactly equal to the
        center-of-mass (com) associated with an compute_ema kernel.
    :return: com
    """
    # NOTE: the previous return annotation `Union[float, np.float]` evaluated
    # `np.float` at import time; the alias was removed in NumPy 1.24, so the
    # module failed to import under modern NumPy.
    dbg.dassert_lt(0, tau)
    return 1.0 / (np.exp(1.0 / tau) - 1)
def compute_ema(
    signal: Union[pd.DataFrame, pd.Series],
    tau: float,
    min_periods: int,
    depth: int = 1,
) -> Union[pd.DataFrame, pd.Series]:
    r"""Implement iterated EMA operator (e.g., see 3.3.6 of Dacorogna, et al).

    depth=1 corresponds to a single application of exponential smoothing.
    Greater depth tempers impulse response, introducing a phase lag.

    Dacorogna use the convention $\text{compute_ema}(t) = \exp(-t / \tau) / \tau$.

    If $s_n = \lambda x_n + (1 - \lambda) s_{n - 1}$, where $s_n$ is the compute_ema
    output, then $1 - \lambda = \exp(-1 / \tau)$. Now
    $\lambda = 1 / (1 + \text{com})$, and rearranging gives
    $\log(1 + 1 / \text{com}) = 1 / \tau$. Expanding in a Taylor series
    leads to $\tau \approx \text{com}$.

    The kernel for an compute_ema of depth $n$ is
    $(1 / (n - 1)!) (t / \tau)^{n - 1} \exp^{-t / \tau} / \tau$.

    Arbitrary kernels can be approximated by a combination of iterated emas.

    For an iterated compute_ema with given tau and depth n, we have
      - range = n \tau
      - <t^2> = n(n + 1) \tau^2
      - width = \sqrt{n} \tau
      - aspect ratio = \sqrt{1 + 1 / n}

    :param signal: data to smooth
    :param tau: kernel tau parameter; converted to pandas `com` via
        `calculate_com_from_tau()`
    :param min_periods: as in `pandas.ewm`
    :param depth: number of times the EMA operator is applied
    """
    dbg.dassert_isinstance(depth, int)
    dbg.dassert_lte(1, depth)
    dbg.dassert_lt(0, tau)
    _LOG.debug("Calculating iterated ema of depth %i", depth)
    _LOG.debug("range = %0.2f", depth * tau)
    _LOG.debug("<t^2>^{1/2} = %0.2f", np.sqrt(depth * (depth + 1)) * tau)
    _LOG.debug("width = %0.2f", np.sqrt(depth) * tau)
    _LOG.debug("aspect ratio = %0.2f", np.sqrt(1 + 1.0 / depth))
    _LOG.debug("tau = %0.2f", tau)
    com = calculate_com_from_tau(tau)
    _LOG.debug("com = %0.2f", com)
    signal_hat = signal.copy()
    # Iterating the smoothing `depth` times realizes the depth-n kernel above.
    for _ in range(0, depth):
        signal_hat = signal_hat.ewm(
            # NOTE(review): `axis=0` was removed from `ewm` in pandas 2.x —
            # confirm the pinned pandas version before upgrading.
            com=com, min_periods=min_periods, adjust=True, ignore_na=False, axis=0
        ).mean()
    return signal_hat
def compute_smooth_derivative(
    signal: Union[pd.DataFrame, pd.Series],
    tau: float,
    min_periods: int,
    scaling: int = 1,
    order: int = 1,
) -> Union[pd.DataFrame, pd.Series]:
    r"""Compute a "low-noise" differential operator.

    'Low-noise' differential operator as in 3.3.9 of Dacorogna, et al.

    - Computes difference of around time "now" over a time interval \tau_1 and
      an average around time "now - \tau" over a time interval \tau_2
    - Here, \tau_1, \tau_2 are taken to be approximately `tau`/ 2

    The normalization factors are chosen so that
    - the differential of a constant is zero
    - the differential (`scaling` = 0) of `t` is approximately `tau`
    - the derivative (`order` = 1) of `t` is approximately 1

    The `scaling` parameter refers to the exponential weighting of inverse
    tau.

    The `order` parameter refers to the number of times the
    compute_smooth_derivative operator is applied to the original signal.
    """
    dbg.dassert_isinstance(order, int)
    dbg.dassert_lte(0, order)
    # Constants from 3.3.9 of Dacorogna, et al.
    gamma = 1.22208
    beta = 0.65
    alpha = 1.0 / (gamma * (8 * beta - 3))
    _LOG.debug("alpha = %0.2f", alpha)
    tau1 = alpha * tau
    _LOG.debug("tau1 = %0.2f", tau1)
    tau2 = alpha * beta * tau
    _LOG.debug("tau2 = %0.2f", tau2)
    def order_one(
        signal: Union[pd.DataFrame, pd.Series]
    ) -> Union[pd.DataFrame, pd.Series]:
        # Combine emas of depths 1, 2 (at tau1) and 4 (at tau2) per Dacorogna.
        s1 = compute_ema(signal, tau1, min_periods, 1)
        s2 = compute_ema(signal, tau1, min_periods, 2)
        s3 = -2.0 * compute_ema(signal, tau2, min_periods, 4)
        differential = gamma * (s1 + s2 + s3)
        if scaling == 0:
            # `scaling == 0` returns the unnormalized differential.
            return differential
        return differential / (tau ** scaling)
    # Apply the first-order operator `order` times.
    signal_diff = signal.copy()
    for _ in range(0, order):
        signal_diff = order_one(signal_diff)
    return signal_diff
def compute_smooth_moving_average(
    signal: Union[pd.DataFrame, pd.Series],
    tau: float,
    min_periods: int = 0,
    min_depth: int = 1,
    max_depth: int = 1,
) -> Union[pd.DataFrame, pd.Series]:
    """Implement moving average operator defined in terms of iterated compute_ema's.

    Choosing min_depth > 1 results in a lagged operator.
    Choosing min_depth = max_depth = 1 reduces to a single compute_ema.

    Abrupt impulse response that tapers off smoothly like a sigmoid
    (hence smoother than an equally-weighted moving average).

    For min_depth = 1 and large max_depth, the series is approximately
    constant for t << 2 * range_. In particular, when max_depth >= 5,
    the kernels are more rectangular than compute_ema-like.
    """
    dbg.dassert_isinstance(min_depth, int)
    dbg.dassert_isinstance(max_depth, int)
    dbg.dassert_lte(1, min_depth)
    dbg.dassert_lte(min_depth, max_depth)
    range_ = tau * (min_depth + max_depth) / 2.0
    _LOG.debug("Range = %0.2f", range_)
    # Average the iterated emas over depths; not the most efficient
    # implementation, but follows 3.56 of Dacorogna directly.
    num_kernels = float(max_depth - min_depth + 1)
    total = None
    for depth in range(min_depth, max_depth + 1):
        ema = compute_ema(signal, tau, min_periods, depth)
        total = ema if total is None else total + ema
    return total / num_kernels
def extract_smooth_moving_average_weights(
    signal: Union[pd.DataFrame, pd.Series],
    tau: float,
    min_depth: int = 1,
    max_depth: int = 1,
    index_location: Optional[Any] = None,
) -> pd.DataFrame:
    """
    Return present and historical weights used in SMA up to `index_location`.

    This can be used in isolation to inspect SMA weights, or can be used on
    data to, e.g., generate training data weights.

    TODO(Paul): Consider generalizing this to also work with other filters.

    :param signal: data that provides an index (for reindexing). No column
        values used.
    :param tau: as in `compute_smooth_moving_average()`
    :param min_depth: as in `compute_smooth_moving_average()`
    :param max_depth: as in `compute_smooth_moving_average()`
    :param index_location: current and latest value to be considered operated
        upon by the smooth moving average (e.g., the last in-sample index). If
        `None`, then use the last index location of `signal`.
    :return: dataframe with two columns of weights:
      1. absolute weights (e.g., weights sum to 1)
      2. relative weights (weight at `index_location` is equal to `1`, and
         prior weights are expressed relative to this value)
    """
    idx = signal.index
    dbg.dassert_isinstance(idx, pd.Index)
    dbg.dassert(not idx.empty, msg="`signal.index` must be nonempty.")
    # Default to the last index location when none is given.
    index_location = index_location or idx[-1]
    dbg.dassert_in(
        index_location,
        idx,
        msg="`index_location` must be a member of `signal.index`",
    )
    dbg.dassert_lt(0, tau)
    # Build a step series.
    # - This is a sequence of ones followed by a sequence of zeros
    # - The length of the ones series is determined by `tau` and is used for
    #   warm-up
    # - The length of the zeros is at least as long as the length of the
    #   weight series implicitly asked for by the caller. If this is less than
    #   the warm-up length, then we extend the zeros so that we can calculate
    #   reliable absolute weights
    desired_length = signal.loc[:index_location].shape[0]
    # The warm-up of 10 * tau covers essentially all of the kernel mass.
    warmup_length = int(np.round(10 * tau))
    ones = pd.Series(index=range(0, warmup_length), data=1)
    length = max(desired_length, warmup_length)
    zeros = pd.Series(index=range(warmup_length, warmup_length + length), data=0)
    step = pd.concat([ones, zeros], axis=0)
    # Apply the smooth moving average function to the step function.
    smoothed_step = compute_smooth_moving_average(
        step,
        tau=tau,
        min_depth=min_depth,
        max_depth=max_depth,
    )
    # Drop the warm-up ones from the smoothed series.
    smoothed_step = smoothed_step.iloc[warmup_length - 1 :]
    smoothed_step.name = "relative_weight"
    # Calculate absolute weights.
    absolute_weights = (smoothed_step / smoothed_step.sum()).rename(
        "absolute_weight"
    )
    # Build a `weights` dataframe of relative and absolute kernel weights.
    weights = pd.concat([smoothed_step, absolute_weights], axis=1).reset_index(
        drop=True
    )
    # Truncate to `desired_length`, determined by `signal.index` and
    # `index_location`.
    weights = weights.iloc[:desired_length]
    # Reverse the series (because the weights apply to historical
    # observations).
    weights = weights.iloc[::-1].reset_index(drop=True)
    # Index and align the weights so that they terminate at `index_location`.
    weights.index = signal.loc[:index_location].index
    # Extend `weights` with NaNs if necessary.
    return weights.reindex(signal.index)
# #############################################################################
# Rolling moments, norms, z-scoring, demeaning, etc.
# #############################################################################
def compute_rolling_moment(
    signal: Union[pd.DataFrame, pd.Series],
    tau: float,
    min_periods: int = 0,
    min_depth: int = 1,
    max_depth: int = 1,
    p_moment: float = 2,
) -> Union[pd.DataFrame, pd.Series]:
    """
    Compute a smooth moving average of `|signal| ** p_moment`.
    """
    abs_power = np.abs(signal) ** p_moment
    return compute_smooth_moving_average(
        abs_power, tau, min_periods, min_depth, max_depth
    )
def compute_rolling_norm(
    signal: Union[pd.DataFrame, pd.Series],
    tau: float,
    min_periods: int = 0,
    min_depth: int = 1,
    max_depth: int = 1,
    p_moment: float = 2,
) -> Union[pd.DataFrame, pd.Series]:
    """
    Implement smooth moving average norm (when p_moment >= 1).

    Moving average corresponds to compute_ema when min_depth = max_depth = 1.
    """
    moment = compute_rolling_moment(
        signal, tau, min_periods, min_depth, max_depth, p_moment
    )
    # Take the p-th root to turn the moment into a norm.
    return moment ** (1.0 / p_moment)
def compute_rolling_var(
    signal: Union[pd.DataFrame, pd.Series],
    tau: float,
    min_periods: int = 0,
    min_depth: int = 1,
    max_depth: int = 1,
    p_moment: float = 2,
) -> Union[pd.DataFrame, pd.Series]:
    """
    Implement smooth moving average central moment.

    Moving average corresponds to compute_ema when min_depth = max_depth = 1.
    """
    # Demean with the same kernel, then take the p-th absolute moment.
    demeaned = signal - compute_smooth_moving_average(
        signal, tau, min_periods, min_depth, max_depth
    )
    return compute_rolling_moment(
        demeaned, tau, min_periods, min_depth, max_depth, p_moment
    )
def compute_rolling_std(
    signal: Union[pd.DataFrame, pd.Series],
    tau: float,
    min_periods: int = 0,
    min_depth: int = 1,
    max_depth: int = 1,
    p_moment: float = 2,
) -> Union[pd.DataFrame, pd.Series]:
    """
    Implement normalized smooth moving average central moment.

    Moving average corresponds to compute_ema when min_depth = max_depth = 1.
    """
    central_moment = compute_rolling_var(
        signal, tau, min_periods, min_depth, max_depth, p_moment
    )
    # Take the p-th root to normalize the central moment.
    return central_moment ** (1.0 / p_moment)
def compute_rolling_demean(
    signal: Union[pd.DataFrame, pd.Series],
    tau: float,
    min_periods: int = 0,
    min_depth: int = 1,
    max_depth: int = 1,
) -> Union[pd.DataFrame, pd.Series]:
    """
    Demean signal on a rolling basis with compute_smooth_moving_average.
    """
    local_mean = compute_smooth_moving_average(
        signal, tau, min_periods, min_depth, max_depth
    )
    return signal - local_mean
def compute_rolling_zscore(
    signal: Union[pd.DataFrame, pd.Series],
    tau: float,
    min_periods: int = 0,
    min_depth: int = 1,
    max_depth: int = 1,
    p_moment: float = 2,
    demean: bool = True,
    delay: int = 0,
    atol: float = 0,
) -> Union[pd.DataFrame, pd.Series]:
    """
    Z-score using compute_smooth_moving_average and compute_rolling_std.

    If delay > 0, then pay special attention to 0 and NaN handling to avoid
    extreme values.

    Moving average corresponds to compute_ema when min_depth = max_depth = 1.

    If denominator.abs() <= atol, Z-score value is set to np.nan in order to
    avoid extreme value spikes.

    TODO(Paul): determine whether signal == signal.shift(0) always.
    """
    if demean:
        # Equivalent to invoking compute_rolling_demean and compute_rolling_std, but this way
        # we avoid calculating signal_ma twice.
        signal_ma = compute_smooth_moving_average(
            signal, tau, min_periods, min_depth, max_depth
        )
        signal_std = compute_rolling_norm(
            signal - signal_ma, tau, min_periods, min_depth, max_depth, p_moment
        )
        # Only the moving average is delayed here; the std is delayed below.
        numerator = signal - signal_ma.shift(delay)
    else:
        signal_std = compute_rolling_norm(
            signal, tau, min_periods, min_depth, max_depth, p_moment
        )
        numerator = signal
    # Delay the denominator so that only past information is used.
    denominator = signal_std.shift(delay)
    # Suppress near-zero denominators to avoid extreme value spikes.
    denominator[denominator.abs() <= atol] = np.nan
    ret = numerator / denominator
    return ret
def compute_rolling_skew(
    signal: Union[pd.DataFrame, pd.Series],
    tau_z: float,
    tau_s: float,
    min_periods: int = 0,
    min_depth: int = 1,
    max_depth: int = 1,
    p_moment: float = 2,
) -> Union[pd.DataFrame, pd.Series]:
    """
    Compute smooth moving average skew of z-scored signal.

    `tau_z` parameterizes the z-scoring; `tau_s` the averaging of the cube.
    """
    zscored = compute_rolling_zscore(
        signal, tau_z, min_periods, min_depth, max_depth, p_moment
    )
    third_power = zscored ** 3
    return compute_smooth_moving_average(
        third_power, tau_s, min_periods, min_depth, max_depth
    )
def compute_rolling_kurtosis(
    signal: Union[pd.DataFrame, pd.Series],
    tau_z: float,
    tau_s: float,
    min_periods: int = 0,
    min_depth: int = 1,
    max_depth: int = 1,
    p_moment: float = 2,
) -> Union[pd.DataFrame, pd.Series]:
    """
    Compute smooth moving average kurtosis of z-scored signal.

    `tau_z` parameterizes the z-scoring; `tau_s` the averaging of the fourth
    power.
    """
    zscored = compute_rolling_zscore(
        signal, tau_z, min_periods, min_depth, max_depth, p_moment
    )
    fourth_power = zscored ** 4
    return compute_smooth_moving_average(
        fourth_power, tau_s, min_periods, min_depth, max_depth
    )
def compute_centered_gaussian_log_likelihood(
    df: pd.DataFrame,
    observation_col: str,
    variance_col: str,
    square_variance_col: bool = False,
    variance_shifts: int = 0,
    prefix: Optional[str] = None,
) -> pd.DataFrame:
    """
    Return the log-likelihoods of independent draws from centered Gaussians.

    A higher log-likelihood score means that the model of independent
    Gaussian draws with given variances is a better fit.

    The log-likelihood of the series of observations may be obtained by
    summing the individual log-likelihood values.

    :param df: dataframe with float observation and variance columns
    :param observation_col: name of column containing observations
    :param variance_col: name of column containing variances
    :param square_variance_col: if `True`, square the values in `variance_col`
        (use this if the column contains standard deviations)
    :param variance_shifts: number of shifts to apply to `variance_col` prior
        to calculating log-likelihood. Use this if `variance_col` contains
        forward predictions.
    :param prefix: prefix to add to name of output series
    :return: dataframe of log-likelihoods and adjusted observations
    """
    prefix = prefix or ""
    dbg.dassert_isinstance(df, pd.DataFrame)
    # Extract observations and variance, with optional shift applied.
    obs = df[observation_col]
    var = df[variance_col].shift(variance_shifts)
    dbg.dassert(not (var <= 0).any(), msg="Variance values must be positive.")
    if square_variance_col:
        var = np.square(var)
    # Restrict to relevant data and drop any rows with NaNs.
    idx = pd.concat([obs, var], axis=1).dropna().index
    obs = obs.loc[idx]
    var = var.loc[idx]
    # Ensure that there is at least one observation.
    n_obs = idx.size
    _LOG.debug("Number of non-NaN observations=%i", n_obs)
    dbg.dassert_lt(0, n_obs)
    # Perform log-likelihood calculation.
    # This term only depends upon the presence of an observation. We preserve
    # it here to facilitate comparisons across series with different numbers of
    # observations.
    constant_term = -0.5 * np.log(2 * np.pi)
    # This term depends upon the observation values and variances.
    data_term = -0.5 * (np.log(var) + np.square(obs).divide(var))
    log_likelihoods = constant_term + data_term
    log_likelihoods.name = prefix + "log_likelihood"
    # Compute observations normalized by standard deviations.
    adj_obs = obs.divide(np.sqrt(var))
    adj_obs.name = prefix + "normalized_observations"
    # Construct output dataframe.
    df_out = pd.concat([adj_obs, log_likelihoods], axis=1)
    return df_out
# #############################################################################
# Rolling Sharpe ratio
# #############################################################################
def compute_rolling_annualized_sharpe_ratio(
    signal: Union[pd.DataFrame, pd.Series],
    tau: float,
    min_periods: int = 2,
    min_depth: int = 1,
    max_depth: int = 1,
    p_moment: float = 2,
) -> Union[pd.DataFrame, pd.Series]:
    """
    Compute rolling annualized Sharpe ratio and standard error.

    The standard error adjustment uses the range of the smooth moving
    average kernel as an estimate of the "number of data points" used in
    the calculation of the Sharpe ratio.

    :param signal: returns-like series or dataframe
    :param tau: as in `compute_rolling_sharpe_ratio()`
    :return: dataframe with `annualized_SR` and `annualized_SE(SR)` columns
    """
    # Infer the annualization factor from the sampling frequency.
    ppy = hdataf.infer_sampling_points_per_year(signal)
    sr = compute_rolling_sharpe_ratio(
        signal, tau, min_periods, min_depth, max_depth, p_moment
    )
    # TODO(*): May need to rescale denominator by a constant.
    # Standard error with `tau * max_depth` as the effective sample size.
    se_sr = np.sqrt((1 + (sr ** 2) / 2) / (tau * max_depth))
    rescaled_sr = np.sqrt(ppy) * sr
    rescaled_se_sr = np.sqrt(ppy) * se_sr
    df = pd.DataFrame(index=signal.index)
    df["annualized_SR"] = rescaled_sr
    df["annualized_SE(SR)"] = rescaled_se_sr
    return df
def compute_rolling_sharpe_ratio(
    signal: Union[pd.DataFrame, pd.Series],
    tau: float,
    min_periods: int = 2,
    min_depth: int = 1,
    max_depth: int = 1,
    p_moment: float = 2,
) -> Union[pd.DataFrame, pd.Series]:
    """
    Sharpe ratio using compute_smooth_moving_average and compute_rolling_std.
    """
    local_mean = compute_smooth_moving_average(
        signal, tau, min_periods, min_depth, max_depth
    )
    local_vol = compute_rolling_norm(
        signal - local_mean, tau, min_periods, min_depth, max_depth, p_moment
    )
    # TODO(Paul): Annualize appropriately.
    return local_mean / local_vol
# #############################################################################
# Rolling correlation functions
# #############################################################################
# TODO(Paul): Change the interface so that the two series are cols of a df.
def compute_rolling_cov(
    srs1: Union[pd.DataFrame, pd.Series],
    srs2: Union[pd.DataFrame, pd.Series],
    tau: float,
    demean: bool = True,
    min_periods: int = 0,
    min_depth: int = 1,
    max_depth: int = 1,
) -> Union[pd.DataFrame, pd.Series]:
    """
    Smooth moving covariance.
    """
    def _demean(srs: Union[pd.DataFrame, pd.Series]):
        # Subtract the kernel-local mean.
        return srs - compute_smooth_moving_average(
            srs, tau, min_periods, min_depth, max_depth
        )
    if demean:
        srs1_adj = _demean(srs1)
        srs2_adj = _demean(srs2)
    else:
        srs1_adj, srs2_adj = srs1, srs2
    return compute_smooth_moving_average(
        srs1_adj.multiply(srs2_adj), tau, min_periods, min_depth, max_depth
    )
def compute_rolling_corr(
    srs1: Union[pd.DataFrame, pd.Series],
    srs2: Union[pd.DataFrame, pd.Series],
    tau: float,
    demean: bool = True,
    min_periods: int = 0,
    min_depth: int = 1,
    max_depth: int = 1,
    p_moment: float = 2,
) -> Union[pd.DataFrame, pd.Series]:
    """
    Smooth moving correlation.
    """
    def _demean(srs: Union[pd.DataFrame, pd.Series]):
        # Subtract the kernel-local mean.
        return srs - compute_smooth_moving_average(
            srs, tau, min_periods, min_depth, max_depth
        )
    def _vol(srs: Union[pd.DataFrame, pd.Series]):
        # Rolling p-norm used as the normalization.
        return compute_rolling_norm(
            srs, tau, min_periods, min_depth, max_depth, p_moment
        )
    if demean:
        srs1_adj = _demean(srs1)
        srs2_adj = _demean(srs2)
    else:
        srs1_adj, srs2_adj = srs1, srs2
    covariance = compute_smooth_moving_average(
        srs1_adj.multiply(srs2_adj), tau, min_periods, min_depth, max_depth
    )
    return covariance / (_vol(srs1_adj) * _vol(srs2_adj))
def compute_rolling_zcorr(
    srs1: Union[pd.DataFrame, pd.Series],
    srs2: Union[pd.DataFrame, pd.Series],
    tau: float,
    demean: bool = True,
    min_periods: int = 0,
    min_depth: int = 1,
    max_depth: int = 1,
    p_moment: float = 2,
) -> Union[pd.DataFrame, pd.Series]:
    """
    Z-score `srs1` and `srs2`, then smooth their product.

    The result is not guaranteed to lie in [-1, 1], but it is bilinear in
    the z-scored variables.
    """
    if demean:
        z1 = compute_rolling_zscore(
            srs1, tau, min_periods, min_depth, max_depth, p_moment
        )
        z2 = compute_rolling_zscore(
            srs2, tau, min_periods, min_depth, max_depth, p_moment
        )
    else:
        # Without demeaning, normalize by the rolling norm only.
        z1 = srs1 / compute_rolling_norm(
            srs1, tau, min_periods, min_depth, max_depth, p_moment
        )
        z2 = srs2 / compute_rolling_norm(
            srs2, tau, min_periods, min_depth, max_depth, p_moment
        )
    return compute_smooth_moving_average(
        z1.multiply(z2), tau, min_periods, min_depth, max_depth
    )
# #############################################################################
# Outlier handling
# #############################################################################
def process_outliers(
    srs: pd.Series,
    mode: str,
    lower_quantile: float,
    upper_quantile: Optional[float] = None,
    window: Optional[int] = None,
    min_periods: Optional[int] = None,
    info: Optional[dict] = None,
) -> pd.Series:
    """
    Process outliers in different ways given lower / upper quantiles.

    Default behavior:
      - If `window` is `None`, set `window` to series length
        - This works like an expanding window (we always look at the full
          history, except for anything burned by `min_periods`)
      - If `min_periods` is `None` and `window` is `None`, set `min_periods` to
        `0`
        - Like an expanding window with no data burned
      - If `min_periods` is `None` and `window` is not `None`, set `min_periods`
        to `window`
        - This is a sliding window with leading data burned so that every
          estimate uses a full window's worth of data

    Note:
      - If `window` is set to `None` according to these conventions (i.e., we
        are in an "expanding window" mode), then outlier effects are never
        "forgotten" and the processing of the data can depend strongly upon
        where the series starts
      - For this reason, it is suggested that `window` be set to a finite value
        adapted to the data/frequency

    :param srs: pd.Series to process
    :param lower_quantile: lower quantile (in range [0, 1]) of the values to keep
        The interval of data kept without any changes is [lower, upper]. In other
        terms the outliers with quantiles strictly smaller and larger than the
        respective bounds are processed.
    :param upper_quantile: upper quantile with the same semantic as
        lower_quantile. If `None`, the quantile symmetric of the lower quantile
        with respect to 0.5 is taken. E.g., an upper quantile equal to 0.7 is
        taken for a lower_quantile = 0.3
    :param window: rolling window size
    :param min_periods: minimum number of observations in window required to
        calculate the quantiles. The first `min_periods` values will not be
        processed. If `None`, defaults to `window`.
    :param mode: it can be "winsorize", "set_to_nan", "set_to_zero"
    :param info: empty dict-like object that this function will populate with
        statistics about the performed operation
    :return: transformed series with the same number of elements as the input
        series. The operation is not in place.
    """
    # Check parameters.
    dbg.dassert_isinstance(srs, pd.Series)
    dbg.dassert_lte(0.0, lower_quantile)
    if upper_quantile is None:
        # Default: symmetric quantile about 0.5.
        upper_quantile = 1.0 - lower_quantile
    dbg.dassert_lte(lower_quantile, upper_quantile)
    dbg.dassert_lte(upper_quantile, 1.0)
    # Process default `min_periods` and `window` parameters.
    if min_periods is None:
        if window is None:
            min_periods = 0
        else:
            min_periods = window
    if window is None:
        # Expanding-window mode: use the full series length.
        window = srs.shape[0]
    if window < 30:
        _LOG.warning("`window`=`%s` < `30`", window)
    if min_periods > window:
        _LOG.warning("`min_periods`=`%s` > `window`=`%s`", min_periods, window)
    # Compute bounds. `center=False` makes the rolling quantiles trailing
    # (causal): each bound uses only data up to and including that point.
    l_bound = srs.rolling(window, min_periods=min_periods, center=False).quantile(
        lower_quantile
    )
    u_bound = srs.rolling(window, min_periods=min_periods, center=False).quantile(
        upper_quantile
    )
    _LOG.debug(
        "Removing outliers in [%s, %s] with mode=%s",
        lower_quantile,
        upper_quantile,
        mode,
    )
    # Compute stats.
    if info is not None:
        dbg.dassert_isinstance(info, dict)
        # Dictionary should be empty.
        dbg.dassert(not info)
        info["series_name"] = srs.name
        info["num_elems_before"] = len(srs)
        info["num_nans_before"] = np.isnan(srs).sum()
        info["num_infs_before"] = np.isinf(srs).sum()
        info["quantiles"] = (lower_quantile, upper_quantile)
        info["mode"] = mode
    #
    srs = srs.copy()
    # Here we implement the functions instead of using library functions (e.g,
    # `scipy.stats.mstats.winsorize`) since we want to compute some statistics
    # that are not readily available from the library function.
    # NOTE: comparisons against NaN bounds (e.g., the `min_periods` warm-up)
    # evaluate to False, so those rows are left unchanged.
    l_mask = srs < l_bound
    u_mask = u_bound < srs
    if mode == "winsorize":
        # Assign the outliers to the value of the bounds.
        srs[l_mask] = l_bound[l_mask]
        srs[u_mask] = u_bound[u_mask]
    else:
        mask = u_mask | l_mask
        if mode == "set_to_nan":
            srs[mask] = np.nan
        elif mode == "set_to_zero":
            srs[mask] = 0.0
        else:
            dbg.dfatal("Invalid mode='%s'" % mode)
    # Append the post-processing stats.
    if info is not None:
        info["bounds"] = pd.DataFrame({"l_bound": l_bound, "u_bound": u_bound})
        num_removed = l_mask.sum() + u_mask.sum()
        info["num_elems_removed"] = num_removed
        info["num_elems_after"] = (
            info["num_elems_before"] - info["num_elems_removed"]
        )
        info["percentage_removed"] = (
            100.0 * info["num_elems_removed"] / info["num_elems_before"]
        )
        info["num_nans_after"] = np.isnan(srs).sum()
        info["num_infs_after"] = np.isinf(srs).sum()
    return srs
def process_outlier_df(
    df: pd.DataFrame,
    mode: str,
    lower_quantile: float,
    upper_quantile: Optional[float] = None,
    window: Optional[int] = None,
    min_periods: Optional[int] = None,
    info: Optional[dict] = None,
) -> pd.DataFrame:
    """
    Apply `process_outliers` column-by-column to a dataframe.

    Parameters as in `process_outliers`. If `info` is provided, it is
    populated with one sub-dictionary of statistics per column.

    TODO(*): Revisit this with a decorator approach:
    https://github.com/.../.../issues/568
    """
    if info is not None:
        dbg.dassert_isinstance(info, dict)
        # Dictionary should be empty.
        dbg.dassert(not info)
    processed = {}
    for col in df.columns:
        col_info: Optional[Dict[str, Any]] = {} if info is not None else None
        processed[col] = process_outliers(
            df[col],
            mode,
            lower_quantile,
            upper_quantile=upper_quantile,
            window=window,
            min_periods=min_periods,
            info=col_info,
        )
        if info is not None:
            info[col] = col_info
    result = pd.DataFrame.from_dict(processed)
    # Check that the columns are the same. We don't use dassert_eq because of
    # #665.
    dbg.dassert(
        all(df.columns == result.columns),
        "Columns are different:\ndf.columns=%s\nret.columns=%s",
        str(df.columns),
        str(result.columns),
    )
    return result
def process_nonfinite(
    srs: pd.Series,
    remove_nan: bool = True,
    remove_inf: bool = True,
    info: Optional[dict] = None,
) -> pd.Series:
    """
    Remove infinite and/or NaN values according to the parameters.

    :param srs: pd.Series to process
    :param remove_nan: remove NaN values if True and keep if False
    :param remove_inf: remove infinite values if True and keep if False
    :param info: empty dict-like object that this function will populate with
        statistics about how many items were removed
    :return: transformed copy of the input series
    """
    dbg.dassert_isinstance(srs, pd.Series)
    nan_mask = np.isnan(srs)
    inf_mask = np.isinf(srs)
    # Build the mask of values to drop with explicit boolean logic. The
    # previous version combined the bool flags with bitwise `&`/`~`, which
    # only worked via Python int semantics (e.g., `~False == -1` is truthy).
    drop_mask = pd.Series(False, index=srs.index)
    if remove_nan:
        drop_mask |= nan_mask
    if remove_inf:
        drop_mask |= inf_mask
    # Make a copy so the operation is not in place.
    res = srs[~drop_mask].copy()
    if info is not None:
        dbg.dassert_isinstance(info, dict)
        # Dictionary should be empty.
        dbg.dassert(not info)
        info["series_name"] = srs.name
        info["num_elems_before"] = len(srs)
        info["num_nans_before"] = np.isnan(srs).sum()
        info["num_infs_before"] = np.isinf(srs).sum()
        info["num_elems_removed"] = len(srs) - len(res)
        info["num_nans_removed"] = info["num_nans_before"] - np.isnan(res).sum()
        info["num_infs_removed"] = info["num_infs_before"] - np.isinf(res).sum()
        info["percentage_elems_removed"] = (
            100.0 * info["num_elems_removed"] / info["num_elems_before"]
        )
    return res
# #############################################################################
# Incremental PCA
# #############################################################################
def compute_ipca(
    df: pd.DataFrame,
    num_pc: int,
    tau: float,
    nan_mode: Optional[str] = None,
) -> Tuple[pd.DataFrame, List[pd.DataFrame]]:
    """
    Incremental PCA.

    The dataframe should already be centered.

    https://ieeexplore.ieee.org/document/1217609
    https://www.cse.msu.edu/~weng/research/CCIPCApami.pdf

    :param num_pc: number of principal components to calculate
    :param tau: parameter used in (continuous) compute_ema and compute_ema-derived kernels. For
        typical ranges it is approximately but not exactly equal to the
        center-of-mass (com) associated with an compute_ema kernel.
    :param nan_mode: argument for hdataf.apply_nan_mode()
    :return:
        - df of eigenvalue series (col 0 correspond to max eigenvalue, etc.).
        - list of dfs of unit eigenvectors (0 indexes df eigenvectors
          corresponding to max eigenvalue, etc.).
    """
    dbg.dassert_isinstance(
        num_pc, int, msg="Specify an integral number of principal components."
    )
    dbg.dassert_lt(
        num_pc,
        df.shape[0],
        msg="Number of time steps should exceed number of principal components.",
    )
    dbg.dassert_lte(
        num_pc,
        df.shape[1],
        msg="Dimension should be greater than or equal to the number of principal components.",
    )
    dbg.dassert_lt(0, tau)
    com = calculate_com_from_tau(tau)
    # EMA-style weight derived from the center-of-mass.
    alpha = 1.0 / (com + 1.0)
    _LOG.debug("com = %0.2f", com)
    _LOG.debug("alpha = %0.2f", alpha)
    nan_mode = nan_mode or "fill_with_zero"
    df = df.apply(hdataf.apply_nan_mode, mode=nan_mode)
    # Per-component running eigenvalue estimates (norms of the v's).
    lambdas: Dict[int, list] = {k: [] for k in range(num_pc)}
    # V's are eigenvectors with norm equal to corresponding eigenvalue.
    vs: Dict[int, list] = {k: [] for k in range(num_pc)}
    unit_eigenvecs: Dict[int, list] = {k: [] for k in range(num_pc)}
    # `step` counts how many components have been initialized so far.
    step = 0
    for n in df.index:
        # Initialize u(n).
        u = df.loc[n].copy()
        for i in range(min(num_pc, step + 1)):
            # Initialize ith eigenvector.
            if i == step:
                v = u.copy()
                if np.linalg.norm(v):
                    # Only count the initialization if the residual is
                    # nonzero; otherwise retry on a later row.
                    _LOG.debug("Initializing eigenvector %s...", i)
                    step += 1
            else:
                # Main update step for eigenvector i; `u` becomes the
                # residual passed down to component i + 1.
                u, v = _compute_ipca_step(u, vs[i][-1], alpha)
            # Bookkeeping.
            v.name = n
            vs[i].append(v)
            norm = np.linalg.norm(v)
            lambdas[i].append(norm)
            unit_eigenvecs[i].append(v / norm)
    _LOG.debug("Completed %s steps of incremental PCA.", len(df))
    # Convert lambda dict of lists to list of series.
    # Convert unit_eigenvecs dict of lists to list of dataframes.
    lambdas_srs = []
    unit_eigenvec_dfs = []
    for i in range(num_pc):
        # Later components have shorter histories; align them with the tail
        # of the index.
        lambdas_srs.append(
            pd.Series(index=df.index[-len(lambdas[i]) :], data=lambdas[i])
        )
        unit_eigenvec_dfs.append(pd.concat(unit_eigenvecs[i], axis=1).transpose())
    lambda_df = pd.concat(lambdas_srs, axis=1)
    return lambda_df, unit_eigenvec_dfs
def _compute_ipca_step(
u: pd.Series, v: pd.Series, alpha: float
) -> Tuple[pd.Series, pd.Series]:
"""
Single step of incremental PCA.
At each point, the norm of v is the eigenvalue estimate (for the component
to which u and v refer).
:param u: residualized observation for step n, component i
:param v: unnormalized eigenvector estimate for step n - 1, component i
:param alpha: compute_ema-type weight (choose in [0, 1] and typically < 0.5)
:return: (u_next, v_next), where
* u_next is residualized observation for step n, component i + 1
* v_next is unnormalized eigenvector estimate for step n, component i
"""
if np.linalg.norm(v) == 0:
v_next = v * 0
u_next = u.copy()
else:
v_next = (1 - alpha) * v + alpha * u * np.dot(u, v) / np.linalg.norm(v)
u_next = u - np.dot(u, v) * v / (np.linalg.norm(v) ** 2)
return u_next, v_next
def compute_unit_vector_angular_distance(df: pd.DataFrame) -> pd.Series:
    """
    Compute angular distances between consecutive unit vectors.

    Each row of `df` is interpreted as a unit vector. The returned series
    holds the angular distance between each pair of consecutive rows,
    indexed by the later time point. The angular distance lies in [0, 1].
    """
    mat = df.values
    # For unit vectors, np.diag(mat.dot(mat.T)) would be all 1.'s, so the
    # diagonal of the shifted product below holds cosine similarities.
    cos_sim = np.diag(mat[:-1, :].dot(mat[1:, :].T))
    normalized_angle = np.arccos(cos_sim) / np.pi
    return pd.Series(
        index=df.index[1:], data=normalized_angle, name="angular change"
    )
def compute_eigenvector_diffs(eigenvecs: List[pd.DataFrame]) -> pd.DataFrame:
    """
    Convert a list of eigenvector dataframes into a df of angular distances.

    Column `i` of the output holds the consecutive angular distances of the
    i-th eigenvector series.
    """
    distances = []
    for idx, vec_df in enumerate(eigenvecs):
        dist = compute_unit_vector_angular_distance(vec_df)
        dist.name = idx
        distances.append(dist)
    return pd.concat(distances, axis=1)
# #############################################################################
# Trend + Residual decomposition
# #############################################################################
def get_trend_residual_decomp(
    signal: pd.Series,
    tau: float,
    min_periods: int = 0,
    min_depth: int = 1,
    max_depth: int = 1,
    nan_mode: Optional[str] = None,
) -> pd.DataFrame:
    """
    Decompose a signal into trend + residual.

    - The `trend` warm-up period is set by `min_periods`
      - If `min_periods` is positive, then leading values of `trend` are NaN
    - If `nan_mode = "propagate"`, then `residual` and `trend` are NaN
      whenever at least one is
    - If `nan_mode = "restore_to_residual"`, then `residual` is always non-NaN
      whenever `signal` is
      - E.g., so, in particular, during any `trend` warm-up period,
        `signal = residual` and `signal = trend + residual` always holds
      - However, when the warm-up phase ends, `residual` may experience a
        large jump

    :return: dataframe with columns "trend" and "residual", indexed like
        "signal"
    """
    if nan_mode is None:
        nan_mode = "propagate"
    trend = compute_smooth_moving_average(
        signal, tau, min_periods, min_depth, max_depth
    )
    residual = signal - trend
    if nan_mode == "restore_to_residual":
        # Where detrending produced NaNs (e.g., from setting `min_periods`),
        # fall back to the raw signal values.
        residual.loc[residual.isna()] = signal
    elif nan_mode == "propagate":
        pass
    else:
        raise ValueError(f"Unrecognized nan_mode `{nan_mode}`")
    out = pd.DataFrame(index=signal.index)
    out["trend"] = trend
    out["residual"] = residual
    return out
# #############################################################################
# Discrete wavelet transform
# #############################################################################
def get_swt(
    sig: Union[pd.DataFrame, pd.Series],
    wavelet: Optional[str] = None,
    depth: Optional[int] = None,
    timing_mode: Optional[str] = None,
    output_mode: Optional[str] = None,
) -> Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame]]:
    """
    Get stationary wt details and smooths for all available scales.

    If sig.index.freq == "B", then there is the following rough correspondence
    between wavelet levels and time scales:
      weekly ~ 2-3
      monthly ~ 4-5
      quarterly ~ 6
      annual ~ 8
      business cycle ~ 11

    If sig.index.freq == "T", then the approximate scales are:
      5 min ~ 2-3
      quarter hourly ~ 4
      hourly ~ 6
      daily ~ 10-11

    :param sig: input signal
    :param wavelet: pywt wavelet name, e.g., "db8"
    :param depth: the number of decomposition steps to perform. Corresponds to
        "level" parameter in `pywt.swt`
    :param timing_mode: supported timing modes are
        - "knowledge_time":
            - reindex transform according to knowledge times
            - remove warm-up artifacts
        - "zero_phase":
            - no reindexing (e.g., no phase lag in output, but transform
              timestamps are not necessarily knowledge times)
            - remove warm-up artifacts
        - "raw": `pywt.swt` as-is
    :param output_mode: valid output modes are
        - "tuple": return (smooth_df, detail_df)
        - "smooth": return smooth_df
        - "detail": return detail_df
    :return: see `output_mode`
    """
    # Choice of wavelet may significantly impact results.
    wavelet = wavelet or "haar"
    _LOG.debug("wavelet=`%s`", wavelet)
    if isinstance(sig, pd.DataFrame):
        dbg.dassert_eq(
            sig.shape[1], 1, "Input dataframe must have a single column."
        )
        sig = sig.squeeze()
    if timing_mode is None:
        timing_mode = "knowledge_time"
    _LOG.debug("timing_mode=`%s`", timing_mode)
    if output_mode is None:
        output_mode = "tuple"
    _LOG.debug("output_mode=`%s`", output_mode)
    # Convert to numpy and pad, since the pywt swt implementation
    # requires that the input be a power of 2 in length.
    sig_len = sig.size
    padded = _pad_to_pow_of_2(sig.values)
    # Perform the wavelet decomposition.
    decomp = pywt.swt(padded, wavelet=wavelet, level=depth, norm=True)
    # Ensure we have at least one level.
    levels = len(decomp)
    _LOG.debug("levels=%d", levels)
    dbg.dassert_lt(0, levels)
    # Reorganize wavelet coefficients. `pywt.swt` output is of the form
    #     [(cAn, cDn), ..., (cA2, cD2), (cA1, cD1)]
    smooth, detail = zip(*reversed(decomp))
    # Reorganize `swt` output into a dataframe
    # - columns indexed by `int` wavelet level (1 up to `level`)
    # - index identical to `sig.index` (padded portion deleted)
    detail_dict = {}
    smooth_dict = {}
    for level in range(1, levels + 1):
        detail_dict[level] = detail[level - 1][:sig_len]
        smooth_dict[level] = smooth[level - 1][:sig_len]
    detail_df = pd.DataFrame.from_dict(data=detail_dict)
    detail_df.index = sig.index
    smooth_df = pd.DataFrame.from_dict(data=smooth_dict)
    smooth_df.index = sig.index
    # Record wavelet width (required for removing warm-up artifacts).
    width = len(pywt.Wavelet(wavelet).filter_bank[0])
    _LOG.debug("wavelet width=%s", width)
    if timing_mode == "knowledge_time":
        for j in range(1, levels + 1):
            # Remove "warm-up" artifacts.
            _set_warmup_region_to_nan(detail_df[j], width, j)
            _set_warmup_region_to_nan(smooth_df[j], width, j)
            # Index by knowledge time.
            detail_df[j] = _reindex_by_knowledge_time(detail_df[j], width, j)
            smooth_df[j] = _reindex_by_knowledge_time(smooth_df[j], width, j)
    elif timing_mode == "zero_phase":
        for j in range(1, levels + 1):
            # Delete "warm-up" artifacts.
            _set_warmup_region_to_nan(detail_df[j], width, j)
            _set_warmup_region_to_nan(smooth_df[j], width, j)
    elif timing_mode == "raw":
        return smooth_df, detail_df
    else:
        raise ValueError(f"Unsupported timing_mode `{timing_mode}`")
    # Drop columns that are all-NaNs (e.g., artifacts of padding).
    smooth_df.dropna(how="all", axis=1, inplace=True)
    detail_df.dropna(how="all", axis=1, inplace=True)
    if output_mode == "tuple":
        return smooth_df, detail_df
    if output_mode == "smooth":
        return smooth_df
    if output_mode == "detail":
        return detail_df
    # Bug fix: this message was previously a plain string containing a literal
    # `{output_mode}` placeholder; use an f-string so the actual mode is shown.
    raise ValueError(f"Unsupported output_mode `{output_mode}`")
def get_swt_level(
    sig: Union[pd.DataFrame, pd.Series],
    wavelet: str,
    level: int,
    timing_mode: Optional[str] = None,
    output_mode: Optional[str] = None,
) -> pd.Series:
    """
    Wrap `get_swt` and extract a single wavelet level.

    :param sig: input signal
    :param wavelet: pywt wavelet name, e.g., "db8"
    :param level: the wavelet level to extract
    :param timing_mode: as in `get_swt`
    :param output_mode: valid output modes are
        - "smooth": return smooth_df for `level`
        - "detail": return detail_df for `level`
    :return: see `output_mode`
    """
    dbg.dassert_in(output_mode, ["smooth", "detail"])
    # Decompose exactly down to the requested level.
    decomp = get_swt(
        sig,
        wavelet=wavelet,
        depth=level,
        timing_mode=timing_mode,
        output_mode=output_mode,
    )
    dbg.dassert_in(level, decomp.columns)
    return decomp[level]
def _pad_to_pow_of_2(arr: np.array) -> np.array:
    """
    Zero-pad `arr` on the right so its length is the next power of 2.
    """
    orig_len = arr.shape[0]
    _LOG.debug("signal length=%d", orig_len)
    target_len = int(2 ** np.ceil(np.log2(orig_len)))
    padded = np.pad(arr, (0, target_len - orig_len))
    _LOG.debug("padded length=%d", len(padded))
    return padded
def _set_warmup_region_to_nan(srs: pd.Series, width: int, level: int) -> None:
"""
Remove warm-up artifacts by setting to `NaN`.
NOTE: Modifies `srs` in-place.
:srs: swt
:width: width (length of support of mother wavelet)
:level: wavelet level
"""
srs[: width * 2 ** (level - 1) - width // 2] = np.nan
def _reindex_by_knowledge_time(
srs: pd.Series, width: int, level: int
) -> pd.Series:
"""
Shift series so that indexing is according to knowledge time.
:srs: swt
:width: width (length of support of mother wavelet)
:level: wavelet level
"""
return srs.shift(width * 2 ** (level - 1) - width // 2)
def compute_swt_var(
    sig: Union[pd.DataFrame, pd.Series],
    wavelet: Optional[str] = None,
    depth: Optional[int] = None,
    timing_mode: Optional[str] = None,
    axis: int = 1,
) -> pd.DataFrame:
    """
    Compute swt variance using levels up to `depth`.

    Params as in `get_swt()`.
    """
    if isinstance(sig, pd.Series):
        sig = sig.to_frame()
    dbg.dassert_eq(len(sig.columns), 1)
    # Variance is the covariance of the single column with itself.
    only_col = sig.columns[0]
    result = compute_swt_covar(
        sig,
        col1=only_col,
        col2=only_col,
        wavelet=wavelet,
        depth=depth,
        timing_mode=timing_mode,
        axis=axis,
    )
    dbg.dassert_in("swt_var", result.columns)
    return result
def compute_swt_covar(
    df: pd.DataFrame,
    col1: str,
    col2: str,
    wavelet: Optional[str] = None,
    depth: Optional[int] = None,
    timing_mode: Optional[str] = None,
    axis: int = 1,
) -> pd.DataFrame:
    """
    Compute swt covariance using levels up to `depth`.

    Params as in `get_swt()`.
    """
    details = {
        col: get_swt(
            df[col],
            wavelet=wavelet,
            depth=depth,
            timing_mode=timing_mode,
            output_mode="detail",
        )
        for col in [col1, col2]
    }
    prod = details[col1].multiply(details[col2])
    # Align both coefficient frames to the first row where the product is
    # valid.
    fvi = prod.first_valid_index()
    df1 = details[col1].loc[fvi:]
    df2 = details[col2].loc[fvi:]
    if axis == 0:
        prod = prod.dropna()
        df1 = df1.dropna()
        df2 = df2.dropna()
    var_or_covar_name = "swt_covar" if col1 != col2 else "swt_var"
    results = [prod.sum(axis=axis, skipna=False).rename(var_or_covar_name)]
    if col1 != col2:
        # Alongside the covariance, report the per-column variances.
        for col, col_df in ((col1, df1), (col2, df2)):
            results.append(
                np.square(col_df)
                .sum(axis=axis, skipna=False)
                .rename(str(col) + "_swt_var")
            )
    return pd.concat(results, axis=1)
def compute_swt_sum(
    sig: Union[pd.DataFrame, pd.Series],
    wavelet: Optional[str] = None,
    depth: Optional[int] = None,
    timing_mode: Optional[str] = None,
) -> pd.DataFrame:
    """
    Compute negated sums of swt coefficients using levels up to `depth`.

    Params as in `get_swt()`.
    """
    detail_df = get_swt(
        sig,
        wavelet=wavelet,
        depth=depth,
        timing_mode=timing_mode,
        output_mode="detail",
    )
    summed = -1 * detail_df.sum(axis=1, skipna=False)
    summed.name = "swt_sum"
    return summed.to_frame()
def get_dyadic_zscored(
    sig: pd.Series, demean: bool = False, **kwargs: Any
) -> pd.DataFrame:
    """
    Z-score `sig` at time scales given by successive powers of 2.

    :return: dataframe with cols named according to the exponent of 2. Number
        of cols is determined based on signal length.
    """
    max_exponent = int(np.ceil(np.log2(sig.size)))
    zscored = {
        exponent: compute_rolling_zscore(
            sig, tau=2 ** exponent, demean=demean, **kwargs
        )
        for exponent in range(1, max_exponent)
    }
    return pd.DataFrame.from_dict(zscored)
# #############################################################################
# Resampling
# #############################################################################
def resample(
    data: Union[pd.Series, pd.DataFrame],
    **resample_kwargs: Dict[str, Any],
) -> Union[pd.Series, pd.DataFrame]:
    """
    Execute series resampling with specified `.resample()` arguments.

    The `rule` argument must always be specified; `closed` and `label`
    default to "right" so that resampling behavior is consistent for every
    value of `rule` and is causal: given sampling times t_0 < t_1 < t_2, the
    values at t_1 and t_2 are never incorporated into the resampled value
    timestamped with t_0. Note that this is at odds with what may be
    intuitive for plotting lower-frequency data, e.g., yearly data is
    typically labeled in a plot by the start of the year.

    :data: pd.Series or pd.DataFrame with a datetime index
    :resample_kwargs: arguments for pd.DataFrame.resample
    :return: DatetimeIndexResampler object
    """
    dbg.dassert_in("rule", resample_kwargs, "Argument 'rule' must be specified")
    # Unless specified by the user, the resampling intervals are intended as
    # (a, b] with the label on the right.
    resample_kwargs.setdefault("closed", "right")
    resample_kwargs.setdefault("label", "right")
    _LOG.debug(
        "Resampling data with size=%s using kwargs='%s'",
        str(data.size),
        str(resample_kwargs),
    )
    resampled_data = data.resample(**resample_kwargs)
    _LOG.debug("resampled_data.size=%s", str(resampled_data.size))
    return resampled_data
# #############################################################################
# Special functions
# #############################################################################
def c_infinity(x: float) -> float:
    """
    Evaluate a C-infinity function at `x`.

    The function is zero for x <= 0 and approaches 1 (i.e., exp(0)) as
    x -> infinity.
    """
    if x <= 0:
        return 0
    return np.exp(-1 / x)
def c_infinity_step_function(x: float) -> float:
    """
    Evaluate a C-infinity step function at `x`.

    This function is
      - 0 for x <= 0
      - 1 for x >= 1
    """
    numerator = c_infinity(x)
    complement = c_infinity(1 - x)
    denominator = numerator + complement
    if denominator == 0:
        # Both terms vanished (possible numerically); no defined value.
        return np.nan
    return numerator / denominator
def c_infinity_bump_function(x: float, a: float, b: float) -> float:
    """
    Evaluate a C-infinity bump function at `x`.

    :param x: point at which to evaluate
    :param a: function is 1 between -a and a
    :param b: function is zero for abs(x) >= b
    """
    dbg.dassert_lt(0, a)
    dbg.dassert_lt(a, b)
    # Map x^2 from [a^2, b^2] onto [0, 1], then invert the smooth step.
    scaled = (x ** 2 - a ** 2) / (b ** 2 - a ** 2)
    return 1 - c_infinity_step_function(scaled)
|
from PIL import Image, ImageDraw, ImageFont
import math
def get_widths(columns, sizes, participants_by_column, PARTICIPANTS_PER_COLUMN, COLUMN_MARGIN):
    """
    Compute the total image width and the per-column width.

    :param columns: number of columns
    :param sizes: list of (width, height) text sizes, one entry per participant
    :param participants_by_column: participants split into per-column lists
    :param PARTICIPANTS_PER_COLUMN: maximum participants per column
    :param COLUMN_MARGIN: fractional margin added to each non-last column
    :return: (image_width, max_column_width)
    """
    # The last column is measured on its own: it gets no trailing margin.
    columns_width = []
    last_column_width = 0
    for col_number in range(columns):
        start = col_number * PARTICIPANTS_PER_COLUMN
        text_widths = [
            sizes[start + i][0]
            for i in range(len(participants_by_column[col_number]))
        ]
        column_width = max(text_widths)
        if col_number == columns - 1:
            last_column_width = column_width
        else:
            columns_width.append(round(column_width + column_width * COLUMN_MARGIN))
    # Bug fix: with a single column there are no margin-padded columns, so
    # fall back to the last column's width instead of calling max() on an
    # empty list (which raised ValueError).
    max_column_width = max(columns_width) if columns_width else last_column_width
    image_width = max_column_width * (columns - 1) + last_column_width
    return image_width, max_column_width
def apply_margins_to_image(height_margin, width_margin, height, width):
    """
    Add margins to the image dimensions.

    Each margin is applied twice: top + bottom for the height, left + right
    for the width.

    :return: (height, width) rounded to whole pixels
    """
    total_height = round(height + 2 * height_margin)
    total_width = round(width + 2 * width_margin)
    return total_height, total_width
def draw_names(height, width, height_margin, width_margin, columns, max_name_height, max_column_width, participants_by_column, font):
    """
    Render the participant names onto a white image, column by column.

    Participants for which `isalive()` is False get a red strike-through.
    """
    img = Image.new('RGB', (width, height), (255, 255, 255))
    draw = ImageDraw.Draw(img)
    y = height_margin
    x = width_margin
    for column_number in range(columns):
        for participant in participants_by_column[column_number]:
            draw.text((x, y), participant.name, fill=(0, 0, 0), font=font)
            if not participant.isalive():
                # Strike through the vertical middle of the name slot in red.
                name_w, name_h = font.getsize(participant.name)
                strike_y = y + round(max_name_height / 2)
                draw.line([(x, strike_y), (x + name_w, strike_y)], fill=(255, 0, 0), width=5)
            y += max_name_height
        # Next column: move right, reset to the top margin.
        x += max_column_width
        y = height_margin
    return img
def draw_participants(participants, VERT_AND_HTAL_MARGIN = 0.1, COLUMN_MARGIN = 0.2, PARTICIPANTS_PER_COLUMN = 30):
    """
    Lay out the participant names in columns and return the rendered image.
    """
    columns = math.ceil(len(participants) / PARTICIPANTS_PER_COLUMN)
    participants_by_column = [
        participants[start:start + PARTICIPANTS_PER_COLUMN]
        for start in range(0, len(participants), PARTICIPANTS_PER_COLUMN)
    ]
    font = ImageFont.truetype('arial.ttf', 48)
    sizes = [font.getsize(p.name) for p in participants]
    max_name_height = max(h for _, h in sizes)
    # Image height equals the height of one full column.
    image_height = round(max_name_height * PARTICIPANTS_PER_COLUMN)
    # The same margin value is applied both vertically and horizontally.
    margin = image_height * VERT_AND_HTAL_MARGIN
    image_width, max_column_width = get_widths(
        columns, sizes, participants_by_column, PARTICIPANTS_PER_COLUMN, COLUMN_MARGIN
    )
    image_height, image_width = apply_margins_to_image(
        margin, margin, image_height, image_width
    )
    return draw_names(
        image_height, image_width, margin, margin, columns,
        max_name_height, max_column_width, participants_by_column, font
    )
if __name__ == '__main__':
    class Participant:
        # A named participant that can be marked dead.
        def __init__(self, name):
            self.name = name
            self.alive = True

        def isalive(self):
            return self.alive

    def get_participants():
        # One participant name per line in participants.txt.
        with open('participants.txt', 'r') as f:
            return [Participant(line.replace('\n', '')) for line in f]

    def set_dead_participants(participants):
        # Mark every participant listed in dead.txt as not alive.
        # NOTE: raises IndexError if a listed name is unknown.
        with open('dead.txt', 'r') as f:
            for line in f:
                name = line.replace('\n', '')
                match = [p for p in participants if p.name == name][0]
                match.alive = False

    participants = get_participants()
    set_dead_participants(participants)
    draw_participants(participants).save('participants.png')
|
# by cefuve electronics
# Github: https://www.github.com/cefuve
# Webpage: https://www.cefuve.com
import serial, sys, time
import PySimpleGUI as sg
import serial.tools.list_ports
# Enumerate the serial devices currently attached to the machine.
ports = list(serial.tools.list_ports.comports())
# `puertos` ("ports" in Spanish) collects device names for the combo box.
puertos = []
# `conectado` ("connected") tracks whether a cfvMonitor board is attached.
conectado = False
Arduino = None
#Get port devices
for p in ports:
    puertos.append(p.device)
#Configure app windows
sg.theme('DarkAmber')
# Pin labels for Arduino digital pins D13..D8.
col_1 = [[sg.Text("PIN", size=(3,1))],
         [sg.Text("D13")],
         [sg.Text("D12")],
         [sg.Text("D11")],
         [sg.Text("D10")],
         [sg.Text(" D9")],
         [sg.Text(" D8")]]
# Direction indicators ("in"/"out") for D13..D8, updated via keys d13..d8.
dir_1 = [[sg.Text("DIR", size=(3,1))],
         [sg.Text(" in ",key='d13')],
         [sg.Text(" in ",key='d12')],
         [sg.Text(" in ",key='d11')],
         [sg.Text(" in ",key='d10')],
         [sg.Text(" in ",key='d9')],
         [sg.Text(" in ",key='d8')]]
# State indicators ("[ ]"/"[■]") for D13..D8, updated via keys p13..p8.
state_1 = [[sg.Text("ST", size=(2,1))],
           [sg.Text("[ ]",key='p13')],
           [sg.Text("[ ]",key='p12')],
           [sg.Text("[ ]",key='p11')],
           [sg.Text("[ ]",key='p10')],
           [sg.Text("[ ]",key='p9')],
           [sg.Text("[ ]",key='p8')]]
# Pin labels for Arduino digital pins D7..D2 plus RX/TX.
col_2 = [[sg.Text("PIN", size=(3,1))],
         [sg.Text(" D7 ")],
         [sg.Text(" D6")],
         [sg.Text(" D5")],
         [sg.Text(" D4")],
         [sg.Text(" D3")],
         [sg.Text(" D2")],
         [sg.Text(" RX")],
         [sg.Text(" TX")]]
# Direction indicators for D7..D0 (RX=d1, TX=d0).
dir_2 = [[sg.Text("DIR", size=(3,1))],
         [sg.Text(" in ",key='d7')],
         [sg.Text(" in ",key='d6')],
         [sg.Text(" in ",key='d5')],
         [sg.Text(" in ",key='d4')],
         [sg.Text(" in ",key='d3')],
         [sg.Text(" in ",key='d2')],
         [sg.Text(" in ",key='d1')],
         [sg.Text("out",key='d0')]]
# State indicators for D7..D0.
state_2 = [[sg.Text("ST", size=(2,1))],
           [sg.Text("[ ]",key='p7')],
           [sg.Text("[ ]",key='p6')],
           [sg.Text("[ ]",key='p5')],
           [sg.Text("[ ]",key='p4')],
           [sg.Text("[ ]",key='p3')],
           [sg.Text("[ ]",key='p2')],
           [sg.Text("[ ]",key='p1')],
           [sg.Text("[ ]",key='p0')]]
# Microcontroller register/bit labels shown next to the pin tables.
portd = [[sg.Text("PORTD", size=(6,1))],[sg.Text(" D.5")],[sg.Text(" D.4")],[sg.Text(" D.3")],[sg.Text(" D.2")],[sg.Text(" D.1")],[sg.Text(" D.0")]]
portb = [[sg.Text("PORTB", size=(6,1))],[sg.Text(" B.7")],[sg.Text(" B.6")],[sg.Text(" B.5")],[sg.Text(" B.4")],[sg.Text(" B.3")],[sg.Text(" B.2")],[sg.Text(" B.1")],[sg.Text(" B.0")]]
# Main window layout: port selector on top, then the two pin tables.
layout = [[sg.Text("Select Port:"), sg.Combo(puertos, enable_events=True, key='combo', size=(6,1)), sg.Button("Open")],
          [sg.Text("")],
          [sg.Frame(layout=portd, title='', pad=(0,0)), sg.Frame(layout=col_1, title='', pad=(0,0)), sg.Frame(layout=dir_1, title='', pad=(0,0)), sg.Frame(layout=state_1, title='', pad=(0,0))],
          [sg.Frame(layout=portb, title='', pad=(0,0)), sg.Frame(layout=col_2, title='', pad=(0,0)), sg.Frame(layout=dir_2, title='', pad=(0,0)), sg.Frame(layout=state_2, title='', pad=(0,0))],
          [sg.Text("")],
          [sg.Button("Exit", size=(10,1)), sg.Button("?")]]
center = [[sg.Column(layout, element_justification='center')]]
window = sg.Window("cfvMonitor v2.0", center)
def open_about():
    """Build and return the 'About...' window with usage notes and credits."""
    info_lines = (
        '::[ Information ]::..',
        'PORT is microcontroller register for pin manipulation.',
        'BIT is microcontroller port register address.',
        'PIN is Arduino number id for port\'s bit.',
        'DIR is port\'s pin direction (INPUT or OUTPUT).',
        'ST is port\'s pin state (HIGH or LOW).',
        '',
        '::[ Credits ]::..',
        'by cefuve electronics',
        'www.cefuve.com',
        '2021',
    )
    about_layout = [[sg.Text(line)] for line in info_lines]
    return sg.Window('About...', about_layout)
#Infinite loop: poll the GUI (non-blocking) and the serial port.
while True:
    event, values = window.read(timeout=0)
    #Get info from serial and trim
    if conectado:
        received = ascii(Arduino.readline())
        data = received[2:-5]
        #print(data)
        # BUG FIX: a short or empty read made ``data == ''`` and
        # ``data[0]`` raised IndexError; frames index up to data[14],
        # so require at least 15 characters before parsing.
        if len(data) >= 15:
            if data[0] == 'P':
                # 'P' frame carries 14 pin-state flags for keys p0..p13.
                for i in range(14):
                    if data[i+1] == '1':
                        window['p'+str(i)].update('[■]')
                    else:
                        window['p'+str(i)].update('[ ]')
            if data[0] == 'D':
                # 'D' frame carries direction flags for keys d2..d13.
                for i in range(2,14):
                    if data[i+1] == '1':
                        window['d'+str(i)].update('out ')
                    else:
                        window['d'+str(i)].update(' in ')
    #Connecting to selected port
    if event == "Open" and values['combo'] != "":
        try:
            # BUG FIX: was ``serial.Serial.close()`` — an unbound call on
            # the *class* that always raised and was swallowed; close the
            # previously opened port instance instead.
            Arduino.close()
        except Exception:
            # No port opened yet (NameError) or already closed — ignore.
            pass
        port = values['combo']
        # Probe the port first with a 1 s timeout so a wrong device
        # cannot block forever.
        Arduino = serial.Serial(port, 115200, timeout=1.0)
        data = ascii(Arduino.readline())
        Arduino.close()
        if len(data) > 5:
            conectado = True
            Arduino = serial.Serial(port, 115200)
            sg.popup_auto_close('Connected.', auto_close_duration=1.5)
        else:
            Arduino.close()
            conectado = False
            sg.Popup('No cfvMonitor configured...', keep_on_top=True)
    #Exit button
    if event == "Exit" or event == sg.WIN_CLOSED:
        break
    #About button
    if event == "?":
        about = open_about()
        # NOTE(review): the About window is read once with timeout=0 and
        # never closed — it stays open but unmanaged; confirm intended.
        event2, values2 = about.read(timeout=0)
#Finalize app
window.close()
|
<reponame>LP-CDF/AMi_Image_Analysis<filename>Setup_local.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 20 09:57:50 2020
"""
__date__ = "11-03-2021"
import os
import sys
import argparse
from pathlib import Path
import stat
from utils import _RAWIMAGES
def CreateUninstall(app_path, venv_path):
    '''Create a python script to remove program or most of it.

    app_path and venv_path are strings: the application install directory
    and the virtualenv's executable directory respectively.  Writes an
    executable ``Uninstall.py`` into app_path.
    '''
    file_path = Path(app_path).joinpath("Uninstall.py")
    print(f"Creating uninstall script in {file_path}")
    content = f'''#!/usr/bin/env python3
import os, sys, shutil
from pathlib import Path
from subprocess import call
app_path="{app_path}"
venv_path="{venv_path}"
print("APP_PATH: ", app_path)
venv_path=Path(venv_path).parent
print("VENV_PATH: ", venv_path)
try:
    os.path.isdir(venv_path)
    shutil.rmtree(venv_path)
    print("SUCCESSFULLY removed Python virtual env for AMi_Image_Analysis")
except:
    print("WARNING: %s not Found, exiting."%venv_path)
    sys.exit()
try:
    os.path.isdir(app_path)
    shutil.rmtree(app_path)
    print("SUCCESSFULLY removed AMi_Image_Analysis")
except :
    print("WARNING: %s not found"%app_path)
    sys.exit()
print("------------------------------------------------------")
print("You have to manually remove any Startup Icon or alias")
print("------------------------------------------------------")
'''
    with open(file_path, 'w') as f:
        f.write(content)
    # BUG FIX: previously used the module-level ``st`` set in main() —
    # i.e. the stat of a *different* file (and a NameError when called
    # standalone).  Stat the file we just wrote before chmod'ing it.
    st = os.stat(file_path)
    os.chmod(file_path, st.st_mode | stat.S_IXUSR |
             stat.S_IXGRP | stat.S_IXOTH)
def ChangeSheBang(app_path, filename, python_path):
    '''Rewrite the first line of app_path/filename to point at
    python_path/python and mark the file executable.

    app_path and filename are strings.
    '''
    target = Path(app_path).joinpath(filename)
    with open(target, 'r') as fh:
        content = fh.readlines()
    content[0] = "#!" + python_path + '/python \n'
    with open(target, 'w') as fh:
        fh.writelines(content)
    # add the executable bits for user, group and others
    mode = os.stat(target).st_mode
    os.chmod(target, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
def ChangeRAW(app_path, filename, _string):
    '''Replace the first ``_RAWIMAGES=`` line of app_path/filename with
    ``_RAWIMAGES="<_string>"``; leaves the file untouched if no line
    matches.  app_path and filename are strings, stops at first encounter.
    '''
    target = Path(app_path).joinpath(filename)
    with open(target, 'r') as fh:
        rows = fh.readlines()
    hit = next((pos for pos, row in enumerate(rows) if "_RAWIMAGES=" in row), None)
    if hit is not None:
        rows[hit] = '_RAWIMAGES="%s"\n' % _string
        with open(target, 'w') as fh:
            fh.writelines(rows)
def SetNoPRojectID(_string, _list):
    """Patch the two lines of *_list* containing *_string* (the
    ``self.project=`` assignments in utils.py) with the no-ProjectID
    directory depths.  Mutates and returns *_list*.

    Raises IndexError if fewer than two lines match (as before).
    """
    # BUG FIX: ``_list.index(i)`` returns the position of the *first equal*
    # element, so two textually identical matching lines both resolved to
    # the first index and the second line was never patched.  enumerate()
    # records each line's true position.
    indexes = [pos for pos, line in enumerate(_list) if _string in line]
    _list[indexes[0]] = " self.project=directory.parts[-4] #-5 if projectID is set. or -4\n"
    _list[indexes[1]] = " self.project=directory.parts[-3] #-4 if projectID is set. or -3\n"
    return _list
def main(args=None):
    """Local installer: write the launcher shell script, patch helper
    scripts' RAW-image constant and shebangs, optionally drop the
    ProjectID tree level in utils.py, and create the uninstaller.

    args: optional argv list forwarded to argparse (None -> sys.argv).
    """
    # ``st`` is declared global and is also read by CreateUninstall() in
    # this file — keep the os.stat() assignment below before that call.
    global st
    parser = argparse.ArgumentParser(prog=__name__,
                                     description='Creates virtual Python '
                                                 'environments in one or '
                                                 'more target '
                                                 'directories.')
    parser.add_argument('--no-ProjectID', default=False,
                        action='store_true', dest='noprojectID',
                        help="Don't use ProjectID in tree")
    options = parser.parse_args(args)
    print("within Setup_local.py OPTIONS noprojectID is", options.noprojectID)
    # name of the venv activation script differs on Windows
    activate_venv = {'linux': 'activate', 'darwin': 'activate',
                     'win32': 'activate.bat'}[sys.platform]
    python_path = os.path.join(os.path.dirname(sys.executable))
    app_path = os.path.abspath(os.path.dirname(sys.argv[0]))
    # launcher script; assumes app_path/bin already exists — TODO confirm
    file_path = Path(app_path).joinpath("bin", "AMI_Image_Analysis.sh")
    with open(file_path, 'w') as f:
        f.write('''#!/usr/bin/env bash
#File generated with Setup_local.py
virtenv="%s"
. ${virtenv}/%s
#DO NOT EDIT the next THREE LINES
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
parentdir="$(dirname "$DIR")"
python3 $parentdir/AMi_Image_Analysis.py
deactivate''' % (python_path, activate_venv))
    # _list is [(path,filename,True/false for ChangeRaw, True/false for ChangeSheBang)]
    _list = [(app_path, "utils.py", True, False),
             (app_path+'/tools/', "Merge_AllNewPlates.py", True, True),
             (app_path+'/tools/', "Merge_Zstack.py", True, True),
             (app_path+'/tools/', "SaveDiskSpace.py", True, True),
             (app_path, "autocrop.py", False, True),
             (app_path, "Check_Circle_detection.py", False, True)]
    # Change _RAWIMAGES to adapt to maybe different but compatible microscope softwares
    for i in _list:
        if i[2] is True:
            ChangeRAW(i[0], i[1], _RAWIMAGES)
    # do not use Set ProjectID if --no-ProjectID
    # NOTE(review): utils.py is opened relative to the CWD here, while the
    # entries above use app_path — confirm the script is run from app_path.
    with open('utils.py', 'r') as f:
        lines = f.readlines()
    if options.noprojectID is True:
        NEW = SetNoPRojectID("self.project", lines)
        with open("utils.py", 'w') as f:
            for l in NEW:
                f.write(l)
    # make the launcher executable for everyone
    st = os.stat(file_path)
    os.chmod(file_path, st.st_mode | stat.S_IXUSR |
             stat.S_IXGRP | stat.S_IXOTH)
    # Change shebang for some files that can be used in terminal and use openCV
    for i in _list:
        if i[3] is True:
            ChangeSheBang(i[0], i[1], python_path)
    if sys.platform == 'linux':
        # generate a freedesktop .desktop launcher next to the app
        file_path = Path(app_path).joinpath("AMi_IA.desktop")
        lines = f'''[Desktop Entry]
Name=AMi_Image_Analysis
Comment=Run AMI_Image_Analysis
Exec={Path(app_path).joinpath("bin", "AMI_Image_Analysis.sh")}
Icon={Path(app_path).joinpath("AMi_IA.png")}
Terminal=true
Type=Application'''
        with open(file_path, 'w') as f:
            for l in lines:
                f.write(l)
        print(f'''
------------------------------------------------------
If you want you can put an icon on your desktop by issuing the following command
cp {file_path} {os.path.expanduser("~/Desktop")}/.
------------------------------------------------------''')
    if sys.platform == 'linux' or sys.platform == 'darwin':
        file_path = Path(app_path).joinpath("bin", "AMI_Image_Analysis.sh")
        print(f'''
------------------------------------------------------
Recommended: create an alias in your .bashrc or .bash_profile with:
alias AMI_IMage_Analysis='{file_path}'
------------------------------------------------------
''')
    CreateUninstall(app_path, python_path)
    print("\nInstallation Finished.\n")
    print("------------------------------------------------------")
if __name__ == '__main__':
    # Exit with 0 only when main() completed without raising.
    exit_code = 1
    try:
        main()
    except Exception as exc:
        print('Error: %s' % exc, file=sys.stderr)
    else:
        exit_code = 0
    sys.exit(exit_code)
|
<reponame>yourmoonlight/maro
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from statistics import mean
import numpy as np
from maro.simulator import Env
from maro.rl import AgentManagerMode, Scheduler, SimpleActor, SimpleLearner
from maro.utils import LogFormat, Logger, convert_dottable
from components import CIMActionShaper, CIMStateShaper, POAgentManager, TruncatedExperienceShaper, create_po_agents
class EarlyStoppingChecker:
    """Callable that inspects the performance history and decides whether
    training should stop early.

    Args:
        warmup_ep (int): Episode from which early stopping checking is initiated.
        last_k (int): Number of latest performance records to check.
        perf_threshold (float): The mean of the ``last_k`` metric values must
            exceed this value to trigger early stopping.
        perf_stability_threshold (float): The largest relative one-step change
            over the ``last_k`` metric values must stay below this value to
            trigger early stopping.
    """
    def __init__(self, warmup_ep: int, last_k: int, perf_threshold: float, perf_stability_threshold: float):
        self._warmup_ep = warmup_ep
        self._last_k = last_k
        self._perf_threshold = perf_threshold
        self._perf_stability_threshold = perf_stability_threshold
        # metric = fulfilled fraction of orders, derived from one record
        self._metric_func = lambda record: 1 - record["container_shortage"] / record["order_requirements"]

    def __call__(self, perf_history) -> bool:
        # not enough history yet: neither warmed up nor k records available
        if len(perf_history) < max(self._last_k, self._warmup_ep):
            return False
        metric_series = [self._metric_func(record) for record in perf_history[-self._last_k:]]
        # largest relative change between consecutive metric values
        max_delta = max(
            abs(curr - prev) / prev
            for prev, curr in zip(metric_series, metric_series[1:])
        )
        print(f"mean_metric: {mean(metric_series)}, max_delta: {max_delta}")
        return mean(metric_series) > self._perf_threshold and max_delta < self._perf_stability_threshold
def launch(config):
    """Train and evaluate policy-optimization agents on the CIM scenario.

    config: a dottable-convertible mapping with ``env``, ``agents`` and
    ``main_loop`` sections; trained models are dumped to ./models.
    """
    # First determine the input dimension and add it to the config.
    config = convert_dottable(config)
    # Step 1: initialize a CIM environment for using a toy dataset.
    env = Env(config.env.scenario, config.env.topology, durations=config.env.durations)
    agent_id_list = [str(agent_id) for agent_id in env.agent_idx_list]
    # Step 2: create state, action and experience shapers. We also need to create an explorer here due to the
    # greedy nature of the DQN algorithm.
    state_shaper = CIMStateShaper(**config.env.state_shaping)
    # discretize the continuous action space into num_actions levels in [-1, 1]
    action_shaper = CIMActionShaper(action_space=list(np.linspace(-1.0, 1.0, config.agents.num_actions)))
    experience_shaper = TruncatedExperienceShaper(**config.env.experience_shaping)
    # Step 3: create an agent manager.
    # the state shaper determines the agents' network input dimension
    config["agents"]["input_dim"] = state_shaper.dim
    agent_manager = POAgentManager(
        name="cim_learner",
        mode=AgentManagerMode.TRAIN_INFERENCE,
        agent_dict=create_po_agents(agent_id_list, config.agents),
        state_shaper=state_shaper,
        action_shaper=action_shaper,
        experience_shaper=experience_shaper,
    )
    # Step 4: Create an actor and a learner to start the training process.
    scheduler = Scheduler(
        config.main_loop.max_episode,
        early_stopping_checker=EarlyStoppingChecker(**config.main_loop.early_stopping)
    )
    actor = SimpleActor(env, agent_manager)
    learner = SimpleLearner(
        agent_manager, actor, scheduler,
        logger=Logger("cim_learner", format_=LogFormat.simple, auto_timestamp=False)
    )
    learner.learn()
    learner.test()
    learner.dump_models(os.path.join(os.getcwd(), "models"))
if __name__ == "__main__":
    # config is defined by the accompanying components package
    from components.config import config
    launch(config)
|
# Dataset switch: when True the MNIST toy loader is used instead of the IAM
# handwriting dataset; both are imported under the common alias ``IAM``.
MNIST = False
if MNIST:
    import mnist as IAM
else:
    #import IAM_data as IAM
    import IAM_data_words as IAM
import numpy as np
import os
# Maximum decoder output length (excluding the GO token) per sample.
if MNIST:
    OUTPUT_MAX_LEN_TEXTLINE = IAM.MAX_DIGITS_TOTAL
else:
    # OUTPUT_MAX_LEN_TEXTLINE = 80 # textline
    OUTPUT_MAX_LEN_TEXTLINE = 20 # word
BATCH_SIZE = 32
# Number of 2x pooling stages in the encoder; image widths shrink by 2**MAX_POOL_NUM.
MAX_POOL_NUM = 3
class preProcess():
    """Load the IAM (or MNIST) dataset and package it for seq2seq training.

    Builds one dict per sample for the train/valid splits, pads labels with
    GO/END/PAD tokens, and exposes infinite batch samplers.
    """
    def __init__(self):
        #n_classes, num_train, num_valid, num_test, self.datasets = IAM.getData(64, 64, 64)
        n_classes, num_train, num_valid, num_test, self.datasets = IAM.getData(None, None, None)
        self.output_max_len = OUTPUT_MAX_LEN_TEXTLINE
        max_pool_num = MAX_POOL_NUM
        # special token ids; real character classes are shifted up by num_tokens
        self.tokens = {'GO_TOKEN': 0, 'END_TOKEN': 1, 'PAD_TOKEN': 2}
        self.num_tokens = len(self.tokens.keys())
        self.height = IAM.IMG_HEIGHT
        self.width = IAM.IMG_WIDTH
        self.batch_size = BATCH_SIZE
        self.vocab_size = n_classes + self.num_tokens
        self.num_train = num_train
        self.num_valid = num_valid
        _, _, self.index2letter = IAM.labelDictionary()
        self.n_per_epoch = self.num_train // self.batch_size
        self.n_per_epoch_t = self.num_valid // self.batch_size
        self.total_data_train, self.total_data_valid = self.processData(max_pool_num)

    def label_padding(self, labels, num_tokens):
        """Wrap each label in GO/END, pad with PAD to output_max_len+1, and
        build 0/1 loss-mask weights.  Returns (padded_labels, weights)."""
        new_labels = []
        new_label_len = []
        for l in labels:
            # pad count; assumes len(l) <= output_max_len - 1 — TODO confirm upstream
            num = self.output_max_len - len(l) - 1
            new_label_len.append(len(l)+2)
            l = np.array(l) + num_tokens
            l = list(l)
            l = [self.tokens['GO_TOKEN']] + l + [self.tokens['END_TOKEN']]
            if not num == 0:
                l.extend([self.tokens['PAD_TOKEN']] * num) # replace PAD_TOKEN
            new_labels.append(l)

        def make_weights(seq_lens, output_max_len):
            # weight 1 over real tokens (incl. GO/END), 0 over the padding
            new_out = []
            for i in seq_lens:
                ele = [1]*i + [0]*(output_max_len + 1 -i)
                new_out.append(ele)
            return new_out
        return new_labels, make_weights(new_label_len, self.output_max_len)

    def processData(self, max_pool_num):
        """Assemble the per-sample dicts for the train and valid splits.

        NOTE(review): the test split is unpacked but never packaged here.
        """
        trainImg, seqLen_train, trainLabel, validationImg, seqLen_validation, validationLabel, testImg, seqLen_test, testLabel = self.datasets
        # image widths shrink by 2**max_pool_num through the pooling stages
        seqLen_train = self.proper_seq_len(seqLen_train, 2**max_pool_num)
        seqLen_validation = self.proper_seq_len(seqLen_validation, 2**max_pool_num)
        seqLen_test = self.proper_seq_len(seqLen_test, 2**max_pool_num)
        trainLabel, trainLabel_mask = self.label_padding(trainLabel, self.num_tokens)
        validationLabel, validationLabel_mask = self.label_padding(validationLabel, self.num_tokens)
        total_data_train = []
        for i in range(self.num_train):
            data_train = dict()
            data_train['index'] = i
            data_train['img'] = trainImg[i]
            data_train['img_len'] = seqLen_train[i]
            data_train['label'] = trainLabel[i]
            data_train['label_mask'] = trainLabel_mask[i]
            total_data_train.append(data_train)
        total_data_valid = []
        for i in range(self.num_valid):
            data_valid = dict()
            data_valid['index'] = i
            data_valid['img'] = validationImg[i]
            data_valid['img_len'] = seqLen_validation[i]
            data_valid['label'] = validationLabel[i]
            data_valid['label_mask'] = validationLabel_mask[i]
            total_data_valid.append(data_valid)
        return total_data_train, total_data_valid

    def shuffle(self):
        """In-place shuffle of the training sample list."""
        np.random.shuffle(self.total_data_train)

    # data: [{'index':, 'img':, 'img_len':, 'label':, 'label_mask':}]
    def createGT(self, train=True):
        """Write ground-truth lines ('index text') to pred_logs/ for the
        chosen split, decoding label ids back into characters."""
        if not os.path.exists('pred_logs'):
            os.makedirs('pred_logs')
        if train:
            file_name = 'pred_logs/train_groundtruth.dat'
            #total_num = self.num_train
            data = self.total_data_train
        else:
            file_name = 'pred_logs/test_groundtruth.dat'
            #total_num = self.num_valid
            data = self.total_data_valid
        with open(file_name, 'w') as f:
            #num = total_num - (total_num%self.batch_size)
            #for i in range(num):
            #    element = data[i]
            for element in data:
                f.write(str(element['index'])+' ')
                for i in element['label'][1:]: # remove the first <GO>
                    if i == self.tokens['END_TOKEN']:
                        break
                    else:
                        if i == self.tokens['GO_TOKEN']:
                            f.write('<GO>')
                        elif i == self.tokens['PAD_TOKEN']:
                            f.write('<PAD>')
                        else:
                            # shift back past the special tokens to a character id
                            f.write(self.index2letter[i-self.num_tokens])
                f.write('\n')

    def sampler(self): # should be shuffled before call this func
        """Infinite generator of training batches: a dict of parallel lists,
        dropping the trailing partial batch each pass."""
        data_train = self.total_data_train
        batches = self.num_train // self.batch_size
        while True:
            for i in range(batches):
                data_slice = data_train[i*self.batch_size: (i+1)*self.batch_size]
                index = []
                in_data = []
                out_data = []
                out_data_mask = []
                in_len = []
                for d in data_slice:
                    index.append(d['index'])
                    in_data.append(d['img'])
                    out_data.append(d['label'])
                    out_data_mask.append(d['label_mask'])
                    in_len.append(d['img_len'])
                yield {'index_sa': index, 'input_sa': in_data, 'output_sa': out_data, 'out_len_sa': out_data_mask, 'in_len_sa': in_len}

    def sampler_t(self):
        """Infinite generator of validation batches (keys suffixed ``_t``)."""
        data_valid = self.total_data_valid
        batches = self.num_valid // self.batch_size
        while True:
            for i in range(batches):
                data_slice = data_valid[i*self.batch_size: (i+1)*self.batch_size]
                index = []
                in_data = []
                out_data = []
                out_data_mask = []
                in_len = []
                for d in data_slice:
                    index.append(d['index'])
                    in_data.append(d['img'])
                    out_data.append(d['label'])
                    out_data_mask.append(d['label_mask'])
                    in_len.append(d['img_len'])
                yield {'index_sa_t': index, 'input_sa_t': in_data, 'output_sa_t': out_data, 'out_len_sa_t': out_data_mask, 'in_len_sa_t': in_len}

    def proper_seq_len(self, seqLen, timeRatio):
        """Scale raw sequence lengths down by the encoder's pooling factor."""
        return [int(l/timeRatio) for l in seqLen]
if __name__ == '__main__':
    # Smoke test: build the dataset, dump ground truth for both splits,
    # and print the labels of one training batch.
    dataModel = preProcess()
    dataModel.createGT(True)
    dataModel.createGT(False)
    sample = dataModel.sampler()
    data = sample.__next__()
    print(len(data['output_sa']))
    print(data['output_sa'])
|
import os
import os.path
import sys
from maintenance.stubs import packagestubs
def pymelstubs(extensions=('py', 'pypredef', 'pi'),
               modules=('pymel', 'maya', 'PySide2', 'shiboken2'),
               skip_module_regex=None,
               pyRealUtil=False):
    """ Builds pymel stub files for autocompletion.

    Can build Python Interface files (pi) with extension='pi' for IDEs like
    wing.

    extensions: stub formats to emit; modules: packages to stub;
    pyRealUtil: also emit a 'py' stub tree containing the real pymel.util.
    Returns the output directory path.
    """
    buildFailures = []
    pymeldir = os.path.dirname(os.path.dirname(sys.modules[__name__].__file__))
    outputdir = os.path.join(pymeldir, 'extras', 'completion')
    print "Stub output dir:", outputdir
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    # imports that must not be followed when stubbing these modules
    importExclusions = {
        'pymel.api': set(['pymel.internal.apicache']),
        'pymel': set(['pymel.all']),
        'maya.precomp': set(['precompmodule']),
    }

    def filterImports(current, modules, imported, importall_modules):
        # hook handed to packagestubs to rewrite each module's import lists
        if importall_modules:  # from MODULE import *
            # special-case handling for pymel.internal.pmcmds, which ends up
            # with a bunch of 'from pymel.core.X import *' commands
            if current == 'pymel.internal.pmcmds':
                importall_modules = [
                    x for x in importall_modules
                    if not getattr(x, '__name__', 'pymel.core').startswith
                    ('pymel.core')]
                imported = [(obj, names, source_module)
                            for obj, names, source_module in imported
                            if not getattr(source_module, '__name__',
                                           'pymel.core').startswith
                            ('pymel.core')]
                if not any(x.__name__ == 'maya.cmds' for x in
                           importall_modules):
                    import maya.cmds
                    importall_modules.append(maya.cmds)
        return modules, imported, importall_modules

    for modulename in modules:
        try:
            print "making stubs for: %s" % modulename
            packagestubs(modulename, outputdir=outputdir, extensions=extensions,
                         skip_module_regex=skip_module_regex,
                         import_exclusions=importExclusions,
                         import_filter=filterImports,
                         debugmodules={'pymel.core'}, stubmodules=modules)
        except Exception as err:
            # collect failures and keep stubbing the remaining modules
            import traceback
            buildFailures.append((modulename, err, traceback.format_exc()))

    if pyRealUtil:
        # build a copy of 'py' stubs, that have a REAL copy of pymel.util...
        # useful to put on the path of non-maya python interpreters, in
        # situations where you want to be able to import the "dummy" maya/pymel
        # stubs, but still have acces to the handy non-maya-required pymel.util
        def copyDir(src, dest):
            # replace dest with a fresh copy of src
            # ignore if the source dir doesn't exist...
            if os.path.isdir(src):
                import shutil
                if os.path.isdir(dest):
                    shutil.rmtree(dest)
                elif os.path.isfile(dest):
                    raise RuntimeError(
                        "A file called %s existed (expected a dir "
                        "or nothing)" % dest)
                shutil.copytree(src, dest)
            elif os.path.isfile(src):
                raise RuntimeError(
                    "A file called %s existed (expected a dir "
                    "or nothing)" % src)

        pyDir = os.path.join(outputdir, 'py')
        pyRealUtilDir = os.path.join(outputdir, 'pyRealUtil')
        print "creating %s" % pyRealUtilDir
        copyDir(pyDir, pyRealUtilDir)
        srcUtilDir = os.path.join(pymeldir, 'pymel', 'util')
        destUtilDir = os.path.join(pyRealUtilDir, 'pymel', 'util')
        copyDir(srcUtilDir, destUtilDir)

    if buildFailures:
        indent = ' '
        print "WARNING! Module specified failed to build :"
        for failedModule, err, traceStr in buildFailures:
            print "{}{} - {}".format(indent, failedModule, err)
            print indent * 2 + traceStr.replace('\n', '\n' + indent * 2)
        print "(Try specify different list of modules for 'modules' keyword " \
              "argument)"
    return outputdir
|
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import json
import os
import re
from nltk.corpus import stopwords
from gensim.utils import simple_preprocess
import spacy
from gensim.models.phrases import Phrases, Phraser
def set_as_continous(video_id, channel_dir):
    """Load channel_dir/<video_id>.json and join all caption fragments'
    'text' fields into one string (each fragment prefixed with a space)."""
    transcript_path = os.path.join(channel_dir, str(video_id + '.json'))
    with open(transcript_path, 'r') as handle:
        fragments = json.load(handle)
    return "".join(" " + fragment['text'] for fragment in fragments)
@click.command()
@click.argument('input_channel')
# BUG FIX: the two decorator lines below arrived garbled ("<EMAIL>..." — an
# over-eager redaction of "@click.argument"), which is a SyntaxError.  They
# are restored as comments: the active main() takes only input_channel, so
# enabling them would pass positional arguments it does not accept.
#@click.argument('input_filepath', type=click.Path(exists=True))
#@click.argument('output_filepath', type=click.Path(),)
#def main(input_filepath, output_filepath):
def main(input_channel): #="nptelhrd"
    """ Runs data processing scripts to turn raw data from (../raw) into
    cleaned data ready to be analyzed (saved in ../interim).
    """
    logger = logging.getLogger(__name__)
    logger.info('making Intermidiate data set from raw data')
    #spacy_nlp = spacy.load('en_core_web_sm')
    stop_words = stopwords.words('english')
    # raw_dir / interim_dir are module globals set in the __main__ block
    channel_dir = os.path.join(raw_dir,"transcripts",input_channel)
    videos_transcript_file = os.listdir(channel_dir)
    channel_intermid_dir = os.path.join(interim_dir,input_channel)
    if not os.path.exists(channel_intermid_dir):
        os.mkdir(channel_intermid_dir)
    else:
        print("\tThe folder of the channel %s is already exist\n"
              "\tdelete it before executing this script -"
              "we don't want to override your data" %(channel_intermid_dir))
        return
    # PERF: load the spaCy model once, not per video (loop-invariant).
    nlp = spacy.load('en',disable=['parser','ner'])
    for file in videos_transcript_file:
        channel_intermid_file = os.path.join(channel_intermid_dir,file)
        video_id = file.split('.')[0]
        text = set_as_continous(video_id,channel_dir)
        # ---- Processing the data ----
        print('Set the text as lower cases')
        text = text.lower()
        document_text = text.split('.')
        print('Remove special character')
        document_text =list(map(lambda d: re.sub(r"[-()\"#/@;:<>{}`+=~|!?,\n]", " ", d),document_text)) #
        print("Tokinzing each sentence")
        tokenized_documents = list(map(lambda doc: simple_preprocess(doc,deacc=True),document_text))
        print("Remove stop words")
        tokenized_documents_non_stop_words = [[word for word in doc if word not in stop_words] for doc in tokenized_documents]
        # form Bigrams - make sure u read the full documantation to utilize the framework as should be!
        # Save the bigram model to a picke file
        print("Form Bigrams")
        bigram = Phrases(tokenized_documents, min_count=5, threshold=100) # higher threshold fewer phrases.
        #trigram = Phrases(bigram[tokenized_documents], threshold=100)
        bigram_mod = Phraser(bigram)
        #trigram_mod = Phraser(trigram)
        documents_bigrams = [bigram_mod[doc] for doc in tokenized_documents_non_stop_words]
        print("Form lemmatization")
        documents_lemmatized = []
        # keep only content words
        allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']
        for sent in documents_bigrams:
            doc = nlp(" ".join(sent))
            current_doc = [token.lemma_ for token in doc if token.pos_ in allowed_postags]
            if len(current_doc) > 0:
                documents_lemmatized.append(current_doc)
        print("Writing to a file the intermidate data")
        with open(channel_intermid_file,'w') as f:
            json.dump(documents_lemmatized,f)
        print('Finish processing video %s' %(video_id))
if __name__ == '__main__':
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    # not used in this stub but often useful for finding various files
    project_dir = Path(__file__).resolve().parents[2]
    # module-level paths read by main() above
    raw_dir = os.path.join(project_dir,"data","raw")
    interim_dir = os.path.join(project_dir,"data","interim")
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())
    main()
|
<reponame>Westlake-AI/openmixup<gh_stars>1-10
import torch.nn as nn
from mmcv.cnn import kaiming_init, normal_init, ConvModule
from ..registry import NECKS
@NECKS.register_module
class ConvNeck(nn.Module):
    """The N layers conv neck: [conv-norm-act] - conv-{norm}.

    Args:
        in_channels (int): Channels of the input feature map.
        hid_channels (int): Channels of the hidden feature channel.
        out_channels (int): Channels of the output feature channel.
        num_layers (int): The number of convolution layers.
        kernel_size (int): Kernel size of the convolution layer.
        stride (int): Stride of the convolution layer.
        conv_cfg (dict): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='GELU').
        with_bias (bool): Whether to use 'bias' in each conv layer. Default: False.
        with_avg_pool (bool): Whether to add a global average pooling layer in the
            output. Default: False.
        with_last_norm (bool): Whether to add a norm layer in the output. Default: False.
        with_last_dropout (float or dict): Probability of an element to be zeroed in
            the output, or dict config for dropout.
            Default: 0.0.
        with_residual (bool, optional): Add resudual connection.
            Default: False.
        with_pixel_shuffle (bool or int): Whether to use nn.PixelShuffle() to
            upsampling to feature maps. Default: False (0).
    """

    def __init__(self,
                 in_channels,
                 hid_channels,
                 out_channels,
                 num_layers=2,
                 kernel_size=1,
                 stride=1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='ELU'),
                 with_bias=False,
                 with_avg_pool=False,
                 with_last_norm=False,
                 with_last_dropout=0.,
                 with_residual=False,
                 with_pixel_shuffle=False,
                 **kwargs):
        super(ConvNeck, self).__init__()
        # basic args
        self.in_channels = int(in_channels)
        self.hid_channels = int(hid_channels)
        self.out_channels = int(out_channels)
        self.num_layers = int(num_layers)
        self.kernel_size = int(kernel_size)
        self.stride = int(stride)
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        assert conv_cfg is None or isinstance(conv_cfg, dict)
        assert norm_cfg is None or isinstance(norm_cfg, dict)
        assert act_cfg is None or isinstance(act_cfg, dict)
        assert kernel_size >= 1 and stride >= 1
        # specific for ssl
        self.with_bias = bool(with_bias)
        self.with_avg_pool = bool(with_avg_pool)
        self.with_last_norm = bool(with_last_norm)
        self.with_residual = bool(with_residual)
        self.with_pixel_shuffle = int(with_pixel_shuffle)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) \
            if with_avg_pool else nn.Identity()
        # dropout: either a dict config (e.g. dict(type='Dropout', prob=0.1))
        # or a plain float probability; 0 disables it.
        if isinstance(with_last_dropout, dict):
            _type = with_last_dropout.pop('type', None)
            _prob = with_last_dropout.pop('prob', 0.)
            assert 0 < _prob and _prob < 1 and \
                _type in ["Dropout", "AlphaDropout", "FeatureAlphaDropout"]
            # NOTE(review): eval() on a config-provided type string — the
            # assert above restricts it to the three whitelisted classes.
            self.dropout = eval("nn.{}".format(_type))(_prob)
        elif float(with_last_dropout) > 0:
            assert float(with_last_dropout) < 1.
            self.dropout = nn.Dropout(float(with_last_dropout))
        else:
            self.dropout = nn.Identity()
        # build FFN: [conv-norm-act] x (N-1) layers, then conv with
        # optional norm and no activation on the last layer.
        layers = []
        for i in range(num_layers):
            layers.append(
                ConvModule(
                    in_channels=in_channels if i == 0 else hid_channels,
                    out_channels=hid_channels if i != num_layers-1 else out_channels,
                    kernel_size=kernel_size,
                    stride=stride,
                    padding=kernel_size // 2,
                    bias=with_bias,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg if i != num_layers-1 or with_last_norm else None,
                    act_cfg=act_cfg if (i != num_layers-1) or (num_layers == 1) else None
                ))
        if with_pixel_shuffle >= 2:
            assert with_pixel_shuffle % 2 == 0
            layers.append(nn.PixelShuffle(int(with_pixel_shuffle)))
        self.conv = nn.Sequential(*layers)
        self.init_weights()

    def init_weights(self, init_linear='normal', std=0.01, bias=0.):
        """Initialize conv/linear weights ('normal' or 'kaiming') and set
        norm layers to weight 1 / bias 0."""
        assert init_linear in ['normal', 'kaiming'], \
            "Undefined init_linear: {}".format(init_linear)
        for m in self.modules():
            if isinstance(m, (nn.Linear, nn.Conv2d)):
                if init_linear == 'normal':
                    normal_init(m, std=std, bias=bias)
                else:
                    kaiming_init(m, mode='fan_in', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.LayerNorm, nn.GroupNorm, nn.SyncBatchNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """x: single-element list/tuple of feature maps; returns [output]."""
        assert len(x) == 1, "Got: {}".format(len(x))
        res = x[0]
        x = self.dropout(self.conv(x[0]))
        if self.with_avg_pool:
            # global pool then flatten to (N, C)
            x = self.avgpool(x).view(x.size(0), -1)
        if self.with_residual:
            x = x + res
        return [x]
|
#-*- encoding: utf-8 -*-
# python client for openstf STFService & Agent.
#
# Api:
# start(adbprefix=None, service_port=1100, agent_port=1090)
# stop(adbprefix=None, service_port=1100, agent_port=1090)
#
# wake() # return None
# type(text) # return None
# ascii_type(text) # return None
# keyevent(key, event='PRESS') # return None
# keyboard(char, holdtime=None) # return None
#
# identify() # return bool(success or not)
# set_rotation(rotation, lock=False) # return bool(success or not)
# set_wifi_enabled(True) # return bool(success or not)
# get_wifi_status() # return a dict
# get_display() # return a dict
# get_properties(*args) # return a dict
#
# on_battery_event(callback) # callback accept a dict argument
# on_rotation_event(callback) # callback accept a int argument
# on_connectivity_event(callback) # callback accept a dict argument
import time
import subprocess
import select
import socket
import traceback
import Queue
import threading
import warnings
#from google.protobuf.descriptor import FieldDescriptor
from google.protobuf.internal import encoder, decoder
import stfwire_pb2 as wire
import keycode
__all__ = ['start', 'stop', 'isalive', 'identify', 'type', 'keyevent', 'get_wifi_status', 'set_wifi_enabled',
'set_rotation', 'get_display', 'get_properties', 'on_battery_event', 'on_rotation_event']
# Wire-protocol dispatch table: message type id -> (request class, response
# class).  A ``None`` request class marks a server-initiated event; a
# ``None`` response class marks a fire-and-forget request with no reply.
messages = {
    wire.DO_IDENTIFY : (wire.DoIdentifyRequest, wire.DoIdentifyResponse),
    wire.DO_KEYEVENT : (wire.KeyEventRequest, None),
    wire.DO_TYPE: (wire.DoTypeRequest, None),
    wire.DO_WAKE: (wire.DoWakeRequest, None),
    wire.DO_ADD_ACCOUNT_MENU: (wire.DoAddAccountMenuRequest, wire.DoAddAccountMenuResponse),
    wire.DO_REMOVE_ACCOUNT: (wire.DoRemoveAccountRequest, wire.DoRemoveAccountResponse),
    wire.GET_ACCOUNTS: (wire.GetAccountsRequest, wire.GetAccountsResponse),
    wire.GET_BROWSERS: (wire.GetBrowsersRequest, wire.GetBrowsersResponse),
    wire.GET_CLIPBOARD: (wire.GetClipboardRequest, wire.GetClipboardResponse),
    wire.GET_DISPLAY: (wire.GetDisplayRequest, wire.GetDisplayResponse),
    wire.GET_PROPERTIES: (wire.GetPropertiesRequest, wire.GetPropertiesResponse),
    wire.GET_RINGER_MODE: (wire.GetRingerModeRequest, wire.GetRingerModeResponse),
    wire.GET_SD_STATUS: (wire.GetSdStatusRequest, wire.GetSdStatusResponse),
    wire.GET_VERSION: (wire.GetVersionRequest, wire.GetVersionResponse),
    wire.GET_WIFI_STATUS: (wire.GetWifiStatusRequest, wire.GetWifiStatusResponse),
    wire.SET_CLIPBOARD: (wire.SetClipboardRequest, wire.SetClipboardResponse),
    wire.SET_KEYGUARD_STATE: (wire.SetKeyguardStateRequest, wire.SetKeyguardStateResponse),
    wire.SET_RINGER_MODE: (wire.SetRingerModeRequest, wire.SetRingerModeResponse),
    wire.SET_ROTATION: (wire.SetRotationRequest, None),
    wire.SET_WAKE_LOCK: (wire.SetWakeLockRequest, wire.SetWakeLockResponse),
    wire.SET_WIFI_ENABLED: (wire.SetWifiEnabledRequest, wire.SetWifiEnabledResponse),
    wire.SET_MASTER_MUTE: (wire.SetMasterMuteRequest, wire.SetMasterMuteResponse),
    wire.EVENT_AIRPLANE_MODE: (None, wire.AirplaneModeEvent),
    wire.EVENT_BATTERY: (None, wire.BatteryEvent),
    wire.EVENT_CONNECTIVITY: (None, wire.ConnectivityEvent),
    wire.EVENT_PHONE_STATE: (None, wire.PhoneStateEvent),
    wire.EVENT_ROTATION: (None, wire.RotationEvent),
    wire.EVENT_BROWSER_PACKAGE: (None, wire.BrowserPackageEvent),
}
def pack(mtype, request, rid=None):
    '''Serialize *request* into a varint-length-delimited Envelope frame.

    mtype: wire message type id; rid: optional request id for service calls.
    Returns the bytes ready to be written to the socket.
    '''
    envelope = wire.Envelope()
    if rid is not None:
        envelope.id = rid
    envelope.type = mtype
    envelope.message = request.SerializeToString()
    payload = envelope.SerializeToString()
    return encoder._VarintBytes(len(payload)) + payload
def unpack(data):
    '''Decode one varint-length-delimited Envelope from *data* and return it.'''
    size, offset = decoder._DecodeVarint(data, 0)
    envelope = wire.Envelope()
    envelope.ParseFromString(data[offset:offset + size])
    return envelope
def _id_generator():
request_id = [0]
def _next():
if request_id[0] == 0xffffffff: #uint32
request_id[0] = 0
request_id[0] += 1
return request_id[0]
return _next
get_request_id = _id_generator()
eventhooks = {}  # called when events arrived
# initialize eventhooks: every message type with no request class is a
# server-initiated event and gets an (initially empty) callback list
for mtype, (req_class, _) in messages.iteritems():
    if req_class is None:
        eventhooks[mtype] = []
del mtype, req_class

responses = {}  # save service call response, for synchronize
service_response_lock = threading.Lock()  # guards the ``responses`` table
def route(envelope):
    """Dispatch a decoded Envelope to a waiting service call or event hooks.

    Service replies (envelope.id set) are stored in ``responses`` keyed by
    request id for wait_response() to pick up; event messages are fanned out
    to all callbacks registered for their type. Unknown types trigger a
    warning instead of an exception so the reader thread keeps running.
    """
    _, resp_class = messages[envelope.type]
    resp = resp_class()
    resp.ParseFromString(envelope.message)
    # service calls should have id
    if envelope.id:
        with service_response_lock:
            rid = envelope.id
            if rid in responses:
                # wait_response() timed out and left a None placeholder:
                # the caller is gone, so discard the late reply.
                del responses[rid]
            else:
                responses[rid] = resp
        return
    # handle events
    hooks = eventhooks.get(envelope.type)
    if hooks is not None:
        for func in hooks:
            try:
                func(resp)
            except Exception:
                # BUGFIX: was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit. A failing hook must not
                # kill the reader thread, but interrupts should propagate.
                traceback.print_exc()
        return
    # no handler found
    warnings.warn('No handler found for %s(%s)' % (resp_class.DESCRIPTOR.name, envelope.type))
def wait_response(rid, timeout=1):
    """Poll for the reply to request *rid*; return it, or None on timeout.

    On timeout a None placeholder is stored under *rid* so that route()
    can recognise and drop a late reply instead of leaking it.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        with service_response_lock:
            if rid in responses:
                return responses.pop(rid)
        time.sleep(0.1)
    # cleanup timeouted response, avoid memory leak by add a placeholder!
    with service_response_lock:
        if rid in responses:
            del responses[rid]
        else:
            responses[rid] = None
def register_eventhook(mtype, callback):
    """Register *callback* to be invoked with each event of type *mtype*.

    Raises KeyError when *mtype* is not an event message type (eventhooks
    only has keys for event entries of `messages`).
    The previous ``global eventhooks`` statement was dropped: the dict is
    mutated in place, never rebound, so the declaration was unnecessary.
    """
    eventhooks[mtype].append(callback)
def start_stf_service(adbprefix=None, port=1100):
    """Start the on-device STF service via adb and forward its TCP port.

    adbprefix: adb invocation as a list (e.g. ['adb', '-s', SERIAL]);
               defaults to ['adb'].
    port: local TCP port forwarded to the device-side service port 1100.
    """
    if adbprefix is None:
        adbprefix = ['adb']
    cmds = [['shell', 'am', 'startservice'],
            ['--user', '0'],
            ['-a', 'jp.co.cyberagent.stf.ACTION_START', '-n', 'jp.co.cyberagent.stf/.Service']]
    command = adbprefix + cmds[0] + cmds[1] + cmds[2]
    # Python 2 print statement; echoes `am startservice` output for debugging.
    print subprocess.check_output(command)
    # if falied, using:
    # command = cmds[0] + cmds[2]
    command = adbprefix + ['forward', 'tcp:%s' % port, 'tcp:1100'] # remote port use default 1100, although it can be changed
    subprocess.call(command)
def stop_stf_service(adbprefix=None, port=1100):
    """Remove the local port forward, then stop the on-device STF service."""
    prefix = ['adb'] if adbprefix is None else adbprefix
    subprocess.call(prefix + ['forward', '--remove', 'tcp:%s' % port])
    subprocess.call(prefix + ['shell', 'am', 'stopservice',
                              '-n', 'jp.co.cyberagent.stf/.Service'])
def check_stf_agent(adbprefix=None, kill=False):
    '''Return True if the stf.agent process is running on the device.

    With kill=True a running agent is killed first and False is returned.
    Falls through (implicitly returning None, which is falsy) when the
    `ps` output has no data rows.
    '''
    if adbprefix is None:
        adbprefix = ['adb']
    command = adbprefix + ['shell', 'ps']
    out = subprocess.check_output(command).strip()
    out = out.splitlines()
    if len(out) > 1:
        # Locate the PID column from the `ps` header row, then scan the
        # remaining rows for the agent process.
        first, out = out[0], out[1:]
        idx = first.split().index('PID')
        pid = None
        for line in out:
            if 'stf.agent' in line:
                pid = line.split()[idx]
                print 'stf.agent is running, pid is', pid
                break
        if pid is not None:
            if kill:
                print 'killing', pid
                command = adbprefix + ['shell', 'kill', '-9', pid]
                subprocess.call(command)
                # agent was just killed, so report "not alive"
                return False
            return True
        return False
def start_stf_agent(adbprefix=None, restart=False, port=1090):
    """Launch the STF agent through app_process and forward its TCP port.

    restart=True kills an already-running agent first; otherwise a running
    agent is left alone and this function returns immediately.
    """
    if adbprefix is None:
        adbprefix = ['adb']
    if check_stf_agent(adbprefix, kill=restart):
        return
    # Resolve the installed APK path (`pm path` prints "package:<path>"),
    # then run the agent's main class directly via app_process with the
    # APK on the classpath.
    command = adbprefix + ['shell', 'pm', 'path', 'jp.co.cyberagent.stf']
    out = subprocess.check_output(command).strip()
    path = out.split(':')[-1]
    print 'stf agent path', repr(path)
    command = adbprefix + ['shell', 'CLASSPATH="%s"' % path,
                           'app_process', '/system/bin', 'jp.co.cyberagent.stf.Agent']
    p = subprocess.Popen(command, stdout=subprocess.PIPE)
    # IMPORTANT! wait for agent to start listening.
    p.stdout.readline()
    command = adbprefix + ['forward', 'tcp:%s' % port, 'tcp:1090'] # remote port is 1090, cannot change
    subprocess.call(command)
def stop_stf_agent(adbprefix=None, port=1090):
    """Drop the agent port forward and kill any running stf.agent process."""
    prefix = ['adb'] if adbprefix is None else adbprefix
    subprocess.call(prefix + ['forward', '--remove', 'tcp:%s' % port])
    check_stf_agent(prefix, kill=True)
service_queue = Queue.Queue()  # outgoing frames for the service socket thread
agent_queue = Queue.Queue()  # outgoing frames for the agent socket thread
stop_event = threading.Event()  # set to ask both socket threads to exit
def isalive():
    # NOTE(review): this returns True when stop_event IS set, i.e. after the
    # connection threads have been asked to stop -- the opposite of what the
    # name suggests. Confirm intended semantics with callers before changing.
    return stop_event.isSet()
def listen_service(service_port=1100):
    """Spawn a daemon thread owning the bidirectional service socket.

    The thread multiplexes with select(): inbound data is unpacked and
    handed to route(); outbound frames are drained from service_queue.
    Any socket failure sets stop_event so the agent thread shuts down too.
    """
    # service, send & recv, use timeout
    def _service():
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect(('127.0.0.1', service_port))
            while not stop_event.isSet():
                # 0.1s select timeout keeps the stop_event check responsive.
                r, w, _ = select.select([s], [s], [], 0.1)
                if s in r:
                    data = s.recv(1024)
                    if not data:
                        continue
                    try:
                        # NOTE(review): assumes each recv() returns exactly one
                        # whole frame; a frame split across recv() calls would
                        # fail to parse here -- confirm against message sizes.
                        envelope = unpack(data)
                        route(envelope)
                    except:
                        print 'error while handle response'
                        traceback.print_exc()
                if s in w:
                    try:
                        message = service_queue.get_nowait()
                    except Queue.Empty:
                        pass
                    else:
                        s.sendall(message)
        except:
            traceback.print_exc()
        finally:
            s.close()
            print 'Service socket closed'
            # signal the companion agent thread to stop as well
            stop_event.set()
    t = threading.Thread(target=_service)
    t.setDaemon(True)
    t.start()
def listen_agent(agent_port=1090):
    """Spawn a daemon thread owning the write-only agent socket."""
    # just send, no recv
    def _agent():
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect(('127.0.0.1', agent_port))
            while not stop_event.isSet():
                # NOTE(review): blocking get() means stop_event is only
                # re-checked after another frame arrives, so shutdown can be
                # delayed until the next message is queued.
                message = agent_queue.get()
                s.sendall(message)
        except:
            traceback.print_exc()
        finally:
            s.close()
            print 'Agent socket closed.'
            # signal the companion service thread to stop as well
            stop_event.set()
    t = threading.Thread(target=_agent)
    t.setDaemon(True)
    t.start()
#--------------------------- API functions ----------------------------
def start(adbprefix=None, service_port=1100, agent_port=1090):
    """Start STF service + agent on the device and connect the local threads."""
    start_stf_service(adbprefix, port=service_port)
    start_stf_agent(adbprefix, port=agent_port)
    # clear before spawning the listener threads so their loops do not
    # exit immediately if a previous session left the event set
    stop_event.clear()
    listen_service(service_port)
    listen_agent(agent_port)
def stop(adbprefix=None, service_port=1100, agent_port=1090):
    """Ask the local threads to exit, then tear down device service + agent."""
    stop_event.set()
    stop_stf_service(adbprefix, port=service_port)
    stop_stf_agent(adbprefix, port=agent_port)
# agent
def keyevent(key, event=wire.PRESS, shift=False, ctrl=False, alt=False, meta=False,
             sym=False, function=False, capslock=False, scrolllock=False, numlock=False):
    """Send a low-level key event through the agent socket.

    key: a keycode constant, or a name such as 'HOME' / 'KEYCODE_HOME'.
    event: wire.DOWN / wire.UP / wire.PRESS, or the same as a string.
    The remaining boolean flags set the matching meta-key state on the event.
    """
    if isinstance(event, basestring):  # Python 2 (basestring)
        event = event.upper()
        assert event in ('DOWN', 'UP', 'PRESS')
        event = getattr(wire, event)
    if isinstance(key, basestring):
        # accept bare names: 'HOME' -> 'KEYCODE_HOME'
        if not key.startswith('KEYCODE_'):
            key = 'KEYCODE_%s' % key
        key = getattr(keycode, key)
    req = wire.KeyEventRequest()
    req.event = event
    req.keyCode = key
    req.shiftKey = shift
    req.ctrlKey = ctrl
    req.altKey = alt
    req.metaKey = meta
    req.symKey = sym
    req.functionKey = function
    req.capsLockKey = capslock
    req.scrollLockKey = scrolllock
    req.numLockKey = numlock
    msg = pack(wire.DO_KEYEVENT, req)
    agent_queue.put(msg)
# simple keyboard input
def keyboard(char, holdtime=None):
    """Type a single character / named key, optionally holding it.

    char: one printable character, or a key name ('ENTER', 'KEYCODE_ENTER').
    holdtime: when given, send DOWN, sleep holdtime seconds, then UP;
              otherwise send a single PRESS.
    Invalid input is reported to stdout and silently ignored.
    """
    shift = False
    ctrl = False
    if len(char) > 1:
        # multi-character input is treated as a key name
        char = char.upper()
        if not char.startswith('KEYCODE_'):
            char = 'KEYCODE_%s' % char
        code = getattr(keycode, char, None)
        if code is None:
            print 'invalid keycode', char
            return
    elif char in '<KEY>':
        # NOTE(review): the literal '<KEY>' looks like a dataset-anonymisation
        # placeholder (originally probably the digits/lowercase letters) --
        # restore the real character set from the upstream source.
        code = getattr(keycode, 'KEYCODE_%s' % char.upper())
    elif char in 'ABCDEFGHIJLKMNOPQRSTUVWXYZ':
        code = getattr(keycode, 'KEYCODE_%s' % char)
        shift = True
    elif char in keycode.SHIFTED_KEYS:
        code = keycode.SHIFTED_KEYS[char]
        shift = True
    elif char in keycode.KEYBOARD_KEYS:
        code = keycode.KEYBOARD_KEYS[char]
    elif char in keycode.CTRLED_KEYS:
        code = keycode.CTRLED_KEYS[char]
        ctrl = True
    else:
        print 'invalid char', repr(char)
        return
    if holdtime is None:
        req = wire.KeyEventRequest(event=wire.PRESS, keyCode=code, shiftKey=shift, ctrlKey=ctrl)
        msg = pack(wire.DO_KEYEVENT, req)
        agent_queue.put(msg)
    else:
        # keydown
        req = wire.KeyEventRequest(event=wire.DOWN, keyCode=code, shiftKey=shift, ctrlKey=ctrl)
        msg = pack(wire.DO_KEYEVENT, req)
        agent_queue.put(msg)
        # wait
        time.sleep(holdtime)
        # keyup
        req = wire.KeyEventRequest(event=wire.UP, keyCode=code, shiftKey=shift, ctrlKey=ctrl)
        msg = pack(wire.DO_KEYEVENT, req)
        agent_queue.put(msg)
# agent, can input asciis only
def ascii_type(text):
    """Type ASCII *text* on the device via the agent socket."""
    request = wire.DoTypeRequest(text=text)
    agent_queue.put(pack(wire.DO_TYPE, request))
# agent
def set_rotation(rotation, lock=False):
    """Rotate the device display; optionally lock it to that rotation."""
    request = wire.SetRotationRequest(rotation=rotation, lock=lock)
    agent_queue.put(pack(wire.SET_ROTATION, request))
# agent
def wake():
    """Wake the device screen via the agent socket."""
    agent_queue.put(pack(wire.DO_WAKE, wire.DoWakeRequest()))
# use both agent & service to input unicode characters
# TODO make it atomic, maybe use another queue?
def type(text):
    """Input *text* (unicode allowed) by setting the clipboard then pasting.

    Returns True when the clipboard was set and Ctrl+V was dispatched,
    False on failure or timeout. (Shadows the builtin ``type``; the name
    is kept for API compatibility.)
    """
    rid = get_request_id()
    request = wire.SetClipboardRequest(type=wire.TEXT, text=text)
    service_queue.put(pack(wire.SET_CLIPBOARD, request, rid))
    # wait for clipboard result
    resp = wait_response(rid)
    if not (resp and resp.success):
        return False
    keyevent(keycode.KEYCODE_V, wire.PRESS, ctrl=True)
    return True
def identify():
    """Ask the device to visually identify itself; return the success flag."""
    rid = get_request_id()
    request = wire.DoIdentifyRequest(serial="")
    service_queue.put(pack(wire.DO_IDENTIFY, request, rid))
    resp = wait_response(rid)
    return resp and resp.success
def get_wifi_status():
    """Return a truthy value when wifi is enabled on the device."""
    rid = get_request_id()
    service_queue.put(pack(wire.GET_WIFI_STATUS, wire.GetWifiStatusRequest(), rid))
    resp = wait_response(rid)
    return resp and resp.success and resp.status
def set_wifi_enabled(enabled):
    """Toggle device wifi; return the success flag."""
    rid = get_request_id()
    request = wire.SetWifiEnabledRequest(enabled=bool(enabled))
    service_queue.put(pack(wire.SET_WIFI_ENABLED, request, rid))
    # a permission dialog may appear on-device, so allow extra time
    resp = wait_response(rid, timeout=5)
    return resp and resp.success
def get_display(deviceid=0):
    """Return the display properties of *deviceid* as a dict ({} on failure)."""
    rid = get_request_id()
    service_queue.put(pack(wire.GET_DISPLAY, wire.GetDisplayRequest(id=deviceid), rid))
    resp = wait_response(rid)
    if not resp or not resp.success:
        return {}
    # copy every declared response field into a plain dict
    return dict((f.name, getattr(resp, f.name))
                for f in wire.GetDisplayResponse.DESCRIPTOR.fields)
def get_properties(*args):
    """Fetch Android system properties by name; return {name: value}.

    Property names are passed positionally; for backward compatibility an
    empty call still queries "ro.product.device" only. Returns {} on
    failure or timeout.
    """
    rid = get_request_id()
    # BUGFIX: the request id was never packed into the envelope, so
    # wait_response(rid) could not match the reply; also *args was ignored
    # in favour of the hard-coded property list.
    names = list(args) if args else ["ro.product.device"]
    msg = pack(wire.GET_PROPERTIES, wire.GetPropertiesRequest(properties=names), rid)
    service_queue.put(msg)
    resp = wait_response(rid)
    res = {}
    if not resp or not resp.success:
        return res
    for prop in resp.properties:
        res[prop.name] = prop.value
    return res
def on_battery_event(callback):
    """Invoke callback(field_dict) for every battery event."""
    names = [f.name for f in wire.BatteryEvent.DESCRIPTOR.fields]
    def _hook(event):
        callback(dict((n, getattr(event, n)) for n in names))
    register_eventhook(wire.EVENT_BATTERY, _hook)
def on_rotation_event(callback):
    """Invoke callback(rotation) for every rotation event."""
    register_eventhook(wire.EVENT_ROTATION,
                       lambda event: callback(event.rotation))
def on_connectivity_event(callback):
    """Invoke callback(field_dict) for every connectivity event."""
    names = [f.name for f in wire.ConnectivityEvent.DESCRIPTOR.fields]
    def _hook(event):
        callback(dict((n, getattr(event, n)) for n in names))
    register_eventhook(wire.EVENT_CONNECTIVITY, _hook)
|
<gh_stars>0
import requests
import json
def make_kml_stop_without_names(overpass_base_url, kml_wrapper):
    """Query Overpass for unnamed bus stops and write them to bussansnom.kml.

    overpass_base_url: Overpass interpreter URL ending in '?data='.
    kml_wrapper: KML document header containing the %%kml_name%% placeholder.
    Exits the process (exit(1)) when the Overpass call fails.
    """
    overpass_url = overpass_base_url + '[out:json][timeout:125];area(3600008649)->.area;node["highway"="bus_stop"][!"name"][!"disused"](area.area);out skel;'
    #overpass_url = overpass_base_url + '[out:json][timeout:125];area(3600402773)->.area;node["highway"="bus_stop"][!"name"][!"disused"](area.area);out skel;'
    overpass_call = requests.get(overpass_url)
    if overpass_call.status_code != 200:
        print ("KO à l'appel Overpass des bus sans nom")
        exit(1)
    overpass_result = overpass_call.json()
    # FIX: the placemark template is loop-invariant; it was rebuilt on every
    # iteration, so it is now hoisted out of the loop.
    placemark_template = """
<Placemark>
<name>Compléter le nom de cet arrêt de bus</name>
<styleUrl>#placemark-blue</styleUrl>
<Point><coordinates>%%kml_lon%%,%%kml_lat%%</coordinates></Point>
</Placemark>"""
    for elem in overpass_result['elements']:
        placemark = placemark_template.replace("%%kml_lat%%", str(elem['lat']))
        placemark = placemark.replace("%%kml_lon%%", str(elem['lon']))
        kml_wrapper += placemark
    kml_wrapper += """
</Document>
</kml>
"""
    kml_wrapper = kml_wrapper.replace("%%kml_name%%", "Arrêts de bus sans nom")
    with open("bussansnom.kml", "w") as xml_out_file:
        xml_out_file.write(kml_wrapper)
    print("il y a {} arrêts de bus sans nom".format(len(overpass_result['elements'])))
def make_kml_stop_orphan(overpass_base_url, kml_wrapper):
    """Query Overpass for bus stops not referenced by any route relation
    and write them to bussansligne.kml.

    overpass_base_url: Overpass interpreter URL ending in '?data='.
    kml_wrapper: KML document header containing the %%kml_name%% placeholder.
    Exits the process (exit(1)) when the Overpass call fails.
    """
    overpass_url = overpass_base_url + '[out:json][timeout:225];area(3600008649);node(area)["highway"="bus_stop"][!"disused"]->.all;relation(bn.all)[type=route];node(r);( .all; - ._; );out skel;'
    #overpass_url = overpass_base_url + '[out:json][timeout:125];area(3600402773);node(area)["highway"="bus_stop"][!"disused"]->.all;relation(bn.all)[type=route];node(r);( .all; - ._; );out skel;'
    overpass_call = requests.get(overpass_url)
    if overpass_call.status_code != 200:
        print ("KO à l'appel Overpass des bus orphelins")
        exit(1)
    overpass_result = overpass_call.json()
    # FIX: loop-invariant placemark template hoisted out of the loop.
    placemark_template = """
<Placemark>
<name>Ajouter les lignes qui passent à cet arrêt</name>
<styleUrl>#placemark-pink</styleUrl>
<Point><coordinates>%%kml_lon%%,%%kml_lat%%</coordinates></Point>
<description><![CDATA[<a href="https://microcosm.5apps.com/poi.html?poi_type=bus_stop#18/%%kml_lat%%/%%kml_lon%%">AJOUTER DES LIGNES</a>]]></description>
</Placemark>"""
    for elem in overpass_result['elements']:
        placemark = placemark_template.replace("%%kml_lat%%", str(elem['lat']))
        placemark = placemark.replace("%%kml_lon%%", str(elem['lon']))
        kml_wrapper += placemark
    kml_wrapper += """
</Document>
</kml>
"""
    kml_wrapper = kml_wrapper.replace("%%kml_name%%", "Arrêts de bus non desservi")
    print("il y a {} arrêts de bus non desservi".format(len(overpass_result['elements'])))
    with open("bussansligne.kml", "w") as xml_out_file:
        xml_out_file.write(kml_wrapper)
def make_kml_stop_fixme(overpass_base_url, kml_wrapper):
    """Query Overpass for bus stops carrying a fixme tag and write busfixme.kml.

    overpass_base_url: Overpass interpreter URL ending in '?data='.
    kml_wrapper: KML document header containing the %%kml_name%% placeholder.
    Exits the process (exit(1)) when the Overpass call fails.
    """
    overpass_url = overpass_base_url + '[out:json][timeout:125];area(3600008649)->.area;node["highway"="bus_stop"]["fixme"][!"disused"](area.area);out body;'
    #overpass_url = overpass_base_url + '[out:json][timeout:125];area(3600402773)->.area;node["highway"="bus_stop"]["FIXME"][!"disused"](area.area);out body;'
    overpass_call = requests.get(overpass_url)
    if overpass_call.status_code != 200:
        print ("KO à l'appel Overpass des bus avec FIXME")
        exit(1)
    overpass_result = overpass_call.json()
    # FIX: loop-invariant placemark template hoisted out of the loop.
    placemark_template = """
<Placemark>
<name>Vérifier cet arrêt de bus</name>
<styleUrl>#placemark-brown</styleUrl>
<Point><coordinates>%%kml_lon%%,%%kml_lat%%</coordinates></Point>
<description><![CDATA[FIXME : %%FIXME%%<br><a href="https://microcosm.5apps.com/poi.html?poi_type=bus_stop#18/%%kml_lat%%/%%kml_lon%%">AJOUTER DES LIGNES</a>]]></description>
</Placemark>"""
    for elem in overpass_result['elements']:
        placemark = placemark_template.replace("%%kml_lat%%", str(elem['lat']))
        placemark = placemark.replace("%%kml_lon%%", str(elem['lon']))
        placemark = placemark.replace("%%FIXME%%", elem['tags']['fixme'])
        kml_wrapper += placemark
    kml_wrapper += """
</Document>
</kml>
"""
    kml_wrapper = kml_wrapper.replace("%%kml_name%%", "Arrêts de bus à vérifier")
    print("il y a {} arrêts de bus à vérifier".format(len(overpass_result['elements'])))
    with open("busfixme.kml", "w") as xml_out_file:
        xml_out_file.write(kml_wrapper)
if __name__ == '__main__':
    # Overpass API endpoint; the QL query is URL-appended after '?data='.
    overpass_base_url = 'http://overpass-api.de/api/interpreter?data='
    # Shared KML header: one Style per placemark colour plus the
    # %%kml_name%% placeholder that each generator substitutes.
    kml_wrapper = """<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://earth.google.com/kml/2.2">
<Document>
<Style id="placemark-blue">
<IconStyle>
<Icon>
<href>http://mapswith.me/placemarks/placemark-blue.png</href>
</Icon>
</IconStyle>
</Style>
<Style id="placemark-brown">
<IconStyle>
<Icon>
<href>http://mapswith.me/placemarks/placemark-brown.png</href>
</Icon>
</IconStyle>
</Style>
<Style id="placemark-green">
<IconStyle>
<Icon>
<href>http://mapswith.me/placemarks/placemark-green.png</href>
</Icon>
</IconStyle>
</Style>
<Style id="placemark-orange">
<IconStyle>
<Icon>
<href>http://mapswith.me/placemarks/placemark-orange.png</href>
</Icon>
</IconStyle>
</Style>
<Style id="placemark-pink">
<IconStyle>
<Icon>
<href>http://mapswith.me/placemarks/placemark-pink.png</href>
</Icon>
</IconStyle>
</Style>
<Style id="placemark-purple">
<IconStyle>
<Icon>
<href>http://mapswith.me/placemarks/placemark-purple.png</href>
</Icon>
</IconStyle>
</Style>
<Style id="placemark-red">
<IconStyle>
<Icon>
<href>http://mapswith.me/placemarks/placemark-red.png</href>
</Icon>
</IconStyle>
</Style>
<Style id="placemark-yellow">
<IconStyle>
<Icon>
<href>http://mapswith.me/placemarks/placemark-yellow.png</href>
</Icon>
</IconStyle>
</Style>
<name>%%kml_name%%</name>
<visibility>1</visibility>
"""
    # The first two generators are currently disabled ('##'); only the
    # fixme report is produced on each run.
    ##make_kml_stop_without_names(overpass_base_url, kml_wrapper)
    ##make_kml_stop_orphan(overpass_base_url, kml_wrapper)
    make_kml_stop_fixme(overpass_base_url, kml_wrapper)
|
<filename>piper/test/test_io.py
from piper.io import list_files
from piper.io import read_csv
from piper.io import read_text
from piper.io import to_tsv
from piper.io import write_text
from piper.io import zip_data
from piper.factory import bad_quality_orders
import pandas as pd
import pytest
directory = 'piper/temp/sql'
# sql {{{1
@pytest.fixture
def sql():
    """Fixture: a trivial one-row SQL probe statement."""
    return ''' select * from eudta.f0006 where rownum=1'''
# t_bad_quality_orders {{{1
@pytest.fixture
def t_bad_quality_orders():
    """Fixture: sample 'bad quality orders' data from the piper factory."""
    return bad_quality_orders()
# t_write_text_file {{{1
@pytest.fixture
def t_write_text_file():
    """Fixture: write the small sample text file used by read/zip tests."""
    write_text('piper/temp/sample_text_file.txt', 'some sample text')
# test_read_sql_valid_info_true {{{1
# test_read_sql_valid_info_true {{{1
@pytest.mark.skip(reason="no way of currently testing this")
def test_read_sql_valid_info_true(sql):
    """read_sql with sql_info=True returns the expected (1, 152) frame.

    NOTE(review): `connect` and `read_sql` are not imported in this module,
    so this test would fail with NameError if ever unskipped -- confirm the
    intended import source before enabling.
    """
    env = 'JDE9E2P'
    con, schema, schema_ctl = connect(env)
    df = read_sql(sql, sql_info=True, con=con, info=False)
    assert (1, 152) == df.shape
# test_read_sql_valid_info_false {{{1
@pytest.mark.skip(reason="no way of currently testing this")
def test_read_sql_valid_info_false(sql):
    """read_sql with sql_info=False returns the expected (1, 152) frame.

    NOTE(review): `connect` and `read_sql` are not imported in this module,
    so this test would fail with NameError if ever unskipped -- confirm the
    intended import source before enabling.
    """
    env = 'JDE9E2P'
    con, schema, schema_ctl = connect(env)
    df = read_sql(sql, sql_info=False, con=con, info=True)
    assert (1, 152) == df.shape
# test_list_files_no_files() {{{1
def test_list_files_no_files():
    """list_files returns an empty list when the glob matches nothing."""
    found = list_files(source='piper/', glob_pattern='*.px')
    assert found == []
# test_list_files_with_data() {{{1
def test_list_files_with_data():
    """list_files finds the expected number of .py files under piper/."""
    found = list_files(source='piper/', glob_pattern='*.py')
    # NOTE(review): the hard-coded count is brittle -- it breaks whenever a
    # module is added to or removed from the package.
    assert len(found) == 17
# test_list_files_with_data_as_posix() {{{1
def test_list_files_with_data_as_posix():
    """With as_posix=True, list_files returns plain strings, not Path objects."""
    found = list_files(source='piper/', glob_pattern='*.py', as_posix=True)
    assert isinstance(found[0], str)
# test_read_csv_with_data {{{1
def test_read_csv_with_data(t_bad_quality_orders):
    """read_csv round-trips the frame written by the to_tsv test.

    NOTE(review): depends on 'piper/temp/to_tsv_with_data.tsv' having been
    written by test_to_tsv_with_data -- an implicit ordering dependency.
    """
    reference = pd.DataFrame(t_bad_quality_orders)
    loaded = read_csv('piper/temp/to_tsv_with_data.tsv', sep='\t')
    assert reference.shape == loaded.shape
# test_read_text() {{{1
def test_read_text(t_write_text_file):
    """read_text returns the file's lines when count=False."""
    lines = read_text('piper/temp/sample_text_file.txt', count=False)
    assert lines == ['some sample text']
# test_to_tsv_with_data {{{1
def test_to_tsv_with_data(t_bad_quality_orders):
    """to_tsv writes the sample dataframe and returns None."""
    frame = pd.DataFrame(t_bad_quality_orders)
    result = to_tsv(frame, 'piper/temp/to_tsv_with_data.tsv', sep='\t')
    assert result is None
# test_zip_data() {{{1
def test_zip_data(t_write_text_file):
    """zip_data archives exactly the one sample .txt file."""
    archive = zip_data(source='piper/temp', target='piper/temp/test_zip_file',
                       filter='*.txt', ts_prefix=False,
                       test_mode=False, mode='w', info=False)
    assert len(archive.namelist()) == 1
# test_zip_data_test_mode() {{{1
def test_zip_data_test_mode(t_write_text_file):
    """In test_mode, zip_data performs a dry run and returns None."""
    archive = zip_data(source='temp', target='piper/temp/test_zip_file',
                       filter='*.sql', ts_prefix=False,
                       test_mode=True, mode='w', info=False)
    assert archive is None
# test_zip_data_nofiles() {{{1
def test_zip_data_nofiles(t_write_text_file):
    """zip_data returns None when the filter matches no files."""
    archive = zip_data(source='temp', target='piper/temp/test_zip_file',
                       filter='*.nofiles', ts_prefix=False,
                       test_mode=False, mode='w', info=False)
    assert archive is None
|
import os
import pytest
from bless.aws_lambda.bless_lambda import lambda_handler
from tests.ssh.vectors import EXAMPLE_RSA_PUBLIC_KEY, RSA_CA_PRIVATE_KEY_PASSWORD, \
EXAMPLE_ED25519_PUBLIC_KEY
class Context(object):
    """Minimal stand-in for the AWS Lambda context object used by the tests."""
    aws_request_id = 'bogus aws_request_id'
    invoked_function_arn = 'bogus invoked_function_arn'
# ---- Canned lambda_handler payloads shared by the tests below ----
# NOTE(review): values such as "<EMAIL>" and "<PASSWORD>..." look like
# dataset-anonymisation placeholders rather than real fixture data --
# restore the originals from the upstream repository before trusting them.
VALID_TEST_REQUEST = {
    "remote_usernames": "user",
    "public_key_to_sign": EXAMPLE_RSA_PUBLIC_KEY,
    "command": "ssh user@server",
    "bastion_ips": "127.0.0.1",
    "bastion_user": "user",
    "bastion_user_ip": "127.0.0.1"
}
# Email-formatted bastion user plus comma-separated remote usernames.
VALID_TEST_REQUEST_USERNAME_VALIDATION_EMAIL_REMOTE_USERNAMES_USERADD = {
    "remote_usernames": "user,anotheruser",
    "public_key_to_sign": EXAMPLE_RSA_PUBLIC_KEY,
    "command": "ssh user@server",
    "bastion_ips": "127.0.0.1",
    "bastion_user": "<EMAIL>",
    "bastion_user_ip": "127.0.0.1"
}
# Deliberately bizarre usernames, only acceptable with validation disabled.
VALID_TEST_REQUEST_USERNAME_VALIDATION_DISABLED = {
    "remote_usernames": "'~:, \n\t@'",
    "public_key_to_sign": EXAMPLE_RSA_PUBLIC_KEY,
    "command": "ssh user@server",
    "bastion_ips": "127.0.0.1",
    "bastion_user": "a33characterusernameyoumustbenuts",
    "bastion_user_ip": "127.0.0.1"
}
# Malformed IP fields -> expected InputValidationError.
INVALID_TEST_REQUEST = {
    "remote_usernames": "user",
    "public_key_to_sign": EXAMPLE_RSA_PUBLIC_KEY,
    "command": "ssh user@server",
    "bastion_ips": "invalid_ip",
    "bastion_user": "user",
    "bastion_user_ip": "invalid_ip"
}
# Valid request that also carries a kmsauth token.
VALID_TEST_REQUEST_KMSAUTH = {
    "remote_usernames": "user",
    "public_key_to_sign": EXAMPLE_RSA_PUBLIC_KEY,
    "command": "ssh user@server",
    "bastion_ips": "127.0.0.1",
    "bastion_user": "user",
    "bastion_user_ip": "127.0.0.1",
    "kmsauth_token": "valid<PASSWORD>aut<PASSWORD>ken",
}
# ed25519 key -> unsupported key type for this handler.
INVALID_TEST_REQUEST_KEY_TYPE = {
    "remote_usernames": "user",
    "public_key_to_sign": EXAMPLE_ED25519_PUBLIC_KEY,
    "command": "ssh user@server",
    "bastion_ips": "127.0.0.1",
    "bastion_user": "user",
    "bastion_user_ip": "127.0.0.1"
}
INVALID_TEST_REQUEST_EXTRA_FIELD = {
    "remote_usernames": "user",
    "public_key_to_sign": EXAMPLE_RSA_PUBLIC_KEY,
    "command": "ssh user@server",
    "bastion_ips": "127.0.0.1",
    "bastion_user": "user",
    "bastion_user_ip": "127.0.0.1",
    "bastion_ip": "127.0.0.1" # Note this is now an invalid field.
}
# Missing the required "command" field.
INVALID_TEST_REQUEST_MISSING_FIELD = {
    "remote_usernames": "user",
    "public_key_to_sign": EXAMPLE_RSA_PUBLIC_KEY,
    "bastion_ips": "127.0.0.1",
    "bastion_user": "user",
    "bastion_user_ip": "127.0.0.1"
}
VALID_TEST_REQUEST_MULTIPLE_PRINCIPALS = {
    "remote_usernames": "user1,user2",
    "public_key_to_sign": EXAMPLE_RSA_PUBLIC_KEY,
    "command": "ssh user@server",
    "bastion_ips": "127.0.0.1",
    "bastion_user": "user",
    "bastion_user_ip": "127.0.0.1"
}
# Empty first principal and an illegal '#' character.
INVALID_TEST_REQUEST_MULTIPLE_PRINCIPALS = {
    "remote_usernames": ",user#",
    "public_key_to_sign": EXAMPLE_RSA_PUBLIC_KEY,
    "command": "ssh user@server",
    "bastion_ips": "127.0.0.1",
    "bastion_user": "user",
    "bastion_user_ip": "127.0.0.1"
}
INVALID_TEST_REQUEST_USERNAME_INVALID = {
    "remote_usernames": "user",
    "public_key_to_sign": EXAMPLE_RSA_PUBLIC_KEY,
    "command": "ssh user@server",
    "bastion_ips": "127.0.0.1",
    "bastion_user": "~@.",
    "bastion_user_ip": "127.0.0.1"
}
# Default kmsauth rule: bastion_user must match remote_usernames.
INVALID_TEST_KMSAUTH_REQUEST_USERNAME_DOESNT_MATCH_REMOTE = {
    "remote_usernames": "userb",
    "public_key_to_sign": EXAMPLE_RSA_PUBLIC_KEY,
    "command": "ssh user@server",
    "bastion_ips": "127.0.0.1",
    "bastion_user": "usera",
    "bastion_user_ip": "127.0.0.1",
    "kmsauth_token": "<PASSWORD>"
}
INVALID_TEST_KMSAUTH_REQUEST_DIFFERENT_REMOTE_USER = {
    "remote_usernames": "root",
    "public_key_to_sign": EXAMPLE_RSA_PUBLIC_KEY,
    "command": "ssh user@server",
    "bastion_ips": "127.0.0.1",
    "bastion_user": "usera",
    "bastion_user_ip": "127.0.0.1",
    "kmsauth_token": "<PASSWORD>"
}
VALID_TEST_KMSAUTH_REQUEST_DIFFERENT_REMOTE_USER = {
    "remote_usernames": "alloweduser",
    "public_key_to_sign": EXAMPLE_RSA_PUBLIC_KEY,
    "command": "ssh user@server",
    "bastion_ips": "127.0.0.1",
    "bastion_user": "usera",
    "bastion_user_ip": "127.0.0.1",
    "kmsauth_token": "<PASSWORD>"
}
# Region required by the lambda config machinery under test.
os.environ['AWS_REGION'] = 'us-west-2'
def test_basic_local_request():
    """A fully valid request yields a signed RSA certificate."""
    cfg = os.path.join(os.path.dirname(__file__), 'bless-test.cfg')
    output = lambda_handler(VALID_TEST_REQUEST, context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['certificate'].startswith('ssh-rsa-cert-v01@openssh.com ')
def test_basic_local_unused_kmsauth_request():
    """A kmsauth token is ignored when the config does not require it."""
    cfg = os.path.join(os.path.dirname(__file__), 'bless-test.cfg')
    output = lambda_handler(VALID_TEST_REQUEST_KMSAUTH, context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['certificate'].startswith('ssh-rsa-cert-v01@openssh.com ')
def test_basic_local_missing_kmsauth_request():
    """Omitting the kmsauth token when the config requires it is rejected."""
    cfg = os.path.join(os.path.dirname(__file__), 'bless-test-kmsauth.cfg')
    output = lambda_handler(VALID_TEST_REQUEST, context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['errorType'] == 'InputValidationError'
def test_basic_local_username_validation_disabled(monkeypatch):
    """With username validation disabled, even bizarre usernames get signed."""
    env_overrides = {
        'bless_ca_default_password': '<INSERT_DEFAULT_KMS_ENCRYPTED_BASE64_ENCODED_PEM_PASSWORD_HERE>',
        'bless_ca_ca_private_key_file': 'tests/aws_lambda/only-use-for-unit-tests.pem',
        'bless_options_username_validation': 'disabled',
        'bless_options_remote_usernames_validation': 'disabled',
    }
    for name, value in env_overrides.items():
        monkeypatch.setenv(name, value)
    cfg = os.path.join(os.path.dirname(__file__), '')
    output = lambda_handler(VALID_TEST_REQUEST_USERNAME_VALIDATION_DISABLED, context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['certificate'].startswith('ssh-rsa-cert-v01@openssh.com ')
def test_basic_local_username_validation_email_remote_usernames_useradd(monkeypatch):
    """email bastion-user validation + useradd remote-username validation pass."""
    env_overrides = {
        'bless_ca_default_password': '<INSERT_DEFAULT_KMS_ENCRYPTED_BASE64_ENCODED_PEM_PASSWORD_HERE>',
        'bless_ca_ca_private_key_file': 'tests/aws_lambda/only-use-for-unit-tests.pem',
        'bless_options_username_validation': 'email',
        'bless_options_remote_usernames_validation': 'useradd',
    }
    for name, value in env_overrides.items():
        monkeypatch.setenv(name, value)
    cfg = os.path.join(os.path.dirname(__file__), '')
    output = lambda_handler(VALID_TEST_REQUEST_USERNAME_VALIDATION_EMAIL_REMOTE_USERNAMES_USERADD, context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['certificate'].startswith('ssh-rsa-cert-v01@openssh.com ')
def test_invalid_username_request():
    """A bastion_user full of illegal characters is rejected."""
    cfg = os.path.join(os.path.dirname(__file__), 'bless-test.cfg')
    output = lambda_handler(INVALID_TEST_REQUEST_USERNAME_INVALID, context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['errorType'] == 'InputValidationError'
def test_invalid_kmsauth_request():
    """A bogus kmsauth token fails KMS auth validation."""
    cfg = os.path.join(os.path.dirname(__file__), 'bless-test-kmsauth.cfg')
    output = lambda_handler(VALID_TEST_REQUEST_KMSAUTH, context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['errorType'] == 'KMSAuthValidationError'
def test_invalid_request():
    """A request with malformed IP fields is rejected."""
    cfg = os.path.join(os.path.dirname(__file__), 'bless-test.cfg')
    output = lambda_handler(INVALID_TEST_REQUEST, context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['errorType'] == 'InputValidationError'
def test_local_request_key_not_found():
    """A config pointing at a missing CA key file raises IOError."""
    cfg = os.path.join(os.path.dirname(__file__), 'bless-test-broken.cfg')
    with pytest.raises(IOError):
        lambda_handler(VALID_TEST_REQUEST, context=Context,
                       ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                       entropy_check=False, config_file=cfg)
def test_local_request_config_not_found():
    """A nonexistent config file raises ValueError."""
    cfg = os.path.join(os.path.dirname(__file__), 'none')
    with pytest.raises(ValueError):
        lambda_handler(VALID_TEST_REQUEST, context=Context,
                       ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                       entropy_check=False, config_file=cfg)
def test_local_request_invalid_pub_key():
    """An unsupported (ed25519) public key type is rejected."""
    cfg = os.path.join(os.path.dirname(__file__), 'bless-test.cfg')
    output = lambda_handler(INVALID_TEST_REQUEST_KEY_TYPE, context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['errorType'] == 'InputValidationError'
def test_local_request_extra_field():
    """An unexpected extra field in the request payload is rejected."""
    cfg = os.path.join(os.path.dirname(__file__), 'bless-test.cfg')
    output = lambda_handler(INVALID_TEST_REQUEST_EXTRA_FIELD, context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['errorType'] == 'InputValidationError'
def test_local_request_missing_field():
    """A payload missing a required field is rejected."""
    cfg = os.path.join(os.path.dirname(__file__), 'bless-test.cfg')
    output = lambda_handler(INVALID_TEST_REQUEST_MISSING_FIELD, context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['errorType'] == 'InputValidationError'
def test_local_request_with_test_user():
    """A config declaring a test user still signs a valid request."""
    cfg = os.path.join(os.path.dirname(__file__), 'bless-test-with-test-user.cfg')
    output = lambda_handler(VALID_TEST_REQUEST, context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['certificate'].startswith('ssh-rsa-cert-v01@openssh.com ')
def test_local_request_with_custom_certificate_extensions():
    """A config with custom certificate extensions still signs the request."""
    cfg = os.path.join(os.path.dirname(__file__),
                       'bless-test-with-certificate-extensions.cfg')
    output = lambda_handler(VALID_TEST_REQUEST, context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['certificate'].startswith('ssh-rsa-cert-v01@openssh.com ')
def test_local_request_with_empty_certificate_extensions():
    """A config with an empty certificate-extension list still signs."""
    cfg = os.path.join(os.path.dirname(__file__),
                       'bless-test-with-certificate-extensions-empty.cfg')
    output = lambda_handler(VALID_TEST_REQUEST, context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['certificate'].startswith('ssh-rsa-cert-v01@openssh.com ')
def test_local_request_with_multiple_principals():
    """Comma-separated remote usernames produce a multi-principal cert."""
    cfg = os.path.join(os.path.dirname(__file__), 'bless-test.cfg')
    output = lambda_handler(VALID_TEST_REQUEST_MULTIPLE_PRINCIPALS, context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['certificate'].startswith('ssh-rsa-cert-v01@openssh.com ')
def test_invalid_request_with_multiple_principals():
    """Malformed principal list (empty entry, '#' char) is rejected."""
    cfg = os.path.join(os.path.dirname(__file__), 'bless-test.cfg')
    output = lambda_handler(INVALID_TEST_REQUEST_MULTIPLE_PRINCIPALS, context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['errorType'] == 'InputValidationError'
def test_invalid_request_with_mismatched_bastion_and_remote():
    """Default kmsauth behaviour: bastion_user and remote_usernames must match."""
    cfg = os.path.join(os.path.dirname(__file__), 'bless-test-kmsauth.cfg')
    output = lambda_handler(INVALID_TEST_KMSAUTH_REQUEST_USERNAME_DOESNT_MATCH_REMOTE,
                            context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['errorType'] == 'KMSAuthValidationError'
def test_invalid_request_with_unallowed_remote():
    """A remote user outside the config's allowlist fails kmsauth validation.

    BUGFIX: the ca_private_key_password argument had been corrupted to the
    syntactically invalid token ``<PASSWORD>_CA_PRIVATE_KEY_PASSWORD``;
    restored to RSA_CA_PRIVATE_KEY_PASSWORD, the constant every other test
    in this module passes.
    """
    cfg = os.path.join(os.path.dirname(__file__),
                       'bless-test-kmsauth-different-remote.cfg')
    output = lambda_handler(INVALID_TEST_KMSAUTH_REQUEST_DIFFERENT_REMOTE_USER,
                            context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['errorType'] == 'KMSAuthValidationError'
def test_valid_request_with_allowed_remote(mocker):
    """An allowlisted remote user with a (mocked) valid kmsauth token is signed.

    BUGFIX: restored two corrupted tokens -- ``<PASSWORD>PRIVATE_KEY_PASSWORD``
    back to RSA_CA_PRIVATE_KEY_PASSWORD, and the expected certificate prefix
    ``'ssh-<EMAIL> '`` back to 'ssh-rsa-cert-v01@openssh.com ', the prefix
    asserted by every other success-path test in this module.
    """
    mocker.patch("kmsauth.KMSTokenValidator.decrypt_token")
    cfg = os.path.join(os.path.dirname(__file__),
                       'bless-test-kmsauth-different-remote.cfg')
    output = lambda_handler(VALID_TEST_KMSAUTH_REQUEST_DIFFERENT_REMOTE_USER,
                            context=Context,
                            ca_private_key_password=RSA_CA_PRIVATE_KEY_PASSWORD,
                            entropy_check=False, config_file=cfg)
    assert output['certificate'].startswith('ssh-rsa-cert-v01@openssh.com ')
|
<reponame>lucien-sim/cloudsat-viz
#!/usr/bin/python3
import os
import pickle
import geopandas as gpd
import pandas as pd
import numpy as np
from shapely.geometry import Polygon, MultiPolygon, Point, box
import json
from rasterio.warp import calculate_default_transform, reproject, Resampling
from rasterio.transform import from_bounds
import datetime
from netCDF4 import Dataset
from global_variables import data_path,default_crs
class radarPass:
    """Class to hold all relevant data for a given radar pass."""

    def __init__(self, lon, lat, timestamp, height, radar_refl, cpr_cloud_mask):
        # Data for the full pass, stored as plain lists so the instance is
        # straightforward to pickle/JSON-serialize.
        self.cloudsat = {}
        self.cloudsat['lon'] = lon.tolist()
        self.cloudsat['lat'] = lat.tolist()
        self.cloudsat['timestamp'] = timestamp.tolist()
        self.cloudsat['radar_refl'] = radar_refl.tolist()
        self.cloudsat['cpr_cloud_mask'] = cpr_cloud_mask.tolist()
        self.cloudsat['height'] = height.tolist()

        # Dictionaries for decoding CloudSat cloud classification data:
        # binary pattern -> (numeric code, label).
        self.cloudsat['cloud_dict'] = {
            '0000': (0, 'None'),
            '0001': (1, 'Ci'),
            '0010': (2, 'As'),
            '0011': (3, 'Ac'),
            '0100': (4, 'St'),
            '0101': (5, 'Sc'),
            '0110': (6, 'Cu'),
            '0111': (7, 'Ns'),
            '1000': (8, 'Deep')
        }
        self.cloudsat['precip_dict'] = {
            '00': (0, 'no precipitation'),
            '01': (1, 'liquid precipitation'),
            '10': (2, 'solid precipitation'),
            '11': (3, 'possible drizzle')
        }

    def add_cloudclass(self, cldclass_data, cldclass_vdata):
        """Adds CloudSat cloud classification data to the radarPass instance.

        PARAMETERS:
        -----------
        cldclass_data: data dictionary output from 'read_cloudsat_file' function
            applied to a CloudSat cloud classification file.
        cldclass_vdata: vdata dictionary output from 'read_cloudsat_file' function
            applied to a CloudSat cloud classification file.

        OUTPUTS:
        --------
        self (with cloud classification data)

        EXCEPTIONS:
        -----------
        Exception: "Cloud classification file doesn't match with this radar pass."
            Raised when the first five latitude/longitude values in the
            radarPass instance and cloud classification file don't match.
        """
        # Sanity check: the first five lon/lat samples must agree exactly.
        if (sum([int(a[0] == a[1]) for a in zip(self.cloudsat['lon'][:5], cldclass_vdata['Longitude'][:5])]) == 5 and
                sum([int(a[0] == a[1]) for a in zip(self.cloudsat['lat'][:5], cldclass_vdata['Latitude'][:5])]) == 5):
            # All the cloud class and precipitation type information is stored in a
            # binary format. Need to decode the binary to retrieve the cloud class
            # information.
            self.cloudsat['cloud_class'] = np.flipud(
                cldclass_data['cloud_scenario'].T).tolist()
            binary_cc = [["{0:b}".format(val) for val in line] for line in
                         self.cloudsat['cloud_class']]
            # Bits -5:-1 encode the cloud type; bits -14:-12 the precip type
            # (precip bits are only present when the binary string is long enough).
            self.cloudsat['cloud_type'] = [[self.cloudsat['cloud_dict']
                                            [val[-5:-1]][0] for val in line]
                                           for line in binary_cc]
            self.cloudsat['precip_type'] = [[self.cloudsat['precip_dict']
                                             [val[-14:-12]
                                              ][0] if len(val) > 12 else -1
                                             for val in line] for line in binary_cc]
        else:
            # If the data from the cloud class file is incompatible with the CloudSat
            # data (first five latitudes and longitudes don't match), set fields
            # equal to None before raising.
            self.cloudsat['cloud_class'] = None
            self.cloudsat['cloud_type'] = None
            self.cloudsat['precip_type'] = None
            raise Exception(
                "Cloud classification file doesn't match with this radar pass.")
        return self

    def add_cwc(self, cwc_data, cwc_vdata):
        """Adds CloudSat cloud water content data to the radarPass instance.

        PARAMETERS:
        -----------
        cwc_data: data dictionary output from 'read_cloudsat_file' function
            applied to a CloudSat cloud water content file.
        cwc_vdata: vdata dictionary output from 'read_cloudsat_file' function
            applied to a CloudSat cloud water content file.

        OUTPUTS:
        --------
        self (with cloud water content data)

        EXCEPTIONS:
        -----------
        Exception: "Cloud water content file doesn't match with this radar pass."
            Raised when the first five latitude/longitude values in the
            radarPass instance and cloud classification file don't match.
        """
        if (sum([int(a[0] == a[1]) for a in zip(self.cloudsat['lon'][:5], cwc_vdata["Longitude"][:5])]) == 5 and
                sum([int(a[0] == a[1]) for a in zip(self.cloudsat['lat'][:5], cwc_vdata["Latitude"][:5])]) == 5):
            # NOTE: these stay numpy arrays (not lists) so the boolean-mask
            # assignments below work; trim_pass/get_json_serializable_obj
            # convert them later.
            self.cloudsat['lwc'] = np.flipud(
                cwc_data['RO_liq_water_content'].T)/1000
            self.cloudsat['lwc_unc'] = np.flipud(
                cwc_data['RO_liq_water_content_uncertainty'].T).astype(float)
            self.cloudsat['iwc'] = np.flipud(
                cwc_data['RO_ice_water_content'].T)/1000
            self.cloudsat['iwc_unc'] = np.flipud(
                cwc_data['RO_ice_water_content_uncertainty'].T).astype(float)
            # Negative water contents are fill/invalid values: blank them out.
            self.cloudsat['lwc_unc'][self.cloudsat['lwc'] < 0] = np.nan
            self.cloudsat['iwc_unc'][self.cloudsat['iwc'] < 0] = np.nan
            self.cloudsat['lwc'][self.cloudsat['lwc'] < 0] = np.nan
            self.cloudsat['iwc'][self.cloudsat['iwc'] < 0] = np.nan
        else:
            self.cloudsat['lwc'] = None
            self.cloudsat['lwc_unc'] = None
            self.cloudsat['iwc'] = None
            self.cloudsat['iwc_unc'] = None
            raise Exception(
                "Cloud water content file doesn't match with this radar pass.")
        return self

    def trim_pass(self, area_of_interest):
        """Trim the radar pass so that it only contains profiles in the 'area of interest'.

        PARAMETERS:
        -----------
        area_of_interest: GeoDataFrame containing polygon that specifies
            the "area of interest" for the dataframe. CRS must be equal to the
            default_crs.

        OUTPUTS:
        --------
        self (with all CloudSat fields trimmed to the area of interest).
        """
        def determine_trim_indices(radar_pass, area_of_interest):
            """Determine which profile indices will be kept after the trimming."""
            points = [Point(lon, lat) for lon, lat in zip(radar_pass.cloudsat['lon'],
                                                          radar_pass.cloudsat['lat'])]
            traj_gdf = gpd.GeoDataFrame({'idx': list(range(len(radar_pass.cloudsat['lon']))),
                                         'geometry': points})
            traj_gdf.crs = {'init': 'epsg:4326'}
            traj_gdf = traj_gdf.to_crs({'init': default_crs})
            # NOTE(review): 'op=' is deprecated in newer geopandas in favor of
            # 'predicate='; kept for compatibility with the pinned version.
            traj_gdf = gpd.sjoin(traj_gdf, area_of_interest, how='inner', op='within')\
                .drop('index_right', axis=1)
            inds_to_keep = np.asarray(traj_gdf['idx'])
            return inds_to_keep

        inds_to_keep = determine_trim_indices(self, area_of_interest)

        # General radar pass data
        self.cloudsat['lon'] = (np.array(self.cloudsat['lon'])[
            inds_to_keep]).tolist()
        self.cloudsat['lat'] = (np.array(self.cloudsat['lat'])[
            inds_to_keep]).tolist()
        self.cloudsat['timestamp'] = (
            np.array(self.cloudsat['timestamp'])[inds_to_keep]).tolist()
        self.cloudsat['radar_refl'] = (np.array(self.cloudsat['radar_refl'])[
            :, inds_to_keep]).tolist()
        self.cloudsat['cpr_cloud_mask'] = (np.array(self.cloudsat['cpr_cloud_mask'])[
            :, inds_to_keep]).tolist()

        # Cloud mask data. KeyError: add_cloudclass was never called;
        # IndexError/TypeError: the fields were set to None after a mismatch.
        # (Previously a bare 'except:' which also swallowed KeyboardInterrupt.)
        try:
            self.cloudsat['cloud_class'] = (np.array(self.cloudsat['cloud_class'])[
                :, inds_to_keep]).tolist()
            self.cloudsat['cloud_type'] = (np.array(self.cloudsat['cloud_type'])[
                :, inds_to_keep]).tolist()
            self.cloudsat['precip_type'] = (np.array(self.cloudsat['precip_type'])[
                :, inds_to_keep]).tolist()
        except (KeyError, IndexError, TypeError):
            self.cloudsat['cloud_class'] = None
            self.cloudsat['cloud_type'] = None
            self.cloudsat['precip_type'] = None

        # Cloud water content data (same rationale as above).
        try:
            self.cloudsat['lwc'] = (np.array(self.cloudsat['lwc'])[
                :, inds_to_keep]).tolist()
            self.cloudsat['lwc_unc'] = (np.array(self.cloudsat['lwc_unc'])[
                :, inds_to_keep]).tolist()
            self.cloudsat['iwc'] = (np.array(self.cloudsat['iwc'])[
                :, inds_to_keep]).tolist()
            self.cloudsat['iwc_unc'] = (np.array(self.cloudsat['iwc_unc'])[
                :, inds_to_keep]).tolist()
        except (KeyError, IndexError, TypeError):
            self.cloudsat['lwc'] = None
            self.cloudsat['lwc_unc'] = None
            self.cloudsat['iwc'] = None
            self.cloudsat['iwc_unc'] = None

        # Kept as a numpy array (matches original behavior).
        self.cloudsat['trim_inds'] = inds_to_keep
        return self

    def add_era5_data(self):
        """Adds ERA5 fields to the radar pass. The date/hour for the ERA5
        fields is determined by rounding satellite pass' representative
        time to the nearest hour.

        TERMINOLOGY NOTE: 'atmospheric variables' files are netCDF4 files that contain
        ERA5 weather variables up in the atmosphere (eg. at 850 mb and 700 mb pressure
        levels). Their filenames names have 'atmospheric_vbls' in them. Similarly,
        'single level variables' files are files that contain ERA5 variables that
        are only available near the surface. These files have 'singlelevel_vbls'
        in the filename.

        PARAMETERS:
        -----------
        None

        OUTPUTS:
        --------
        self (with one additional attribute called 'era5'. 'era5' is a dictionary that contains
        all the era5 data, including the x coordinates, y coordinates, and atmospheric variables)
        """
        def find_era_date_hour(radar_pass):
            """Find the appropriate ERA5 date/hour for the given radarPass instance.

            OUTPUTS:
            --------
            era5_date: ERA5 date in string format (%Y%m%d).
            era5_hour: ERA5 hour in integer format.
            """
            t = radar_pass.get_representative_time()
            if t.minute >= 30:
                rounded_time = t.replace(
                    second=0, microsecond=0, minute=0, hour=t.hour+1)
            else:
                rounded_time = t.replace(second=0, microsecond=0, minute=0)
            return datetime.datetime.strftime(rounded_time, '%Y%m%d'), rounded_time.hour

        def open_atmospheric_vbls_file(radar_pass):
            """Opens the ERA5 'atmospheric variables' file for this pass.

            OUTPUTS:
            --------
            atm_dataset: netCDF4 Dataset object for atmospheric variables file.
            hr: hour corresponding to atmospheric variables file (integer).
            """
            dstr, hr = find_era_date_hour(radar_pass)
            file = 'ERA5_atmospheric_vbls_'+dstr+'.nc'
            atm_dataset = Dataset(os.path.join(data_path, 'ERA5', file))
            return atm_dataset, hr

        def open_singlelevel_vbls_file(radar_pass):
            """Opens the ERA5 'single level variables' file for this pass.

            OUTPUTS:
            --------
            slev_dataset: netCDF4 Dataset object for single level variables file.
            hr: hour corresponding to single level variables file (integer).
            """
            dstr, hr = find_era_date_hour(radar_pass)
            file = 'ERA5_singlelevel_vbls_'+dstr+'.nc'
            slev_dataset = Dataset(os.path.join(data_path, 'ERA5', file))
            return slev_dataset, hr

        def reproject_from_epsg4326(lon, lat, field, dst_crs):
            """Reproject a data field from lat/lon coordinates to a specified crs.

            PARAMETERS:
            -----------
            lon: array-like list/vector of longitude coordinates for the data field.
            lat: array-like list/vector of latitude coordinates for the data field.
            field: array-like data field of shape: len(lat), len(lon).
            dst_crs: destination CRS.

            OUTPUTS:
            --------
            dst_x: vector of x coordinates for reprojected field
            dst_y: vector list/vector of y coordinates for reprojected field
            destination_array: array of reprojected field (of shape len(y), len(x)).
            """
            # Parameters needed for the transformation.
            width = field.shape[1]
            height = field.shape[0]
            left, bottom, right, top = lon[0], lat[-1], lon[-1], lat[0]
            src_crs = {'init': 'epsg:4326'}

            # Calculate affine transformation matrix for the source field.
            src_transform = from_bounds(
                left, bottom, right, top, width, height)

            # Calculate affine transformation matrix, width, and height for the
            # destination field.
            dst_transform, dst_width, dst_height = calculate_default_transform(src_crs, dst_crs, width,
                                                                               height, left=left,
                                                                               bottom=bottom,
                                                                               right=right, top=top)

            # Perform reprojection.
            destination_array = np.zeros((dst_height, dst_width))
            reproject(source=field, destination=destination_array, src_transform=src_transform,
                      src_crs=src_crs, dst_transform=dst_transform,
                      dst_crs=dst_crs, resampling=Resampling.nearest)

            # Replace fill values with NaN's. (np.nan: the np.NaN alias was
            # removed in NumPy 2.0.)
            destination_array[destination_array == 1e20] = np.nan

            # Get X and Y vectors for the transformed field. Original author's
            # note: "I could be doing this wrong, but my method definitely works
            # for this specific Arctic case!" — kept as-is.
            dst_x = np.linspace(dst_transform[2], dst_transform[5], dst_width)
            dst_y = np.linspace(dst_transform[2], dst_transform[5], dst_height)

            return dst_x, dst_y, destination_array

        def get_lat_lon_level_hours(atm_dataset):
            """Extracts latitude, longitude, time, and level vectors
            from an ERA5 netCDF dataset.

            OUTPUTS:
            --------
            lat: vector of latitude coordinates.
            lon: vector of longitude coordinates.
            hours: vector of hours.
            levels: vector of levels. 'None' for single level variables files.
            """
            lat = atm_dataset.variables['latitude'][:]
            lon = atm_dataset.variables['longitude'][:]
            # ERA5 'time' is hours since 1900-01-01.
            hours = np.array([(pd.Timestamp('19000101')+(hrs_since*pd.Timedelta('1 hours'))).hour
                              for hrs_since in atm_dataset.variables['time'][:]])
            if 'level' in atm_dataset.variables.keys():
                levels = atm_dataset.variables['level'][:]
            else:
                levels = None
            return lat, lon, hours, levels

        def get_weather_variable_names(atm_dataset):
            """Retrieves the names of netCDF variables that correspond to weather
            variables (and are not latitude, longitude, level, or time)."""
            return [var for var in atm_dataset.variables.keys() if var not in
                    ['longitude', 'latitude', 'level', 'time']]

        def add_era5_atmospheric_vbls(atm_dataset, hr, era5=None):
            """Adds variables from an ERA5 'atmospheric variables' netCDF4 file
            to an 'era5' dictionary.

            PARAMETERS:
            -----------
            atm_dataset: netCDF4 Dataset object corresponding to the
                'atmospheric variables' file.
            hr: integer hour corresponding to the radarPass object's
                representative time.
            era5: Dictionary object to add the ERA5 data to (a new dict when None).

            OUTPUTS:
            --------
            era5: Dictionary with added ERA5 data. Contains keys for x
            coordinates, y coordinates, and data fields for each variable/level.
            """
            # FIX: was 'era5={}' — a mutable default argument shared across calls.
            if era5 is None:
                era5 = {}
            lat, lon, hours, levels = get_lat_lon_level_hours(atm_dataset)
            wx_vbls = get_weather_variable_names(atm_dataset)
            for vbl in wx_vbls:
                for level in levels:
                    i_time = np.where(hours == hr)[0][0]
                    i_level = np.where(levels == level)[0][0]
                    field = atm_dataset.variables[vbl][i_time, i_level, :, :]
                    reproj_x, reproj_y, reproj_field = reproject_from_epsg4326(lon, lat, field,
                                                                               {'init': default_crs})
                    if vbl in ['q', 'clwc', 'ciwc']:  # Change units
                        reproj_field = reproj_field*1000
                    elif vbl == 'z':
                        reproj_field = reproj_field/9.81
                    era5[vbl+'_'+str(level)] = np.flipud(reproj_field).tolist()

            # Convert vertical velocities to mm/s
            era5['w_700'] = (-np.array(era5['w_700'])*287 *
                             np.array(era5['t_700'])/(7e4*9.81)*1000).tolist()
            era5['w_850'] = (-np.array(era5['w_850'])*287 *
                             np.array(era5['t_850'])/(8.5e4*9.81)*1000).tolist()
            era5['x'], era5['y'] = reproj_x, reproj_y
            return era5

        def add_era5_singlelevel_vbls(slev_dataset, hr, era5=None):
            """Adds variables from an ERA5 'single level variables' netCDF4 file
            to an 'era5' dictionary.

            PARAMETERS:
            -----------
            slev_dataset: netCDF4 Dataset object corresponding to the
                'single level variables' file.
            hr: integer hour corresponding to the radarPass object's
                representative time.
            era5: Dictionary object to add the ERA5 data to (a new dict when None).

            OUTPUTS:
            --------
            era5: Dictionary with added ERA5 data. Contains keys for x
            coordinates, y coordinates, and data fields for each added variable.
            """
            # FIX: was 'era5={}' — a mutable default argument shared across calls.
            if era5 is None:
                era5 = {}
            lat, lon, hours, _ = get_lat_lon_level_hours(slev_dataset)
            wx_vbls = get_weather_variable_names(slev_dataset)
            for vbl in wx_vbls:
                i_time = np.where(hours == hr)[0][0]
                field = slev_dataset.variables[vbl][i_time, :, :]
                reproj_x, reproj_y, reproj_field = reproject_from_epsg4326(lon, lat, field,
                                                                           {'init': default_crs})
                if vbl in ['msl', 'sp']:  # Change units
                    reproj_field = reproj_field/100
                era5[vbl] = np.flipud(reproj_field).tolist()
            if 'x' not in era5.keys() and 'y' not in era5.keys():
                era5['x'], era5['y'] = reproj_x.tolist(), reproj_y.tolist()
            return era5

        atm_dataset, hr = open_atmospheric_vbls_file(self)
        era5 = add_era5_atmospheric_vbls(atm_dataset, hr)
        slev_dataset, hr = open_singlelevel_vbls_file(self)
        era5 = add_era5_singlelevel_vbls(slev_dataset, hr, era5=era5)
        self.era5 = era5
        return self

    def get_json_serializable_obj(self):
        """Converts structure of radarPass object to something that's
        JSON serializable (contains only dictionaries and lists).

        OUTPUTS:
        --------
        radar_dict: dictionary containing all the information that the
        radarPass object has. NOTE: this aliases self.__dict__, so mutating
        the returned dict mutates the instance.
        """
        radar_dict = self.__dict__
        for key, val in radar_dict['cloudsat'].items():
            if isinstance(val, np.ndarray):
                radar_dict['cloudsat'][key] = val.tolist()
        for key, val in radar_dict['era5'].items():
            if isinstance(val, np.ndarray):
                radar_dict['era5'][key] = val.tolist()
        return radar_dict

    def reduce_size_cloudsat(self, reduction_factor=3,
                             trim_vbls=['cpr_cloud_mask', 'cloud_class', 'precip_type', 'lwc_unc', 'iwc_unc']):
        """Reduces the size of the CloudSat data fields in two ways:
        1. Reduces image sizes.
        2. Removes fields that are less useful.

        PARAMETERS:
        -----------
        reduction_factor: Default 3. Image sizes are reduced by taking every n'th pixel in
            the time dimension, where n is the reduction_factor.
        trim_vbls: List of less-important variables that will be removed.

        OUTPUTS:
        --------
        self (with smaller and/or fewer data fields)
        """
        reduce_1d = ['lon', 'lat', 'timestamp']
        for var in reduce_1d:
            self.cloudsat[var] = (np.array(self.cloudsat[var])[
                ::reduction_factor]).tolist()
        reduce_2d = ['radar_refl', 'cpr_cloud_mask', 'cloud_class', 'cloud_type', 'precip_type',
                     'lwc', 'lwc_unc', 'iwc', 'iwc_unc']
        for var in reduce_2d:
            self.cloudsat[var] = (np.array(self.cloudsat[var])[
                :, ::reduction_factor]).tolist()
        for vbl in trim_vbls:
            del self.cloudsat[vbl]
        return self

    def reduce_size_era5(self, reduction_factor=3, trim_vbls=['d2m', 'sp', 'z_850', 'z_700']):
        """Reduces the size of the ERA5 data fields in two ways:
        1. Reduces image sizes.
        2. Removes fields that are less useful.

        PARAMETERS:
        -----------
        reduction_factor: Default 3. Image sizes are reduced by taking every n'th pixel in
            the x and y dimensions, where n is the reduction_factor.
        trim_vbls: List of less-important variables that will be removed.

        OUTPUTS:
        --------
        self (with smaller and/or fewer data fields)
        """
        reduce_1d = ['x', 'y']
        for var in reduce_1d:
            self.era5[var] = (np.array(self.era5[var])[
                ::reduction_factor]).tolist()
        reduce_2d = [var for var in self.era5.keys()
                     if var not in reduce_1d]
        for var in reduce_2d:
            self.era5[var] = (np.array(self.era5[var])[
                ::reduction_factor, ::reduction_factor]).tolist()
        for vbl in trim_vbls:
            del self.era5[vbl]
        return self

    def get_representative_time(self):
        """Retrieves a representative time for the radarPass instance. This
        representative time is the median time for the radar pass.

        OUTPUTS:
        --------
        rep_time: representative time (pd.Timestamp)
        """
        return self.cloudsat['timestamp'][len(self.cloudsat['timestamp'])//2]
def save_radarPass_object_pkl(radar_pass, data_path):
    """Saves a radarPass instance to a pickle file with a standard filename format."""
    stamps = radar_pass.cloudsat['timestamp']
    pass_timestamp = stamps[len(stamps) // 2].strftime('%Y%m%d_%H%M%S')
    out_path = os.path.join(data_path, 'radar_passes',
                            'radarPass_plot_' + pass_timestamp + '.pkl')
    with open(out_path, 'wb') as f:
        pickle.dump(radar_pass, f, 2)
    return pass_timestamp
def load_radarPass_object_pkl(pass_timestamp, data_path):
    """Loads a radarPass instance from a pickle file with a standard filename format."""
    in_path = os.path.join(data_path, 'radar_passes',
                           'radarPass_plot_' + pass_timestamp + '.pkl')
    with open(in_path, 'rb') as f:
        return pickle.load(f)
def save_radarPass_object_json(radar_pass, data_path):
    """Saves a radarPass instance to a json file with a standard filename format.

    Timestamps are not JSON serializable, so the pass' timestamp list is
    converted to '%Y%m%d_%H%M%S.%f' strings before dumping.
    """
    stamps = radar_pass.cloudsat['timestamp']
    pass_timestamp = stamps[len(stamps) // 2].strftime('%Y%m%d_%H%M%S')
    fname = 'radarPass_plot_' + pass_timestamp + '.json'
    radar_dict = radar_pass.get_json_serializable_obj()
    # FIX: the timestamps live under radar_dict['cloudsat'], not at the top
    # level. The original read radar_dict['timestamp'], which raised KeyError
    # and would have left non-serializable Timestamp objects in the payload.
    radar_dict['cloudsat']['timestamp'] = [
        tstamp.strftime('%Y%m%d_%H%M%S.%f')
        for tstamp in radar_dict['cloudsat']['timestamp']]
    with open(os.path.join(data_path, 'radar_passes', fname), 'w') as f:
        json.dump(radar_dict, f)
    return pass_timestamp
def specify_area_of_interest_EPSG4326(bbox=(-180, 60, 180, 90)):
    """Specifies the area of interest for the plot in the lat/lon coordinate system"""
    bounding_poly = box(*bbox)
    area_of_int = gpd.GeoDataFrame({'geometry': [bounding_poly]})
    area_of_int.crs = {'init': 'epsg:4326'}
    return area_of_int
def specify_area_of_interest_EPSG3995(bbox=(-3e6, -3e6, 3e6, 3e6)):
    """Specifies the area of interest for the plot in the Polar
    Stereographic coordinate system.
    """
    bounding_poly = box(*bbox)
    area_of_int = gpd.GeoDataFrame({'geometry': [bounding_poly]})
    area_of_int.crs = {'init': default_crs}
    return area_of_int
def load_country_geometries(area_of_interest):
    """Loads polygons for all of the world's countries, converts them to the default_crs."""
    shp_path = os.path.join(data_path, 'Countries_WGS84', 'Countries_WGS84.shp')
    countries = gpd.read_file(shp_path)
    countries = countries.to_crs({'init': default_crs})
    # Clip to the plotting region before returning.
    return gpd.overlay(countries, area_of_interest, how='intersection')
def prepare_polygon_coords_for_bokeh(countries):
    """Prepares the country polygons for plotting with Bokeh.

    To plot series of polygons, Bokeh needs two lists of lists (one for x coordinates, and another
    for y coordinates). Each element in the outer list represents a single polygon, and each
    element in the inner lists represents the coordinate for a single point in given polygon.
    This function takes a GeoDataFrame with a given set of countries, and returns Bokeh-friendly
    lists of x coordinates and y coordinates for those countries.

    PARAMETERS:
    -----------
    countries: GeoDataFrame with a given set of countries.

    OUTPUTS:
    --------
    x_coords, y_coords: Bokeh-friendly lists of x and y coordinates for those countries.
    """
    # Simplify shapes (to resolution of 10000 meters), convert polygons to multipolygons.
    list_of_polygons = []
    for raw_poly in countries['geometry']:
        raw_poly = raw_poly.simplify(10000, preserve_topology=False)
        if isinstance(raw_poly, Polygon):
            raw_poly = MultiPolygon([raw_poly])
        # FIX: iterate '.geoms' rather than list(multi_polygon) — direct
        # iteration of multi-part geometries was removed in Shapely 2.0
        # ('.geoms' also exists in Shapely 1.x, so this is backward-compatible).
        for poly in raw_poly.geoms:
            list_of_polygons.append(poly)

    # Create lists of lists.
    x_coords = [list(poly.exterior.coords.xy[0]) for poly in list_of_polygons]
    y_coords = [list(poly.exterior.coords.xy[1]) for poly in list_of_polygons]
    return x_coords, y_coords
|
# File: pyserver/serv_simple.py
import os.path, os

# Ensure a (possibly empty) local-override config module exists before
# importing config, which may pull values from it.
if not os.path.exists("./config_local.py"):
    # FIX: create the empty placeholder with a context manager so the handle
    # is always closed (was open()/close() pair).
    with open("config_local.py", "w"):
        pass

import config, sys

# Default the document root to the parent of the current working directory.
# NOTE(review): .replace() binds to the "/.." literal only, not the joined
# path; os.path.normpath still canonicalizes separators, so this works.
if not hasattr(config, "doc_root"):
    config.doc_root = os.path.abspath(os.path.normpath(os.getcwd()+"/..".replace("/", os.path.sep)))

if not os.path.exists(config.doc_root):
    os.makedirs(config.doc_root)

# Fill in defaults for settings the local config did not set.
if not config.is_set("use_sqlite"):
    config.use_sqlite = True
if not config.is_set("serv_local"):
    config.serv_local = True
if not config.is_set("serv_all_local"):
    config.serv_all_local = True  # allow full access to local filesystem

from config import *
import mimetypes
def bstr(s):
    """Coerce *s* to bytes.

    bytes objects pass through unchanged; anything else is str()-converted
    and encoded as latin-1 (the encoding used on the HTTP wire here).
    """
    # FIX: isinstance instead of 'type(s) == bytes' so bytes subclasses are
    # also passed through unchanged.
    if isinstance(s, bytes):
        return s
    return bytes(str(s), "latin-1")
def mime(path):
    """Return the (type, encoding) pair guessed for *path* from its extension."""
    guessed = mimetypes.guess_type(path)
    return guessed
if sys.version_info.major > 2:
from http import *
from http.server import *
else:
from httplib import *
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from logger import elog, mlog, alog
import os, sys, os.path, math, random, time, io, gc
import shelve, imp, struct, ctypes
import mimetypes
from auth import AuthAPI_RefreshToken_WPHack, AuthAPI_OAuthStart, AuthAPI_GetUserInfo, AuthAPI_RefreshToken, AuthAPI_SessionToken
from fileapi import FileAPI_DirList, FileAPI_GetMeta, FileAPI_UploadStart, FileAPI_UploadChunk, FileAPI_GetFile
import config, json
from api import api_handlers
import db_engine
proxy_cls = """
class ObjProxy (object):
def __init__(self, obj, methodmap):
self.methodmap = methodmap
self.proxy = obj
def __getattribute__(self, attr):
if attr.endswith("_override"):
attr = attr[:len(attr)-len("_override")]
proxy = object.__getattribute__(self, "proxy")
return object.__getattribute__(proxy, attr)
#print("GETATTR")
proxy = object.__getattribute__(self, "proxy")
methodmap = object.__getattribute__(self, "methodmap")
#print(methodmap)
if attr in methodmap:
methodmap[attr](attr)
return object.__getattribute__(proxy, attr)
"""
specials = [
"__str__", "__repr__", "__len__", ["__getitem__", "item"],
["__setitem__", "item, val"], ["__hasitem__", "item"],
["__eq__", "b"], "__hash__", "__iter__", ["__neq__", "b"],
["__contains__", "item"], ["__delitem__", "item"],
["__lt__", "b"], ["__gt__", "b"], ["__le__", "b"], ["__ge__", "b"]
]
for k in specials:
args1 = args2 = ""
if type(k) in [list, tuple]:
k, args = k
args1 = ", " + args
args2 = args
proxy_cls += """
def K(selfARGS1):
attr = "K"
proxy = object.__getattribute__(self, "proxy")
methodmap = object.__getattribute__(self, "methodmap")
if attr in methodmap:
methodmap[attr](attr)
return object.__getattribute__(proxy, attr)(ARGS2)
""".replace("K", k).replace("ARGS1", args1).replace("ARGS2", args2)
exec(proxy_cls)
class ReqHandler (BaseHTTPRequestHandler):
    """HTTP request handler: routes /api-style paths to registered handlers
    and serves static files from doc_root/build under /fcontent/.

    self.wfile is wrapped in an ObjProxy so that headers are flushed lazily,
    exactly once, on the first body write (see _on_write / start_Req).
    """

    def __init__(self, *args):
        BaseHTTPRequestHandler.__init__(self, *args)

    def _on_write(self, attr):
        # Hook installed on the wfile proxy: flush headers once before the
        # first body byte is written, so handlers need not call end_headers().
        #print("on_write!", self._sent_headers)
        if not self._sent_headers:
            self._sent_headers = True
            self.end_headers()

    def end_headers(self, *args):
        # Record that headers went out so _on_write does not flush twice.
        self._sent_headers = True
        BaseHTTPRequestHandler.end_headers(self, *args)

    def format_err(self, buf):
        """Wrap a build-failure message in a minimal HTML page, translating
        whitespace to HTML equivalents; returns bytes."""
        if type(buf) == bytes: buf = str(buf, "latin-1")

        header = """
<!DOCTYPE html><html><head><title>Build Error</title></head>
<body><h1>Build Failure</h1><h3>
"""
        footer = """
</h3>
</body>
"""
        ret = ""
        # NOTE(review): the replacement entity is emitted *and* the original
        # character is appended afterwards (no continue) — looks intentional
        # for "\n" (<br /> plus newline keeps the source readable) but
        # doubles the visual width of spaces/tabs; confirm before changing.
        for b in buf:
            if b == "\n": ret += "<br />"
            if b == " ": ret += "&nbsp;"
            if b == "\t": ret += "&nbsp;&nbsp;"

            ret += b

        return (header + ret + footer).encode()

    def set_ipaddr(self):
        # Stash the client address (host:port for TCP tuples) in the global
        # config module so downstream handlers can log it.
        adr = self.client_address
        if type(adr) != str and len(adr) == 2:
            adr = str(adr[0]) + ":" + str(adr[1])
        else:
            adr = str(adr)

        config.client_ip = adr

    def start_Req(self):
        # Common per-request setup: keep the raw stream for send_header(),
        # then swap in the lazily-header-flushing proxy for body writes.
        self.orig_wfile = self.wfile
        self._sent_headers = False
        self.wfile = ObjProxy(self.wfile, {"write" : self._on_write})
        self.set_ipaddr()

    def do_GET(self):
        self.start_Req()
        alog("GET " + self.path)

        # keep_alive is computed but not currently acted upon.
        if "Connection" in self.headers:
            keep_alive = self.headers["Connection"].strip().lower() == "keep-alive"
        else:
            keep_alive = False

        wf = self.wfile
        body = [b"yay, tst"]

        print(self.path)
        # API paths take priority over static file serving.
        if self.has_handler(self.path):
            self.exec_handler(self.path, "GET")
            return
        elif self.path.strip() == "/" or self.path.strip() == "":
            # Bare root serves the built main page.
            path = doc_root + "/build/main.html"
        else:
            path = self.path.strip()
            # Only /fcontent/ URLs may reach the filesystem; reject path
            # traversal (..) and drive-letter/scheme separators (:).
            if not path.startswith("/fcontent/"):
                self.send_error(404)
                return
            if ".." in path or ":" in path:
                self.send_error(503)
                return
            path = os.path.normpath(doc_root + self.path.replace("/fcontent/", "/build/"))

        if not os.path.exists(path):
            self.send_error(404)
            return

        # Read the file in 1 MiB chunks until EOF.
        f = open(path, "rb")
        csize = 1024*1024

        ret = f.read(csize)
        body = [ret];
        while ret not in ["", b'', None]:
            ret = f.read(csize);
            body.append(ret);
        f.close()

        # Normalize body to a list of byte chunks.
        if type(body) == str:
            body = [bytes.decode(body, "latin-1")]
        elif type(body) == bytes:
            body = [body]

        bodysize = 0
        for chunk in body:
            bodysize += len(chunk)

        # Force the JS MIME type explicitly; otherwise trust mimetypes.
        if path.strip().endswith(".js"):
            mm = "application/javascript"
        else:
            mm = mime(path)[0]

        self.gen_headers("GET", bodysize, mm);

        # Concatenate and send in one write (triggers the header flush hook).
        b = b""
        for chunk in body:
            b += chunk

        wf.write(b);
        #for chunk in body:
        #  wf.write(chunk);

    def has_handler(self, path):
        # True when any registered API prefix matches the request path.
        for k in api_handlers:
            if path.startswith(k): return True

        return False

    def exec_handler(self, path, op):
        handler = None

        #find matching handler with largest prefix
        for k in api_handlers:
            if path.startswith(k):
                if handler == None or len(k) > len(handler):
                    handler = k

        if handler != None:
            # Instantiate the handler class and dispatch do_GET/do_POST/do_PUT.
            getattr(api_handlers[handler](), "do_"+op)(self)
        else:
            print("ERROR: bad handler", path, op)

    def restart(self):
        global restart_server

        # Shutting the server down makes the module-level while-loop spin it
        # up again (see bottom of file).
        #restart_server = True
        print("\nRestarting Server...\n")
        self.server.shutdown()

    def do_POST(self):
        self.start_Req()
        self.set_ipaddr()

        path = self.path
        alog("POST " + self.path)

        if self.has_handler(path):
            self.exec_handler(path, "POST")
        else:
            self.send_error(404)

    def do_PUT(self):
        self.start_Req()
        alog("PUT " + self.path)
        self.set_ipaddr()

        path = self.path
        if self.has_handler(path):
            self.exec_handler(path, "PUT")
        else:
            self.send_error(404)

    def send_header(self, *args):
        # Headers must bypass the ObjProxy (its write hook would recurse into
        # end_headers); temporarily restore the raw stream.
        wf = self.wfile
        self.wfile = self.orig_wfile

        ret = BaseHTTPRequestHandler.send_header(self, *args)

        self.wfile = wf
        return ret

    def gen_headers(self, method, length, type, extra_headers={}):
        """Emit the response preamble and standard headers for *method*.

        NOTE(review): extra_headers is a mutable default argument; it is only
        read, never mutated, so this is safe as written.
        """
        #if type == "text/html":
        #  type = "application/xhtml"

        # write_override bypasses the proxy hook so this preamble does not
        # trigger a premature end_headers().
        self.wfile.write_override(bstr(method) + b" http/1.1\r\n")
        self.send_header("Content-Type", type)
        self.send_header("Content-Length", length)

        if "Via" in self.headers:
            uri = "http://"+serverhost+self.path
            print("Got 'Via':", uri)
            alog("Got 'Via': " + str(uri));
            self.send_header("Content-Location", uri)

        for k in extra_headers:
            self.send_header(k, extra_headers[k])

        if "Via" in self.headers:
            pass
            #self.send_header("Via", self.headers["Via"])

        #self.send_header("Connection", "close")
        #self.send_header("Host", serverhost)
        self.send_header("Server-Host", serverhost)
        #self.end_headers()

    def send_error(self, code, obj=None):
        # Delegates to the stock implementation; the JSON error-body path
        # below the early return is intentionally disabled dead code.
        if obj != None:
            msg = json.dumps(obj)
            BaseHTTPRequestHandler.send_error(self, code, msg)
        else:
            BaseHTTPRequestHandler.send_error(self, code)
        return

        if obj == None: obj = {}

        obj["result"] = 0
        obj["error"] = code

        self.code = code
        self.codemsg = "ERR"

        body = json.dumps(obj)
        self.gen_headers("GET", len(body), "application/x-javascript")
        self.wfile.write(bstr(body))
import sqlite_db

# Create and seed the SQLite database on first run.
if not os.path.exists("database.db"):
    print("initializing database...")
    sqlite_db.default_db()

# Disabled debugging block kept as a no-op string literal; the trailing
# '#"""' lets it be re-enabled by removing the opening quotes.
"""
cur, con = sqlite_db.sql_connect()
cur.execute("INSERT INTO users (username,name_first,name_last,password,email,permissions) VALUES ('user2','','',7,'{SHA}2jmj7l5rSw0yVb/vlWAYkK/YBwk=','me@localhost');");
con.commit()

res = cur.execute("SELECT * FROM ussers")
for row in res:
    print(row)

sys.exit()
#"""

# Outer loop lets a handler call server.shutdown() (see ReqHandler.restart)
# and have the server relaunched; serve_forever() blocks until then.
restart_server = True
while restart_server:
    restart_server = False

    server = HTTPServer((serverhost, serverport), ReqHandler);
    #server.socket = ssl.wrap_socket(server.socket, certfile=certpath, keyfile="privateKey.key")

    print("running on port", serverport)
    server.serve_forever()
|
from sqlite_utils.db import (
Index,
Database,
ForeignKey,
AlterError,
NoObviousTable,
ForeignKey,
)
from sqlite_utils.utils import sqlite3
import collections
import datetime
import decimal
import json
import pathlib
import pytest
from .utils import collapse_whitespace
try:
import pandas as pd
except ImportError:
pd = None
def test_create_table(fresh_db):
    """Creating a table from a column->type mapping yields the expected schema."""
    assert fresh_db.table_names() == []
    table = fresh_db.create_table(
        "test_table",
        {
            "text_col": str,
            "float_col": float,
            "int_col": int,
            "bool_col": bool,
            "bytes_col": bytes,
            "datetime_col": datetime.datetime,
        },
    )
    assert fresh_db.table_names() == ["test_table"]
    actual_columns = [{"name": col.name, "type": col.type} for col in table.columns]
    assert actual_columns == [
        {"name": "text_col", "type": "TEXT"},
        {"name": "float_col", "type": "FLOAT"},
        {"name": "int_col", "type": "INTEGER"},
        {"name": "bool_col", "type": "INTEGER"},
        {"name": "bytes_col", "type": "BLOB"},
        {"name": "datetime_col", "type": "TEXT"},
    ]
    expected_schema = (
        "CREATE TABLE [test_table] (\n"
        " [text_col] TEXT,\n"
        " [float_col] FLOAT,\n"
        " [int_col] INTEGER,\n"
        " [bool_col] INTEGER,\n"
        " [bytes_col] BLOB,\n"
        " [datetime_col] TEXT\n"
        ")"
    )
    assert table.schema == expected_schema
def test_create_table_compound_primary_key(fresh_db):
    """A tuple pk produces a compound PRIMARY KEY clause and matching .pks."""
    table = fresh_db.create_table(
        "test_table", {"id1": str, "id2": str, "value": int}, pk=("id1", "id2")
    )
    expected_schema = (
        "CREATE TABLE [test_table] (\n"
        " [id1] TEXT,\n"
        " [id2] TEXT,\n"
        " [value] INTEGER,\n"
        " PRIMARY KEY ([id1], [id2])\n"
        ")"
    )
    assert table.schema == expected_schema
    assert table.pks == ["id1", "id2"]
def test_create_table_with_bad_defaults(fresh_db):
    """Defaults naming a non-existent column are rejected."""
    columns = {"name": str, "score": int}
    with pytest.raises(AssertionError):
        fresh_db.create_table("players", columns, defaults={"mouse": 1})
def test_create_table_with_invalid_column_characters(fresh_db):
    """Column names containing square brackets are rejected."""
    bad_columns = {"name[foo]": str}
    with pytest.raises(AssertionError):
        fresh_db.create_table("players", bad_columns)
def test_create_table_with_defaults(fresh_db):
    """Column defaults (including embedded quotes) are written into the schema."""
    table = fresh_db.create_table(
        "players",
        {"name": str, "score": int},
        defaults={"score": 1, "name": "bob''bob"},
    )
    assert fresh_db.table_names() == ["players"]
    actual_columns = [{"name": col.name, "type": col.type} for col in table.columns]
    assert actual_columns == [
        {"name": "name", "type": "TEXT"},
        {"name": "score", "type": "INTEGER"},
    ]
    expected_schema = (
        "CREATE TABLE [players] (\n [name] TEXT DEFAULT 'bob''''bob',\n [score] INTEGER DEFAULT 1\n)"
    )
    assert table.schema == expected_schema
def test_create_table_with_bad_not_null(fresh_db):
    """not_null naming a non-existent column is rejected."""
    columns = {"name": str, "score": int}
    with pytest.raises(AssertionError):
        fresh_db.create_table("players", columns, not_null={"mouse"})
def test_create_table_with_not_null(fresh_db):
    """not_null columns gain NOT NULL clauses, combinable with defaults."""
    table = fresh_db.create_table(
        "players",
        {"name": str, "score": int},
        not_null={"name", "score"},
        defaults={"score": 3},
    )
    assert fresh_db.table_names() == ["players"]
    actual_columns = [{"name": col.name, "type": col.type} for col in table.columns]
    assert actual_columns == [
        {"name": "name", "type": "TEXT"},
        {"name": "score", "type": "INTEGER"},
    ]
    expected_schema = (
        "CREATE TABLE [players] (\n [name] TEXT NOT NULL,\n [score] INTEGER NOT NULL DEFAULT 3\n)"
    )
    assert table.schema == expected_schema
@pytest.mark.parametrize(
    "example,expected_columns",
    (
        (
            {"name": "Ravi", "age": 63},
            [{"name": "name", "type": "TEXT"}, {"name": "age", "type": "INTEGER"}],
        ),
        (
            # SQL reserved words are legal column names (they get [bracketed]).
            {"create": "Reserved word", "table": "Another"},
            [{"name": "create", "type": "TEXT"}, {"name": "table", "type": "TEXT"}],
        ),
        ({"day": datetime.time(11, 0)}, [{"name": "day", "type": "TEXT"}]),
        ({"decimal": decimal.Decimal("1.2")}, [{"name": "decimal", "type": "FLOAT"}]),
    ),
)
def test_create_table_from_example(fresh_db, example, expected_columns):
    """Inserting into a missing table creates it, inferring column types
    (including time and Decimal values) from the example row."""
    people_table = fresh_db["people"]
    # Nothing inserted yet, so neither last_rowid nor last_pk is populated.
    assert None == people_table.last_rowid
    assert None == people_table.last_pk
    people_table.insert(example)
    assert 1 == people_table.last_rowid
    assert 1 == people_table.last_pk
    assert ["people"] == fresh_db.table_names()
    assert expected_columns == [
        {"name": col.name, "type": col.type} for col in fresh_db["people"].columns
    ]
def test_create_table_from_example_with_compound_primary_keys(fresh_db):
    """insert() with a tuple pk= should set up a compound primary key."""
    row = {"name": "Zhang", "group": "staff", "employee_id": 2}
    table = fresh_db["people"].insert(row, pk=("group", "employee_id"))
    assert table.pks == ["group", "employee_id"]
    assert table.get(("staff", 2)) == row
@pytest.mark.parametrize(
    "method_name", ("insert", "upsert", "insert_all", "upsert_all")
)
def test_create_table_with_custom_columns(fresh_db, method_name):
    """columns= should override types inferred from the data and add columns
    absent from it, across all four insert/upsert methods."""
    table = fresh_db["dogs"]
    method = getattr(table, method_name)
    record = {"id": 1, "name": "Cleo", "age": "5"}
    if method_name.endswith("_all"):
        # The *_all variants take a list of records.
        record = [record]
    method(record, pk="id", columns={"age": int, "weight": float})
    assert fresh_db.table_names() == ["dogs"]
    observed = [{"name": col.name, "type": col.type} for col in table.columns]
    assert observed == [
        {"name": "id", "type": "INTEGER"},
        {"name": "name", "type": "TEXT"},
        {"name": "age", "type": "INTEGER"},
        {"name": "weight", "type": "FLOAT"},
    ]
    assert list(table.rows) == [{"id": 1, "name": "Cleo", "age": 5, "weight": None}]
@pytest.mark.parametrize("use_table_factory", [True, False])
def test_create_table_column_order(fresh_db, use_table_factory):
    """column_order= should put the named columns first, in order, followed
    by the remaining columns in insertion order."""
    row = collections.OrderedDict(
        (
            ("zzz", "third"),
            ("abc", "first"),
            ("ccc", "second"),
            ("bbb", "second-to-last"),
            ("aaa", "last"),
        )
    )
    column_order = ("abc", "ccc", "zzz")
    if use_table_factory:
        fresh_db.table("table", column_order=column_order).insert(row)
    else:
        fresh_db["table"].insert(row, column_order=column_order)
    observed = [
        {"name": col.name, "type": col.type} for col in fresh_db["table"].columns
    ]
    assert observed == [
        {"name": "abc", "type": "TEXT"},
        {"name": "ccc", "type": "TEXT"},
        {"name": "zzz", "type": "TEXT"},
        {"name": "bbb", "type": "TEXT"},
        {"name": "aaa", "type": "TEXT"},
    ]
@pytest.mark.parametrize(
    "foreign_key_specification,expected_exception",
    (
        # You can specify triples, pairs, or a list of columns
        ((("one_id", "one", "id"), ("two_id", "two", "id")), False),
        ((("one_id", "one"), ("two_id", "two")), False),
        (("one_id", "two_id"), False),
        # You can also specify ForeignKey tuples:
        (
            (
                ForeignKey("m2m", "one_id", "one", "id"),
                ForeignKey("m2m", "two_id", "two", "id"),
            ),
            False,
        ),
        # If you specify a column that doesn't point to a table, you get an error:
        (("one_id", "two_id", "three_id"), NoObviousTable),
        # Tuples of the wrong length get an error:
        ((("one_id", "one", "id", "five"), ("two_id", "two", "id")), AssertionError),
        # Likewise a bad column:
        ((("one_id", "one", "id2"),), AlterError),
        # Or a list of dicts
        (({"one_id": "one"},), AssertionError),
    ),
)
@pytest.mark.parametrize("use_table_factory", [True, False])
def test_create_table_works_for_m2m_with_only_foreign_keys(
    fresh_db, foreign_key_specification, expected_exception, use_table_factory
):
    """Creating an m2m table implicitly via insert() should accept every
    supported foreign_keys= shape and reject malformed specifications.

    When expected_exception is falsy the insert must succeed and both
    columns must end up as INTEGER foreign keys to their parent tables.
    """
    if use_table_factory:
        fresh_db.table("one", pk="id").insert({"id": 1})
        fresh_db.table("two", pk="id").insert({"id": 1})
    else:
        fresh_db["one"].insert({"id": 1}, pk="id")
        fresh_db["two"].insert({"id": 1}, pk="id")
    row = {"one_id": 1, "two_id": 1}
    def do_it():
        # Perform the insert through whichever API variant is under test.
        if use_table_factory:
            fresh_db.table("m2m", foreign_keys=foreign_key_specification).insert(row)
        else:
            fresh_db["m2m"].insert(row, foreign_keys=foreign_key_specification)
    if expected_exception:
        with pytest.raises(expected_exception):
            do_it()
        return
    else:
        do_it()
    assert [
        {"name": "one_id", "type": "INTEGER"},
        {"name": "two_id", "type": "INTEGER"},
    ] == [{"name": col.name, "type": col.type} for col in fresh_db["m2m"].columns]
    # Compare foreign keys order-insensitively by sorting both sides on repr.
    assert sorted(
        [
            {"column": "one_id", "other_table": "one", "other_column": "id"},
            {"column": "two_id", "other_table": "two", "other_column": "id"},
        ],
        key=lambda s: repr(s),
    ) == sorted(
        [
            {
                "column": fk.column,
                "other_table": fk.other_table,
                "other_column": fk.other_column,
            }
            for fk in fresh_db["m2m"].foreign_keys
        ],
        key=lambda s: repr(s),
    )
def test_create_error_if_invalid_foreign_keys(fresh_db):
    """A foreign key pointing at a missing table/column should raise AlterError."""
    bad_fk = ("ref_id", "bad_table", "bad_column")
    with pytest.raises(AlterError):
        fresh_db["one"].insert({"id": 1, "ref_id": 3}, pk="id", foreign_keys=(bad_fk,))
@pytest.mark.parametrize(
    "col_name,col_type,not_null_default,expected_schema",
    (
        ("nickname", str, None, "CREATE TABLE [dogs] ( [name] TEXT , [nickname] TEXT)"),
        ("dob", datetime.date, None, "CREATE TABLE [dogs] ( [name] TEXT , [dob] TEXT)"),
        ("age", int, None, "CREATE TABLE [dogs] ( [name] TEXT , [age] INTEGER)"),
        ("weight", float, None, "CREATE TABLE [dogs] ( [name] TEXT , [weight] FLOAT)"),
        # Type can also be given as a SQL type string, in either case:
        ("text", "TEXT", None, "CREATE TABLE [dogs] ( [name] TEXT , [text] TEXT)"),
        (
            "integer",
            "INTEGER",
            None,
            "CREATE TABLE [dogs] ( [name] TEXT , [integer] INTEGER)",
        ),
        ("float", "FLOAT", None, "CREATE TABLE [dogs] ( [name] TEXT , [float] FLOAT)"),
        ("blob", "blob", None, "CREATE TABLE [dogs] ( [name] TEXT , [blob] BLOB)"),
        (
            # No type at all defaults to TEXT:
            "default_str",
            None,
            None,
            "CREATE TABLE [dogs] ( [name] TEXT , [default_str] TEXT)",
        ),
        (
            # not_null_default adds NOT NULL DEFAULT ... to the column:
            "nickname",
            str,
            "",
            "CREATE TABLE [dogs] ( [name] TEXT , [nickname] TEXT NOT NULL DEFAULT '')",
        ),
        (
            # Single quotes in the default value must be escaped by doubling:
            "nickname",
            str,
            "dawg's dawg",
            "CREATE TABLE [dogs] ( [name] TEXT , [nickname] TEXT NOT NULL DEFAULT 'dawg''s dawg')",
        ),
    ),
)
def test_add_column(fresh_db, col_name, col_type, not_null_default, expected_schema):
    """add_column() should accept Python types, SQL type strings or no type
    at all, plus an optional NOT NULL default, and emit the expected schema."""
    fresh_db.create_table("dogs", {"name": str})
    assert "CREATE TABLE [dogs] ( [name] TEXT )" == collapse_whitespace(
        fresh_db["dogs"].schema
    )
    fresh_db["dogs"].add_column(col_name, col_type, not_null_default=not_null_default)
    assert expected_schema == collapse_whitespace(fresh_db["dogs"].schema)
def test_add_foreign_key(fresh_db):
    """add_foreign_key() should record a new foreign key on the table."""
    fresh_db["authors"].insert_all(
        [{"id": 1, "name": "Sally"}, {"id": 2, "name": "Asheesh"}], pk="id"
    )
    books = [
        {"title": "Hedgehogs of the world", "author_id": 1},
        {"title": "How to train your wolf", "author_id": 2},
    ]
    fresh_db["books"].insert_all(books)
    assert fresh_db["books"].foreign_keys == []
    fresh_db["books"].add_foreign_key("author_id", "authors", "id")
    expected = ForeignKey(
        table="books", column="author_id", other_table="authors", other_column="id"
    )
    assert fresh_db["books"].foreign_keys == [expected]
def test_add_foreign_key_error_if_column_does_not_exist(fresh_db):
    """Adding a foreign key on a column that does not exist raises AlterError."""
    books = fresh_db["books"]
    books.insert({"id": 1, "title": "Hedgehogs of the world", "author_id": 1})
    with pytest.raises(AlterError):
        books.add_foreign_key("author2_id", "books", "id")
def test_add_foreign_key_error_if_other_table_does_not_exist(fresh_db):
    """Adding a foreign key to a table that does not exist raises AlterError."""
    books = fresh_db["books"]
    books.insert({"title": "Hedgehogs of the world", "author_id": 1})
    with pytest.raises(AlterError):
        books.add_foreign_key("author_id", "authors", "id")
def test_add_foreign_key_error_if_already_exists(fresh_db):
    """A duplicate add_foreign_key() should fail with an explanatory message."""
    books = fresh_db["books"]
    books.insert({"title": "Hedgehogs of the world", "author_id": 1})
    fresh_db["authors"].insert({"id": 1, "name": "Sally"}, pk="id")
    books.add_foreign_key("author_id", "authors", "id")
    with pytest.raises(AlterError) as ex:
        books.add_foreign_key("author_id", "authors", "id")
    assert ex.value.args[0] == "Foreign key already exists for author_id => authors.id"
def test_add_foreign_keys(fresh_db):
    """db.add_foreign_keys() should add several foreign keys in one call."""
    fresh_db["authors"].insert_all(
        [{"id": 1, "name": "Sally"}, {"id": 2, "name": "Asheesh"}], pk="id"
    )
    fresh_db["categories"].insert_all([{"id": 1, "name": "Wildlife"}], pk="id")
    fresh_db["books"].insert_all(
        [{"title": "Hedgehogs of the world", "author_id": 1, "category_id": 1}]
    )
    assert fresh_db["books"].foreign_keys == []
    fresh_db.add_foreign_keys(
        [
            ("books", "author_id", "authors", "id"),
            ("books", "category_id", "categories", "id"),
        ]
    )
    expected = [
        ForeignKey(
            table="books", column="author_id", other_table="authors", other_column="id"
        ),
        ForeignKey(
            table="books",
            column="category_id",
            other_table="categories",
            other_column="id",
        ),
    ]
    assert sorted(fresh_db["books"].foreign_keys) == expected
def test_add_column_foreign_key(fresh_db):
    """add_column(fk=...) should add an INTEGER column referencing rowid when
    the other table has no explicit pk, and a matching-typed column
    referencing the pk when it does."""
    fresh_db.create_table("dogs", {"name": str})
    fresh_db.create_table("breeds", {"name": str})
    fresh_db["dogs"].add_column("breed_id", fk="breeds")
    # breeds has no explicit pk, so the FK targets its implicit rowid.
    assert (
        "CREATE TABLE [dogs] ( [name] TEXT , [breed_id] INTEGER, FOREIGN KEY(breed_id) REFERENCES breeds(rowid) )"
        == collapse_whitespace(fresh_db["dogs"].schema)
    )
    # And again with an explicit primary key column
    fresh_db.create_table("subbreeds", {"name": str, "primkey": str}, pk="primkey")
    fresh_db["dogs"].add_column("subbreed_id", fk="subbreeds")
    # The new column picks up the TEXT type of the referenced primary key.
    assert (
        "CREATE TABLE [dogs] ( [name] TEXT , [breed_id] INTEGER, [subbreed_id] TEXT, "
        "FOREIGN KEY(breed_id) REFERENCES breeds(rowid), "
        "FOREIGN KEY(subbreed_id) REFERENCES subbreeds(primkey) )"
        == collapse_whitespace(fresh_db["dogs"].schema)
    )
def test_add_foreign_key_guess_table(fresh_db):
    """add_foreign_key() with just a column name should guess the other table
    from that name ("breed_id" -> "breeds")."""
    fresh_db.create_table("dogs", {"name": str})
    fresh_db.create_table("breeds", {"name": str, "id": int}, pk="id")
    dogs = fresh_db["dogs"]
    dogs.add_column("breed_id", int)
    dogs.add_foreign_key("breed_id")
    assert collapse_whitespace(dogs.schema) == (
        "CREATE TABLE [dogs] ( [name] TEXT , [breed_id] INTEGER, FOREIGN KEY(breed_id) REFERENCES breeds(id) )"
    )
def test_index_foreign_keys(fresh_db):
    """index_foreign_keys() should index un-indexed foreign key columns, and
    be a no-op when called a second time."""
    test_add_foreign_key_guess_table(fresh_db)
    dogs = fresh_db["dogs"]
    assert dogs.indexes == []
    for _ in range(2):
        # The second pass must not create a duplicate index.
        fresh_db.index_foreign_keys()
        assert [i.columns for i in dogs.indexes] == [["breed_id"]]
@pytest.mark.parametrize(
    "extra_data,expected_new_columns",
    [
        ({"species": "squirrels"}, [{"name": "species", "type": "TEXT"}]),
        (
            {"species": "squirrels", "hats": 5},
            [{"name": "species", "type": "TEXT"}, {"name": "hats", "type": "INTEGER"}],
        ),
        (
            {"hats": 5, "rating": 3.5},
            [{"name": "hats", "type": "INTEGER"}, {"name": "rating", "type": "FLOAT"}],
        ),
    ],
)
@pytest.mark.parametrize("use_table_factory", [True, False])
def test_insert_row_alter_table(
    fresh_db, extra_data, expected_new_columns, use_table_factory
):
    """insert(..., alter=True) should add whatever new columns the incoming
    row needs, via either the table factory or the insert() keyword."""
    table = fresh_db["books"]
    table.insert({"title": "Hedgehogs of the world", "author_id": 1})
    original_columns = [
        {"name": "title", "type": "TEXT"},
        {"name": "author_id", "type": "INTEGER"},
    ]
    assert [
        {"name": col.name, "type": col.type} for col in table.columns
    ] == original_columns
    record = dict({"title": "Squirrels of the world", "author_id": 2}, **extra_data)
    if use_table_factory:
        fresh_db.table("books", alter=True).insert(record)
    else:
        fresh_db["books"].insert(record, alter=True)
    assert [
        {"name": col.name, "type": col.type} for col in table.columns
    ] == original_columns + expected_new_columns
def test_insert_row_alter_table_invalid_column_characters(fresh_db):
    """alter=True must still reject new column names containing brackets.

    Cleanup: the original bound the first insert's ``last_pk`` to an unused
    ``rowid`` local; the value is irrelevant to this test, so it is dropped.
    """
    table = fresh_db["table"]
    table.insert({"foo": "bar"})
    with pytest.raises(AssertionError):
        table.insert({"foo": "baz", "new_col[abc]": 1.2}, alter=True)
@pytest.mark.parametrize("use_table_factory", [True, False])
def test_insert_replace_rows_alter_table(fresh_db, use_table_factory):
    """replace=True combined with alter=True should replace the existing row
    by pk, add any new columns, and serialize nested lists to JSON text.

    Columns missing from a given row come back as None.
    """
    first_row = {"id": 1, "title": "Hedgehogs of the world", "author_id": 1}
    next_rows = [
        {"id": 1, "title": "Hedgehogs of the World", "species": "hedgehogs"},
        {"id": 2, "title": "Squirrels of the World", "num_species": 200},
        {
            "id": 3,
            "title": "Badgers of the World",
            "significant_continents": ["Europe", "North America"],
        },
    ]
    if use_table_factory:
        table = fresh_db.table("books", pk="id", alter=True)
        table.insert(first_row)
        table.insert_all(next_rows, replace=True)
    else:
        table = fresh_db["books"]
        table.insert(first_row, pk="id")
        table.insert_all(next_rows, alter=True, replace=True)
    assert {
        "author_id": int,
        "id": int,
        "num_species": int,
        "significant_continents": str,
        "species": str,
        "title": str,
    } == table.columns_dict
    assert [
        {
            "author_id": None,
            "id": 1,
            "num_species": None,
            "significant_continents": None,
            "species": "hedgehogs",
            "title": "Hedgehogs of the World",
        },
        {
            "author_id": None,
            "id": 2,
            "num_species": 200,
            "significant_continents": None,
            "species": None,
            "title": "Squirrels of the World",
        },
        {
            "author_id": None,
            "id": 3,
            "num_species": None,
            "significant_continents": '["Europe", "North America"]',
            "species": None,
            "title": "Badgers of the World",
        },
    ] == list(table.rows)
def test_bulk_insert_more_than_999_values(fresh_db):
    """Inserting 100 rows of 11 columns (1,100 bound values) should work.

    SQLite's default limit is 999 bound variables per statement, so
    insert_all() must batch the rows rather than binding them all at once.

    Fixed: the original row dict spelled the ninth column "c8" twice
    ("c8": 8 followed by "c8": 9), so the duplicate key collapsed and only
    ten columns were actually inserted - 1,000 values, barely over the
    limit, instead of the intended 1,100 across 11 columns.
    """
    fresh_db["big"].insert_all(
        (
            {
                "id": i + 1,
                "c2": 2,
                "c3": 3,
                "c4": 4,
                "c5": 5,
                "c6": 6,
                "c7": 7,
                "c8": 8,
                "c9": 9,
                "c10": 10,
                "c11": 11,
            }
            for i in range(100)
        ),
        pk="id",
    )
    assert 100 == fresh_db["big"].count
@pytest.mark.parametrize(
    "num_columns,should_error", ((900, False), (999, False), (1000, True))
)
def test_error_if_more_than_999_columns(fresh_db, num_columns, should_error):
    """A single row may carry at most 999 columns; 1000 should be rejected."""
    record = {"c{}".format(i): i for i in range(num_columns)}
    if should_error:
        with pytest.raises(AssertionError):
            fresh_db["big"].insert(record)
    else:
        fresh_db["big"].insert(record)
@pytest.mark.parametrize(
    "columns,index_name,expected_index",
    (
        (
            # Default name is derived from the table and column names.
            ["is good dog"],
            None,
            Index(
                seq=0,
                name="idx_dogs_is good dog",
                unique=0,
                origin="c",
                partial=0,
                columns=["is good dog"],
            ),
        ),
        (
            # Multi-column index: columns are joined into the default name.
            ["is good dog", "age"],
            None,
            Index(
                seq=0,
                name="idx_dogs_is good dog_age",
                unique=0,
                origin="c",
                partial=0,
                columns=["is good dog", "age"],
            ),
        ),
        (
            # An explicit index name overrides the derived default.
            ["age"],
            "age_index",
            Index(
                seq=0,
                name="age_index",
                unique=0,
                origin="c",
                partial=0,
                columns=["age"],
            ),
        ),
    ),
)
def test_create_index(fresh_db, columns, index_name, expected_index):
    """create_index() should create the expected index, deriving a name from
    the columns when none is supplied (spaces in column names included)."""
    dogs = fresh_db["dogs"]
    dogs.insert({"name": "Cleo", "twitter": "cleopaws", "age": 3, "is good dog": True})
    assert [] == dogs.indexes
    dogs.create_index(columns, index_name)
    assert expected_index == dogs.indexes[0]
def test_create_index_unique(fresh_db):
    """create_index(unique=True) should create a UNIQUE index."""
    dogs = fresh_db["dogs"]
    dogs.insert({"name": "Cleo", "twitter": "cleopaws", "age": 3, "is_good_dog": True})
    assert dogs.indexes == []
    dogs.create_index(["name"], unique=True)
    expected = Index(
        seq=0,
        name="idx_dogs_name",
        unique=1,
        origin="c",
        partial=0,
        columns=["name"],
    )
    assert dogs.indexes[0] == expected
def test_create_index_if_not_exists(fresh_db):
    """Re-creating an index should fail unless if_not_exists=True is passed."""
    dogs = fresh_db["dogs"]
    dogs.insert({"name": "Cleo", "twitter": "cleopaws", "age": 3, "is_good_dog": True})
    assert dogs.indexes == []
    dogs.create_index(["name"])
    assert len(dogs.indexes) == 1
    with pytest.raises(Exception, match="index idx_dogs_name already exists"):
        dogs.create_index(["name"])
    # With if_not_exists=True the duplicate request is silently ignored:
    dogs.create_index(["name"], if_not_exists=True)
@pytest.mark.parametrize(
    "data_structure",
    (
        ["list with one item"],
        ["list with", "two items"],
        {"dictionary": "simple"},
        {"dictionary": {"nested": "complex"}},
        collections.OrderedDict([("key1", {"nested": "complex"}), ("key2", "foo"),]),
        [{"list": "of"}, {"two": "dicts"}],
    ),
)
def test_insert_dictionaries_and_lists_as_json(fresh_db, data_structure):
    """Nested dicts/lists should be stored as JSON text that round-trips."""
    fresh_db["test"].insert({"id": 1, "data": data_structure}, pk="id")
    stored_id, stored_data = fresh_db.conn.execute(
        "select id, data from test"
    ).fetchone()
    assert stored_id == 1
    assert json.loads(stored_data) == data_structure
def test_insert_thousands_using_generator(fresh_db):
    """insert_all() should accept a generator and handle 10,000 rows."""
    rows = ({"i": i, "word": "word_{}".format(i)} for i in range(10000))
    fresh_db["test"].insert_all(rows)
    columns = [
        {"name": col.name, "type": col.type} for col in fresh_db["test"].columns
    ]
    assert columns == [
        {"name": "i", "type": "INTEGER"},
        {"name": "word", "type": "TEXT"},
    ]
    assert fresh_db["test"].count == 10000
def test_insert_thousands_ignores_extra_columns_after_first_100(fresh_db):
    """Columns are derived from the first rows only, so a novel column that
    first appears later in the stream is silently dropped."""
    first_hundred = [{"i": i, "word": "word_{}".format(i)} for i in range(100)]
    odd_one_out = {"i": 101, "extra": "This extra column should cause an exception"}
    fresh_db["test"].insert_all(first_hundred + [odd_one_out])
    rows = fresh_db.execute_returning_dicts("select * from test where i = 101")
    assert rows == [{"i": 101, "word": None}]
def test_insert_ignore(fresh_db):
    """ignore=True should silently skip rows that violate a unique constraint."""
    test = fresh_db["test"]
    test.insert({"id": 1, "bar": 2}, pk="id")
    # A second insert with the same pk raises...
    with pytest.raises(Exception, match="UNIQUE constraint failed"):
        test.insert({"id": 1, "bar": 2}, pk="id")
    # ...unless ignore=True, in which case it is silently dropped:
    test.insert({"id": 1, "bar": 3}, pk="id", ignore=True)
    # The original row survives untouched - bar is still 2, not 3:
    rows = fresh_db.execute_returning_dicts("select * from test")
    assert rows == [{"id": 1, "bar": 2}]
def test_insert_hash_id(fresh_db):
    """hash_id should derive a stable primary key from the row contents."""
    dogs = fresh_db["dogs"]
    expected_hash = "f501265970505d9825d8d9f590bfab3519fb20b1"
    first = dogs.insert({"name": "Cleo", "twitter": "cleopaws"}, hash_id="id")
    assert first.last_pk == expected_hash
    assert dogs.count == 1
    # Replacing with identical content hashes to the same pk - no new row:
    second = dogs.insert(
        {"name": "Cleo", "twitter": "cleopaws"}, hash_id="id", replace=True
    )
    assert second.last_pk == expected_hash
    assert dogs.count == 1
def test_vacuum(fresh_db):
    """vacuum() should run without error on a populated database."""
    fresh_db["data"].insert({"foo": "foo", "bar": "bar"})
    fresh_db.vacuum()
def test_works_with_pathlib_path(tmpdir):
    """Database() should accept a pathlib.Path as well as a string path."""
    db = Database(pathlib.Path(tmpdir / "test.db"))
    db["demo"].insert_all([{"foo": 1}])
    assert db["demo"].count == 1
@pytest.mark.skipif(pd is None, reason="pandas and numpy are not installed")
def test_create_table_numpy(fresh_db):
    """Rows built from pandas DataFrames should insert cleanly, converting
    every numpy integer/float width to plain SQLite INTEGER/FLOAT values."""
    import numpy as np
    df = pd.DataFrame({"col 1": range(3), "col 2": range(3)})
    fresh_db["pandas"].insert_all(df.to_dict(orient="records"))
    assert [
        {"col 1": 0, "col 2": 0},
        {"col 1": 1, "col 2": 1},
        {"col 1": 2, "col 2": 2},
    ] == list(fresh_db["pandas"].rows)
    # Now try all the different types
    df = pd.DataFrame(
        {
            "np.int8": [-8],
            "np.int16": [-16],
            "np.int32": [-32],
            "np.int64": [-64],
            "np.uint8": [8],
            "np.uint16": [16],
            "np.uint32": [32],
            "np.uint64": [64],
            "np.float16": [16.5],
            "np.float32": [32.5],
            "np.float64": [64.5],
        }
    )
    # Force each column to the numpy dtype named by its header.
    df = df.astype(
        {
            "np.int8": "int8",
            "np.int16": "int16",
            "np.int32": "int32",
            "np.int64": "int64",
            "np.uint8": "uint8",
            "np.uint16": "uint16",
            "np.uint32": "uint32",
            "np.uint64": "uint64",
            "np.float16": "float16",
            "np.float32": "float32",
            "np.float64": "float64",
        }
    )
    # Sanity-check that the astype() call actually took effect.
    assert [
        "int8",
        "int16",
        "int32",
        "int64",
        "uint8",
        "uint16",
        "uint32",
        "uint64",
        "float16",
        "float32",
        "float64",
    ] == [str(t) for t in df.dtypes]
    fresh_db["types"].insert_all(df.to_dict(orient="records"))
    # All numpy scalars come back as plain Python ints/floats.
    assert [
        {
            "np.float16": 16.5,
            "np.float32": 32.5,
            "np.float64": 64.5,
            "np.int16": -16,
            "np.int32": -32,
            "np.int64": -64,
            "np.int8": -8,
            "np.uint16": 16,
            "np.uint32": 32,
            "np.uint64": 64,
            "np.uint8": 8,
        }
    ] == list(fresh_db["types"].rows)
def test_cannot_provide_both_filename_and_memory():
    """Passing both a filename and memory=True should be rejected."""
    expected_message = "Either specify a filename_or_conn or pass memory=True"
    with pytest.raises(AssertionError, match=expected_message):
        Database("/tmp/foo.db", memory=True)
def test_creates_id_column(fresh_db):
    """table(pk="id") should invent an id column on first insert."""
    cats = fresh_db.table("cats", pk="id")
    last_pk = cats.insert({"name": "barry"}).last_pk
    assert list(fresh_db["cats"].rows) == [{"name": "barry", "id": last_pk}]
def test_drop(fresh_db):
    """drop() should remove the table and return None."""
    fresh_db["t"].insert({"foo": 1})
    assert fresh_db.table_names() == ["t"]
    assert fresh_db["t"].drop() is None
    assert fresh_db.table_names() == []
def test_drop_view(fresh_db):
    """drop() should remove a view and return None."""
    fresh_db.create_view("foo_view", "select 1")
    assert fresh_db.view_names() == ["foo_view"]
    assert fresh_db["foo_view"].drop() is None
    assert fresh_db.view_names() == []
def test_insert_all_empty_list(fresh_db):
    """insert_all([]) should be a no-op, with or without replace=True."""
    table = fresh_db["t"]
    table.insert({"foo": 1})
    assert table.count == 1
    table.insert_all([])
    assert table.count == 1
    table.insert_all([], replace=True)
    assert table.count == 1
def test_create_with_a_null_column(fresh_db):
    """A None value should round-trip through the table as SQL NULL."""
    record = {"name": "Name", "description": None}
    fresh_db["t"].insert(record)
    assert list(fresh_db["t"].rows) == [record]
def test_create_with_nested_bytes(fresh_db):
    """Bytes nested inside a JSON-serialized column are stored as their
    string representation inside the JSON text."""
    fresh_db["t"].insert({"id": 1, "data": {"foo": b"bytes"}})
    assert list(fresh_db["t"].rows) == [{"id": 1, "data": '{"foo": "b\'bytes\'"}'}]
|
<reponame>richardeverson/warpcmap
import numpy as np
from scipy.special import betainc
from matplotlib import cm, rcParams
from matplotlib.colors import ListedColormap
from matplotlib.pyplot import gca
from scipy.optimize import root_scalar
def warp_colormap(basemap, z, beta=1, Nentries=256):
    """
    Construct a new colormap by warping basemap so that the colour in the
    "middle" of the basemap (ie, corresponding to a value of 0.5) is moved
    to the value z, with the rate of change of colours around z set by beta.

    Parameters
    ----------
    basemap: Matplotlib ColorMap or string naming one.
        The colormap to warp
    z: float (0 < z < 1)
        The location that the middle of the basemap is warped to.
        For instance, if the basemap is the 'jet' colourmap, so that
        the middle of the map is green, then in the new colourmap,
        green corresponds to the value z.
    beta: float (beta > 0)
        Beta controls the rate of change of colours close to z. It is
        approximately the gradient of the mapping between the new colours
        and the old colours. Values of beta in the range 1 to 5 are usual
        as they give more resolution to the data values close to z.
        beta < 1 compresses the colours close to z.
    Nentries: int
        Number of entries in the new colourmap.

    Returns
    -------
    newmap: ListedColormap
        The warped colourmap.

    Example
    -------
    Emphasise the data values around 0.8

    >>> cmap = warp_colormap('jet', z=0.8, beta=3)
    >>> imshow(X, cmap=cmap)
    >>> colorbar()

    Note
    ----
    If the range of the data is not [0, 1], then z is linearly mapped
    to the data range so if the data range is (100, 120) and z = 0.8,
    then the emphasised values will be around 116.
    """
    if isinstance(basemap, str):
        basemap = cm.get_cmap(basemap)

    # The warp is the regularised incomplete beta function I_{alpha,beta}.
    # Solve for the alpha that sends z to the middle (0.5) of the basemap.
    solution = root_scalar(lambda a: betainc(a, beta, z) - 0.5,
                           bracket=(1e-10, 1e2))
    alpha = solution.root

    # Sample the base colormap at the warped positions.
    entries = [basemap(betainc(alpha, beta, y))
               for y in np.linspace(0, 1, Nentries)]
    return ListedColormap(np.array(entries))
def wimshow(X,
            cmap=None,
            vmin=None, vmax=None, vmid=None, beta=1, Nentries=256,
            ax=None,
            **kwargs):
    """
    Convenience wrapper for `imshow` for displaying scalar data that allows
    setting of the range of the data values that the colormap covers and
    how it is warped.

    Parameters
    ----------
    X : array-like
        The image with scalar data. The data is visualized
        using a colormap. If RGB or RGBA data is to be displayed, just
        use `imshow` directly.
        The two dimensions (M, N) define the rows and columns of
        the image.
    cmap : str or `~matplotlib.colors.Colormap`, optional
        The Colormap instance or registered colormap name used to map
        scalar data to colors.
        Defaults to :rc:`image.cmap`.
    vmin, vmax : scalar, optional
        When using scalar data and no explicit *norm*, *vmin* and *vmax*
        define the data range that the colormap covers. By default,
        the colormap covers the complete value range of the supplied
        data. *vmin*, *vmax* are ignored if the *norm* parameter is used.
    vmid: scalar, optional
        The data value that the middle of the colormap is warped to.
        Default is the middle of the range of the data.
    beta: scalar > 0, optional
        Beta controls the rate of change of colours close to *vmid*.
        Larger values of beta give a more rapid change of colour with
        data value. Values of beta in the range 1 to 5 are usual
        as they give more resolution to the data values close to *vmid*.
        beta < 1 compresses the colours close to *vmid*.
    Nentries: int, optional
        Number of entries in the warped colourmap.
        Default: 256
    ax: matplotlib.axes.Axes, optional
        The Matplotlib axes in which to plot.
        Default: The current axes.

    Returns
    -------
    image : `~matplotlib.image.AxesImage`

    Other Parameters
    ----------------
    All other parameters are passed directly to `imshow`.
    """
    assert len(X.shape) == 2, "wimshow only supports scalar data; use imshow for RGB and RGBA data"
    if cmap is None:
        cmap = rcParams['image.cmap']
    if vmin is None:
        vmin = X.min()
    if vmax is None:
        vmax = X.max()
    if vmid is None:
        vmid = (vmin + vmax)/2
    if ax is None:
        ax = gca()
    if vmax == vmin:
        # Fixed: constant data (or equal vmin/vmax) used to raise
        # ZeroDivisionError here. Fall back to an unwarped midpoint.
        z = 0.5
    else:
        # Map vmid linearly from the data range into [0, 1].
        z = (vmid - vmin)/(vmax-vmin)
    warped = warp_colormap(cmap, z, beta=beta, Nentries=Nentries)
    return ax.imshow(X, cmap=warped, vmin=vmin, vmax=vmax, **kwargs)
def wpcolormesh(*args,
                cmap=None,
                vmin=None, vmax=None, vmid=None, beta=1, Nentries=256,
                ax=None,
                **kwargs):
    """
    Convenience wrapper for `pcolormesh` for displaying scalar data that allows
    setting of the range of the data values that the colormap covers and
    how it is warped.

    Call signature::

        wpcolormesh([X, Y,] C, **kwargs)

    *X* and *Y* can be used to specify the corners of the rectangles.

    Parameters
    ----------
    C : 2D array-like
        The values to be color-mapped.
    X, Y : array-like, optional
        The coordinates of the corners of quadrilaterals of a pcolormesh.
        See `pcolormesh` for details.
    cmap : str or `~matplotlib.colors.Colormap`, optional
        The Colormap instance or registered colormap name used to map
        scalar data to colors.
        Defaults to :rc:`image.cmap`.
    vmin, vmax : scalar, optional
        When using scalar data and no explicit *norm*, *vmin* and *vmax*
        define the data range that the colormap covers. By default,
        the colormap covers the complete value range of the supplied
        data. *vmin*, *vmax* are ignored if the *norm* parameter is used.
    vmid: scalar, optional
        The data value that the middle of the colormap is warped to.
        Default is the middle of the range of the data or (*vmin* + *vmax*)/2
        if they are specified.
    beta: scalar > 0, optional
        Beta controls the rate of change of colours close to *vmid*.
        Larger values of beta give a more rapid change of colour with
        data value. Values of beta in the range 1 to 5 are usual
        as they give more resolution to the data values close to *vmid*.
        beta < 1 compresses the colours close to *vmid*.
    Nentries: int, optional
        Number of entries in the warped colourmap.
        Default: 256
    ax: matplotlib.axes.Axes, optional
        The Matplotlib axes in which to plot.
        Default: The current axes.

    Returns
    -------
    `matplotlib.collections.QuadMesh`

    Other Parameters
    ----------------
    All other parameters are passed directly to `pcolormesh`.
    """
    if len(args) == 1:
        C = args[0]
    elif len(args) == 3:
        C = args[2]
    else:
        raise TypeError(f'wpcolormesh() takes 1 or 3 positional arguments '
                        f'but {len(args)} were given')
    if cmap is None:
        cmap = rcParams['image.cmap']
    if vmin is None:
        vmin = C.min()
    if vmax is None:
        vmax = C.max()
    if vmid is None:
        vmid = (vmin + vmax)/2
    if ax is None:
        ax = gca()
    if vmax == vmin:
        # Fixed: constant data (or equal vmin/vmax) used to raise
        # ZeroDivisionError here. Fall back to an unwarped midpoint.
        z = 0.5
    else:
        # Map vmid linearly from the data range into [0, 1].
        z = (vmid - vmin)/(vmax-vmin)
    warped = warp_colormap(cmap, z, beta=beta, Nentries=Nentries)
    return ax.pcolormesh(*args, cmap=warped, vmin=vmin, vmax=vmax, **kwargs)
def wpcolor(*args,
            cmap=None,
            vmin=None, vmax=None, vmid=None, beta=1, Nentries=256,
            ax=None,
            **kwargs):
    """
    Convenience wrapper for `pcolor` for displaying scalar data that allows
    setting of the range of the data values that the colormap covers and
    how it is warped.

    Call signature::

        wpcolor([X, Y,] C, **kwargs)

    *X* and *Y* can be used to specify the corners of the rectangles.

    Parameters
    ----------
    C : 2D array-like
        The values to be color-mapped.
    X, Y : array-like, optional
        The coordinates of the corners of quadrilaterals of a pcolor.
        See `pcolor` for details.
    cmap : str or `~matplotlib.colors.Colormap`, optional
        The Colormap instance or registered colormap name used to map
        scalar data to colors.
        Defaults to :rc:`image.cmap`.
    vmin, vmax : scalar, optional
        When using scalar data and no explicit *norm*, *vmin* and *vmax*
        define the data range that the colormap covers. By default,
        the colormap covers the complete value range of the supplied
        data. *vmin*, *vmax* are ignored if the *norm* parameter is used.
    vmid: scalar, optional
        The data value that the middle of the colormap is warped to.
        Default is the middle of the range of the data or (*vmin* + *vmax*)/2
        if they are specified.
    beta: scalar > 0, optional
        Beta controls the rate of change of colours close to *vmid*.
        Larger values of beta give a more rapid change of colour with
        data value. Values of beta in the range 1 to 5 are usual
        as they give more resolution to the data values close to *vmid*.
        beta < 1 compresses the colours close to *vmid*.
    Nentries: int, optional
        Number of entries in the warped colourmap.
        Default: 256
    ax: matplotlib.axes.Axes, optional
        The Matplotlib axes in which to plot.
        Default: The current axes.

    Returns
    -------
    `matplotlib.collections.QuadMesh`

    Other Parameters
    ----------------
    All other parameters are passed directly to `pcolor`.
    """
    if len(args) == 1:
        C = args[0]
    elif len(args) == 3:
        C = args[2]
    else:
        raise TypeError(f'wpcolor() takes 1 or 3 positional arguments '
                        f'but {len(args)} were given')
    if cmap is None:
        cmap = rcParams['image.cmap']
    if vmin is None:
        vmin = C.min()
    if vmax is None:
        vmax = C.max()
    if vmid is None:
        vmid = (vmin + vmax)/2
    if ax is None:
        ax = gca()
    if vmax == vmin:
        # Fixed: constant data (or equal vmin/vmax) used to raise
        # ZeroDivisionError here. Fall back to an unwarped midpoint.
        z = 0.5
    else:
        # Map vmid linearly from the data range into [0, 1].
        z = (vmid - vmin)/(vmax-vmin)
    warped = warp_colormap(cmap, z, beta=beta, Nentries=Nentries)
    return ax.pcolor(*args, cmap=warped, vmin=vmin, vmax=vmax, **kwargs)
|
"""
Provides the main Hibiki class used for the music synchronization.
"""
import json
import os
import os.path
import random
import shutil
from .config import HibikiConfig
from .itunes import iTunesLibrary
class Hibiki(object):
"""Main class used for the music syncing."""
def __init__(self, config=None):
self._subfolder = 0
self.itunes = None
self.tracks = set()
if config:
self.config = config
self.config.parent = self
self.update_itunes()
else:
self.config = HibikiConfig(parent=self)
@property
def library_data(self):
"""Returns the JSON data written in the library file."""
with open(self.config.library_path, 'r') as file:
try:
return json.load(file)
except ValueError:
return {}
@library_data.setter
def library_data(self, value):
with open(self.config.library_path, 'w') as file:
json.dump(value, file, separators=(',', ':'))
    @property
    def target_directory(self):
        """Returns the target directory for file copy operations. If
        config.use_subfolders is set to False, always returns the destination
        folder. Else it returns the first folder that has less than the maximum
        allowed amount of files. The last non-full folder is saved in
        _subfolder so that full directories don't get checked again.
        Filenames starting with '.' are ignored.
        """
        if self.config.use_subfolders:
            while True:
                directory = os.path.join(self.config.destination,
                                         str(self._subfolder))
                if os.path.isdir(directory):
                    # Count only visible files - dotfiles do not count
                    # towards the per-folder limit.
                    file_count = 0
                    for file in os.listdir(directory):
                        if file[0] != '.':
                            file_count += 1
                    if file_count < self.config.max_file_count:
                        return directory
                    else:
                        # Folder is full: advance the cached index so this
                        # folder is never scanned again.
                        self._subfolder += 1
                else:
                    # First use of this index: create the subfolder.
                    os.makedirs(directory)
                    return directory
        else:
            return self.config.destination
def _copy_file(self, track):
"""Performs the file copy operation."""
destination_path = os.path.join(self.target_directory, track.filename)
with open(track.path, 'rb') as fin:
with open(destination_path, 'wb') as fout:
shutil.copyfileobj(fin, fout)
return destination_path
    def _clean_sync_list(self, delete_callback=None, error_callback=None):
        """Removes all the tracks not present in the sync list from destination
        and removes tracks from the sync list if already present on the
        destination.
        """
        library = self.library_data
        # Iterate over a copy so entries can be deleted while looping.
        for track in library.copy():
            if track in self.tracks:
                # Already on the destination - no need to copy it again.
                self.tracks.remove(track)
            else:
                # In the library file but no longer selected: remove the file.
                path = os.path.join(self.config.destination, library[track])
                try:
                    os.remove(path)
                except FileNotFoundError as error:
                    # File already gone; report it and keep the library entry.
                    if error_callback:
                        error_callback(path, error)
                    continue
                if delete_callback:
                    delete_callback(library[track])
                del library[track]
        self.library_data = library
def _mark_file(self, track, destination):
"""Writes the file persistant ID and path into to library file."""
data = self.library_data
data[track.persistent_id] = os.path.relpath(destination,
self.config.destination)
self.library_data = data
def calculate_space(self):
"""Calculates the available space if all the tracks in the library were
to be removed.
"""
available = self.space_available()
library = self.library_data
for track in library.copy():
path = self.full_library_path(library[track])
try:
stat = os.stat(path)
except FileNotFoundError:
del library[track]
else:
available += stat.st_size
self.library_data = library
return available
def copy_tracks(self, after_callback=None, before_callback=None,
error_callback=None, end_signal=None):
"""Goes through the tracks in the iTunes library and copies the tracks
onto the destination. before_callback and after_callback are called
with the track object if they are set before and after the copy process
respectively. The copying process will last until the track list has
been exhausted or the end_signal is True.
"""
track_iterator = self.itunes.tracks
if not end_signal:
end_signal = False
while not end_signal:
try:
track = next(track_iterator)
except StopIteration:
return
if track.persistent_id in self.tracks:
if before_callback:
before_callback(track)
try:
destination = self._copy_file(track)
except OSError as error:
os.remove(destination)
if error_callback:
error_callback(track, error)
continue
self._mark_file(track, destination)
if after_callback:
after_callback(track)
def full_library_path(self, track):
"""Returns the full path for the relative library paths."""
return os.path.join(self.config.destination, track)
    def generate_sync_list(self, delete_callback=None, error_callback=None):
        """Generates a set of the items to be synced using the iTunes
        persistent IDs and the available space on the target destination if all
        the current tracks were to be deleted. Adds random items to the sync
        list if config.random_fill returns True.

        delete_callback/error_callback are forwarded to _clean_sync_list.
        """
        # Space that would be free if every previously synced file were gone.
        space = self.calculate_space()
        self.config.excludes.get_playlist_tracks()
        self.config.includes.get_playlist_tracks()
        # First pass: every included (and not excluded) track that still fits.
        for track in self.itunes.tracks:
            if self.config.excludes.is_filtered(track):
                continue
            if self.config.includes.is_filtered(track):
                if space >= track.size:
                    space -= track.size
                    self.tracks.add(track.persistent_id)
        # Second pass: optionally top up the remaining space with random
        # tracks; each candidate is removed from `music` so every track is
        # considered at most once.
        if self.config.random_fill:
            random.seed()
            music = list(self.itunes.tracks)
            while len(music) > 0:
                track = random.sample(music, 1)[0]
                if not self.config.excludes.is_filtered(track):
                    if track.persistent_id not in self.tracks:
                        if space >= track.size:
                            space -= track.size
                            self.tracks.add(track.persistent_id)
                music.remove(track)
        self._clean_sync_list(delete_callback=delete_callback,
                              error_callback=error_callback)
def space_available(self, reserve=5):
"""Returns the number of available bytes on the target destination.
Reserves 5 MB of free space by default on the drive just in case.
"""
drive_stats = os.statvfs(self.config.destination)
space = drive_stats.f_bavail * drive_stats.f_frsize
return space - (reserve * 1024 * 1024)
    def update_itunes(self):
        """Sets the self.itunes instance to a new iTunesLibrary object found in
        the path defined by the self.config object.

        Call this after the iTunes library file may have changed on disk.
        """
        self.itunes = iTunesLibrary(self.config.itunes_path)
|
import os, uuid, time
from datetime import datetime
import boto3
import requests
from urllib3.util import Url
from .parallel_logger import logger
from .utils import sizeof_fmt, measure_duration_and_rate
class BundleStorer:
    """Stores one staged bundle (its files plus the bundle manifest) in the
    DSS via a DataStoreAPI driver."""

    def __init__(self, bundle, dss_url, use_rest_api=False, report_task_ids=False):
        self.bundle = bundle
        # Accumulates per-file manifest entries for the final put_bundle call.
        self.file_info = []
        driver = 'rest' if use_rest_api else 'python'
        self.api = DataStoreAPI(driver=driver, endpoint_url=dss_url, report_task_ids=report_task_ids)

    def store_bundle(self):
        """Assign UUIDs, persist submission info, upload every file, then
        register the bundle.

        DSSAPIError is caught and reported so one failing bundle does not
        abort a larger run.
        """
        try:
            logger.output(f"\n{self.bundle.path}:", progress_char="B")
            self._assign_uuids()
            self.bundle.submission_info.save()
            self._store_files()
            self._register_bundle()
            logger.flush()
        except DSSAPIError as e:
            logger.output(f"\n\nERROR attempting to store bundle {self.bundle.path}: {str(e)}\n",
                          progress_char="!", flush=True)

    def _store_files(self):
        # Upload each staged file and record its manifest entry.
        for file in self.bundle.files.values():
            size_message = f" ({sizeof_fmt(file.size)})" if not file.is_metadata() else ""
            logger.output(f"\n storing file {file.name}{size_message} as {file.uuid}...")
            version, duration, rate = measure_duration_and_rate(self.api.put_file,
                                                                self.bundle.uuid, file.uuid, file.staged_url,
                                                                size=file.size)
            self.file_info.append({
                'name': file.name,
                'uuid': file.uuid,
                'version': version,
                'indexed': file.is_metadata()  # only metadata files are indexed
            })
            logger.output(" %s (%.1f sec, %.1f MiB/sec)" % (version, duration, rate), progress_char="s")

    def _assign_uuids(self):
        # Generate UUIDs only where missing so a re-run keeps stable IDs.
        if not self.bundle.uuid:
            self.bundle.uuid = str(uuid.uuid4())
        for file in self.bundle.files.values():
            if not file.uuid:
                file.uuid = str(uuid.uuid4())

    def _register_bundle(self):
        # Register the bundle manifest built up in _store_files().
        logger.output(f"\n registering bundle {self.bundle.uuid}... ")
        version = self.api.put_bundle(self.bundle.uuid, self.file_info)
        logger.output(version, progress_char='✔︎')
class StagedBundleFinder:
    """Walks an S3 prefix looking for staged bundle folders.

    Assumption: bundles are stored at **/bundles/bundleX/ — i.e. every
    direct subfolder of a folder literally named 'bundles' is a bundle.
    """

    def __init__(self):
        self.bundle_paths = list()
        self.s3 = boto3.client('s3')

    def paths_of_bundles_under(self, s3url: Url) -> list:
        """Return the key prefixes (without trailing slash) of all bundles
        found anywhere under *s3url*."""
        logger.output(f"\nFinding bundles under {str(s3url)}...", flush=True)
        self._search_for_bundles_in_folder(bucket=s3url.host, root_path=s3url.path.lstrip('/'))
        logger.output("\n")
        return self.bundle_paths

    def _search_for_bundles_in_folder(self, bucket: str, root_path: str) -> None:
        # Whether the subfolders are bundles depends only on root_path, so
        # the check is hoisted out of the loop (it was previously
        # re-evaluated for every subfolder). Return annotation corrected to
        # None: results accumulate in self.bundle_paths.
        subfolders_are_bundles = self._is_bundle_home(root_path)
        for folder_path in self._subfolders_in_folder(bucket, root_path):
            if subfolders_are_bundles:
                self.bundle_paths.append(folder_path.rstrip('/'))
            else:
                self._search_for_bundles_in_folder(bucket, folder_path)

    def _subfolders_in_folder(self, bucket: str, folder_path: str):
        """Yield the immediate sub-prefixes ('subfolders') of folder_path."""
        paginator = self.s3.get_paginator('list_objects')
        for page in paginator.paginate(Bucket=bucket, Prefix=folder_path, Delimiter='/'):
            if 'CommonPrefixes' in page:
                for obj in page['CommonPrefixes']:
                    yield obj['Prefix']

    @staticmethod
    def _is_bundle_home(path: str) -> bool:
        """True when the last path component is literally 'bundles'."""
        return os.path.basename(path.rstrip('/')) == 'bundles'
class DSSAPIError(RuntimeError):
    """Raised when a DSS (Data Storage Service) API call fails."""
    pass
class DSSDriver:
    """Abstract base for DSS clients; concrete drivers must implement
    put_file() and put_bundle()."""

    FAKE_CREATOR_UID = 104
    DEFAULT_DSS_REPLICA = 'aws'
    BACKOFF_FACTOR = 1.618
    RESPONSE_FIELDS_TO_DUMP = ('status_code', 'reason', 'content', 'headers', 'url', 'history', 'encoding', 'elapsed')

    def __init__(self, endpoint_url, report_task_ids=False):
        self.dss_url = endpoint_url
        self.report_task_ids = report_task_ids

    def put_file(self, bundle_uuid: str, file_uuid: str, file_location: str):
        """Store one file in the DSS; implemented by subclasses."""
        raise NotImplementedError()

    def put_bundle(self, bundle_uuid: str, file_info: list):
        """Register one bundle in the DSS; implemented by subclasses."""
        raise NotImplementedError()

    def _dump_response(self, response):
        """Render selected attributes of an HTTP response, one tab-indented
        line per attribute, for error messages."""
        lines = [f"\t{attr}={getattr(response, attr)}"
                 for attr in self.RESPONSE_FIELDS_TO_DUMP]
        return "\n".join(lines)
class DSSpythonDriver(DSSDriver):
    """DSS driver backed by the 'hca' Python client library.

    The redundant __init__ that only forwarded to super() was removed;
    construction is unchanged.
    """

    def put_file(self, bundle_uuid: str, file_uuid: str, file_location: str):
        """Store a file by reference via hca.api.put_files; return its version.

        Raises DSSAPIError on a non-201 response. (Previously this printed
        the error and killed the whole process with exit(1), bypassing
        BundleStorer's DSSAPIError handling; it also hard-coded creator_uid
        104 instead of using FAKE_CREATOR_UID.)
        """
        from hca import api as hca_api
        response = hca_api.put_files(uuid=file_uuid,
                                     source_url=file_location,
                                     creator_uid=self.FAKE_CREATOR_UID,
                                     bundle_uuid=bundle_uuid,
                                     api_url=self.dss_url
                                     )
        if response.status_code != 201:
            raise DSSAPIError(f"put_files() returned {response.status_code}: {response.text}")
        return response.json()['version']

    def put_bundle(self, bundle_uuid: str, file_info: list):
        """Register a bundle via hca.api.put_bundles; return its version.

        Raises DSSAPIError on a non-201 response; the message now correctly
        names put_bundles() (it previously said put_files()).
        """
        from hca import api as hca_api
        response = hca_api.put_bundles(bundle_uuid, self.DEFAULT_DSS_REPLICA, self.FAKE_CREATOR_UID, file_info)
        if response.status_code != 201:
            raise DSSAPIError(f"put_bundles() returned {response.status_code}: {response.text}")
        return response.json()['version']
class DSSrestDriver(DSSDriver):
    """DSS driver that talks to the REST API directly via `requests`.

    Handles the asynchronous (202 Accepted) upload path by polling until
    the file exists. The redundant __init__ that only forwarded to super()
    was removed; construction is unchanged.
    """

    def put_file(self, bundle_uuid: str, file_uuid: str, file_location: str):
        """Upload one file by reference and return its DSS version.

        201 -> version returned immediately; 202 -> server copies
        asynchronously, so poll until the file exists; anything else raises
        DSSAPIError with a dump of the response.
        """
        payload = {
            'bundle_uuid': bundle_uuid,
            # Consistency fix: use the shared constant instead of a
            # hard-coded 104 so all drivers agree on the creator UID.
            'creator_uid': self.FAKE_CREATOR_UID,
            'source_url': file_location,
        }
        url = f"{self.dss_url}/files/{file_uuid}"
        params = {'version': datetime.now().isoformat()}
        response = requests.put(url, params=params, json=payload)
        if response.status_code == 201:
            return response.json()['version']
        elif response.status_code == 202:
            if self.report_task_ids:
                logger.output(f"\n ACCEPTED: task_id={response.json()['task_id']}, waiting.")
            response = self._wait_for_file_to_exist(file_uuid)
            return response.headers['X-DSS-VERSION']
        else:
            raise DSSAPIError(f"put({url}, {params}, {payload}) returned:\n{self._dump_response(response)}")

    def head_file(self, file_uuid: str, version: str=None):
        """HEAD a file (optionally a specific version); return the raw response."""
        if version:
            url = f"{self.dss_url}/files/{file_uuid}?version={version}"
        else:
            url = f"{self.dss_url}/files/{file_uuid}"
        params = {'replica': self.DEFAULT_DSS_REPLICA}
        response = requests.head(url, params=params)
        return response

    def put_bundle(self, bundle_uuid: str, file_info: list):
        """Register a bundle manifest and return its DSS version."""
        payload = {
            'creator_uid': self.FAKE_CREATOR_UID,
            'files': file_info
        }
        url = f"{self.dss_url}/bundles/{bundle_uuid}"
        # Consistency fix: DEFAULT_DSS_REPLICA replaces the literal 'aws'
        # (same value) so the replica is defined in one place.
        params = {'version': datetime.now().isoformat(), 'replica': self.DEFAULT_DSS_REPLICA}
        response = requests.put(url, params=params, json=payload)
        if response.status_code != 201:
            raise DSSAPIError(f"put({url}, {params}, {payload}) returned:\n{self._dump_response(response)}")
        return response.json()['version']

    def _wait_for_file_to_exist(self, file_uuid, timeout_seconds=30*60):
        """Poll HEAD /files/{uuid} with exponential backoff (capped at 60s)
        until it returns 200; raise on timeout or an unexpected status."""
        timeout = time.time() + timeout_seconds
        wait = 1.0
        while time.time() < timeout:
            response = self.head_file(file_uuid)
            if response.status_code == 200:
                return response
            elif response.status_code in (404, 504):
                time.sleep(wait)
                logger.output(".")  # flushing progress dots is problematic when parallelized
                wait = min(60.0, wait * self.BACKOFF_FACTOR)
            else:
                raise RuntimeError(response)
        else:
            raise RuntimeError(f"File {file_uuid} did not appear within {timeout_seconds} seconds")
class DataStoreAPI:
    """Facade that selects a concrete DSS driver ('rest' or 'python') and
    forwards put_file/put_bundle calls to it."""

    DEFAULT_DSS_URL = "https://dss.dev.data.humancellatlas.org/v1"

    def __init__(self, driver='rest', endpoint_url=DEFAULT_DSS_URL, report_task_ids: bool=False):
        # Explicit lookup table instead of eval() on a constructed class
        # name: eval is unsafe on untrusted input, hides the dependency from
        # readers/tools, and raised a confusing NameError for bad values.
        drivers = {'rest': DSSrestDriver, 'python': DSSpythonDriver}
        try:
            driver_class = drivers[driver]
        except KeyError:
            raise ValueError(f"Unknown DSS driver {driver!r}; expected one of {sorted(drivers)}") from None
        self.driver = driver_class(endpoint_url=endpoint_url, report_task_ids=report_task_ids)

    def put_file(self, *args, **kwargs):
        return self.driver.put_file(*args, **kwargs)

    def put_bundle(self, *args, **kwargs):
        return self.driver.put_bundle(*args, **kwargs)
|
import matplotlib.pyplot as plt
from beast.observationmodel.noisemodel import toothpick
from beast.physicsmodel.grid import SEDGrid
from beast.plotting.beastplotlib import set_params
__all__ = ["plot_toothpick_details"]
def plot_toothpick_details(asts_filename, seds_filename, savefig=False):
    """
    Plot the details of the toothpick noisemodel creation for each filter.
    These plots show the individual AST results as points plotted as
    flux_out/flux_in (the docstring previously said
    (flux_in - flux_out)/flux_in, which did not match what is plotted).
    In addition, the binned values of these
    points are plotted giving the bias term in the observation model.
    Error bars around the binned bias values give the binned sigma term of
    the observation model. Finally, as a separate column of plots the
    binned completeness in each filter is plotted.

    Parameters
    ----------
    asts_filename : str
        filename with the AST results

    seds_filename : str
        filename with the SED grid (used just for the filter information)

    savefig : str (default=False)
        to save the figure, set this to the file extension (e.g., 'png', 'pdf')
    """
    sedgrid = SEDGrid(seds_filename, backend="cache")

    # read in AST results
    model = toothpick.MultiFilterASTs(asts_filename, sedgrid.filters)

    # set the column mappings as the external file is BAND_VEGA or BAND_IN
    model.set_data_mappings(upcase=True, in_pair=("in", "in"), out_pair=("out", "rate"))

    # compute binned biases, uncertainties, and completeness as a function of band flux
    ast_nonrecovered_ratio = 2.0
    model.fit_bins(
        nbins=50,
        ast_nonrecovered_ratio=ast_nonrecovered_ratio,
    )

    # one row per filter: left column flux ratios, right column completeness
    nfilters = len(sedgrid.filters)
    figsize_y = nfilters * 3
    fig, ax = plt.subplots(nrows=nfilters, ncols=2, figsize=(14, figsize_y), sharex=True)
    set_params()

    for i, cfilter in enumerate(sedgrid.filters):
        # input magnitudes are converted to fluxes using the Vega flux zero point
        mag_in = model.data[model.filter_aliases[cfilter + "_in"]]
        flux_out = model.data[model.filter_aliases[cfilter + "_out"]]

        flux_in = (10 ** (-0.4 * mag_in)) * model.vega_flux[i]
        flux_out *= model.vega_flux[i]

        # flux_out == 0 marks non-recovered ASTs; exclude from the scatter
        gvals = flux_out != 0.0

        ax[i, 0].plot(
            flux_in[gvals],
            flux_out[gvals] / flux_in[gvals],
            "ko",
            alpha=0.1,
            markersize=2,
        )

        # not all bins are filled with good data
        ngbins = model._nasts[i]

        # binned bias expressed as a ratio around 1
        ax[i, 0].plot(
            model._fluxes[0:ngbins, i],
            1. + model._biases[0:ngbins, i] / model._fluxes[0:ngbins, i],
            "b-",
        )

        # sigma term of the observation model as error bars on the bias
        ax[i, 0].errorbar(
            model._fluxes[0:ngbins, i],
            1. + model._biases[0:ngbins, i] / model._fluxes[0:ngbins, i],
            yerr=model._sigmas[0:ngbins, i] / model._fluxes[0:ngbins, i],
            fmt="bo",
            markersize=2,
            alpha=0.5,
        )

        if ast_nonrecovered_ratio is not None:
            # threshold ratio below which ASTs are treated as non-recovered
            ax[i, 0].axhline(
                ast_nonrecovered_ratio, linestyle="--", alpha=0.25, color="k"
            )

        ax[i, 0].set_ylim(-10, 2.5)
        ax[i, 0].set_ylabel(r"$F_o/F_i$")

        # right column: binned completeness for this filter
        ax[i, 1].plot(
            model._fluxes[0:ngbins, i],
            model._compls[0:ngbins, i],
            "b-",
        )

        ax[i, 1].yaxis.tick_right()
        ax[i, 1].yaxis.set_label_position("right")
        ax[i, 1].set_ylim(0, 1)
        ax[i, 1].set_xscale("log")
        sfilt = cfilter.split("_")[-1]
        ax[i, 1].set_ylabel(f"C({sfilt})")

    ax[nfilters - 1, 0].set_xlabel(r"$F_i$")
    ax[nfilters - 1, 1].set_xlabel(r"$F_i$")

    # add in the zero line
    # do after all the data has been plotted to get the full x range
    pxrange = ax[0, 0].get_xlim()
    for i, cfilter in enumerate(sedgrid.filters):
        ax[i, 0].plot(pxrange, [1.0, 1.0], "k--", alpha=0.5)

    # figname
    basename = asts_filename.replace(".fits", "_plot")

    fig.tight_layout()

    # save or show fig
    if savefig:
        fig.savefig("{}.{}".format(basename, savefig))
    else:
        plt.show()
|
# -*- coding=utf-8 -*-
from flask import render_template, request, current_app, flash
from flask import jsonify, json
from flask_login import login_required
from xp_mall.extensions import db
from xp_mall.utils import redirect_back
from xp_mall.admin.admin_module import admin_module
from xp_mall.models.goods import Goods, GoodsComments
from xp_mall.models.category import GoodsCategory
from xp_mall.models.tags import tags_goods, GoodsTags
from xp_mall.forms.goods import GoodsForm
import json as sys_json
@admin_module.route('/manage/goods', defaults={'category_id': None})
@admin_module.route('/manage/goods/<int:category_id>', methods=['GET'])
@login_required
def manage_goods(category_id=None):
    """List goods (optionally restricted to one category), newest first,
    paginated by XPMALL_MANAGE_GOODS_PER_PAGE."""
    page = request.args.get('page', 1, type=int)
    per_page = current_app.config['XPMALL_MANAGE_GOODS_PER_PAGE']
    query = Goods.query
    if category_id:
        query = query.filter_by(category_id=category_id)
    pagination = query.order_by(Goods.timestamp.desc()).paginate(page, per_page=per_page)
    goods = pagination.items
    return render_template('admin/goods/manage_goods.html', page=page, pagination=pagination, goods=goods)
@admin_module.route('/manage/goods/new', methods=['GET', 'POST'])
@login_required
def new_goods():
    """Create a new goods record from the submitted GoodsForm.

    On success returns JSON {"course_id": id}; on validation failure
    returns the form errors as JSON; a plain GET renders the form.
    """
    form = GoodsForm()
    if form.validate_on_submit():
        title = form.title.data
        order_id = form.order_id.data
        thumb = form.thumb.data
        intro = form.intro.data
        body = form.body.data
        category = GoodsCategory.query.get(form.category.data)
        # NOTE(review): both replace() arguments render as commas here; the
        # first is presumably a full-width comma being normalized to an
        # ASCII comma -- confirm against the original file encoding.
        tags = form.tags.data.replace(",",",")
        price = form.price.data
        course = Goods(title=title, body=body, category=category, intro=intro, thumb=thumb, tags_list=tags,
                       order_id=order_id, total_price=price)
        # add_tags() returns GoodsTags objects (creating missing ones).
        tags_list_id = add_tags(tags)
        # same with:
        # category_id = form.category.data
        # post = Post(title=title, body=body, category_id=category_id)
        course.tags = tags_list_id
        db.session.add(course)
        db.session.commit()
        # flash('Post created.', 'success')
        # return redirect(url_for('blog.show_post', post_id=post.id))
        return jsonify({"course_id":course.id})
    elif request.method == 'POST' and form.errors:
        # Invalid POST: report the per-field validation errors.
        return jsonify(form.errors)
    return render_template('admin/goods/new_goods.html', form=form)
@admin_module.route('/goods/<int:goods_id>/edit', methods=['GET', 'POST'])
@login_required
def edit_goods(goods_id):
    """Edit an existing goods record.

    On a valid POST the record (including its tag set) is updated and
    committed. Note that the form fields are then unconditionally reloaded
    from the record below, so the edit page is re-rendered with the saved
    values.
    """
    form = GoodsForm()
    goods = Goods.query.get_or_404(goods_id)
    if form.validate_on_submit():
        goods.title = form.title.data
        goods.intro = form.intro.data
        goods.order_id = form.order_id.data
        goods.body = form.body.data
        goods.thumb = form.thumb.data
        goods.category = GoodsCategory.query.get(form.category.data)
        # NOTE(review): strips spaces, then the replace() arguments both
        # render as commas -- presumably full-width comma normalization;
        # confirm against the original file encoding.
        tags = form.tags.data.replace(" ","").replace(",", ",")
        goods.tags_list = tags
        tags_list_id = add_tags(tags)
        # Drop tag objects whose names no longer appear in the new string.
        for item in goods.tags:
            if tags.find(item.name) == -1:
                goods.tags.remove(item)
                print("-------------------")
        goods.tags = tags_list_id
        goods.price = form.price.data
        db.session.commit()
        # flash('Post updated.', 'success')
        # return redirect(url_for('blog.show_post', post_id=post.id))
    elif form.errors:
        print("************")
        print(form.errors)
    # Populate the form from the (possibly just-updated) record for display.
    form.title.data = goods.title
    form.order_id.data = goods.order_id
    form.intro.data = goods.intro
    form.body.data = goods.body
    form.thumb.data = goods.thumb
    thumb = str(goods.thumb)
    form.category.data = goods.category_id
    form.tags.data = goods.tags_list
    form.video_url.data = goods.video_url
    form.price.data = goods.price
    return render_template('admin/goods/edit_goods.html', form=form, thumb=thumb)
@admin_module.route('/goods/delete/<int:goods_id>', methods=['POST'])
@login_required
def delete_goods(goods_id):
    """Delete one goods record; 404 when the id is unknown."""
    target = Goods.query.get_or_404(goods_id)
    db.session.delete(target)
    db.session.commit()
    # return redirect_back()
    return "ok"
@admin_module.route("/manage/course/delete", methods=['POST'])
@login_required
def batch_delete_course():
    """Batch-delete goods by the posted checkID list, together with their
    tag associations and comments.

    The association delete now goes through the session so all three
    deletes commit in one transaction; the previous code ran it on a raw
    engine connection that was never closed (connection leak) and outside
    the session's transaction. Leftover debug prints were removed.
    """
    ids = request.form.getlist("checkID")
    # Remove tag associations first, then the goods rows and their comments.
    db.session.execute(tags_goods.delete().where(tags_goods.c.course_id.in_(ids)))
    Goods.query.filter(Goods.id.in_(ids)).delete(synchronize_session="fetch")
    GoodsComments.query.filter(GoodsComments.course_id.in_(ids)).delete(synchronize_session="fetch")
    db.session.commit()
    return "ok"
@admin_module.route('/manage/commment/set/<int:course_id>', methods=['POST'])
@login_required
def set_comment(course_id):
    """Toggle whether the given goods item accepts comments."""
    course = Goods.query.get_or_404(course_id)
    course.can_comment = not course.can_comment
    if course.can_comment:
        flash('Comment enabled.', 'success')
    else:
        flash('Comment disabled.', 'success')
    db.session.commit()
    # return redirect_back()
    return "ok"
@admin_module.route('/manage/comment/')
@login_required
def manage_comment():
    """List comments, newest first, optionally filtered by the 'filter'
    query argument."""
    # NOTE(review): the original comment documented 'unreviewed' but the
    # code matches 'unread' -- confirm which value the templates send.
    filter_rule = request.args.get('filter', 'all')  # 'all', 'unreviewed', 'admin'
    page = request.args.get('page', 1, type=int)
    per_page = current_app.config['XPCMS_COMMENT_PER_PAGE']
    if filter_rule == 'unread':
        filtered_comments = GoodsComments.query.filter_by(reviewed=False)
    elif filter_rule == 'admin':
        filtered_comments = GoodsComments.query.filter_by(from_admin=True)
    else:
        filtered_comments = GoodsComments.query
    pagination = filtered_comments.order_by(GoodsComments.timestamp.desc()).paginate(page, per_page=per_page)
    comments = pagination.items
    return render_template('article/admin/article/manage_comment.html', comments=comments, pagination=pagination)
@admin_module.route('/manage/comment/approve/<int:comment_id>', methods=['POST'])
@login_required
def approve_comment(comment_id):
    """Mark a comment as reviewed and go back to the referring page."""
    target = GoodsComments.query.get_or_404(comment_id)
    target.reviewed = True
    db.session.commit()
    flash('Comment published.', 'success')
    return redirect_back()
@admin_module.route('/manage/comment/delete/<int:comment_id>', methods=['POST'])
@login_required
def delete_comment(comment_id):
    """Delete one comment; 404 when the id is unknown."""
    target = GoodsComments.query.get_or_404(comment_id)
    db.session.delete(target)
    db.session.commit()
    flash('Comment deleted.', 'success')
    # return redirect_back()
    return "ok"
@admin_module.route("/manage/comment/delete", methods=['POST'])
@login_required
def batch_delete_comment():
    """Batch-delete comments by the posted checkID[] list.

    Leftover debug print statements were removed; they only polluted the
    server log.
    """
    ids = request.form.getlist("checkID[]")
    GoodsComments.query.filter(GoodsComments.id.in_(ids)).delete(synchronize_session="fetch")
    db.session.commit()
    return "ok"
def add_tags(tags):
    """Ensure a GoodsTags row exists for every name in the comma-separated
    *tags* string and return the corresponding tag objects.

    Blank names (produced by leading/trailing/doubled commas) are now
    skipped; the previous code inserted empty-named tags for them. Existing
    tags are looked up by exact name, unchanged.
    """
    tag_objects = []
    for name in tags.split(","):
        if not name.strip():
            continue  # skip blanks from stray commas
        existing = db.session.query(GoodsTags).filter_by(name=name).one_or_none()
        if existing:
            tag_objects.append(existing)
        else:
            new_tag = GoodsTags(name=name)
            db.session.add(new_tag)
            db.session.commit()  # commit assigns the primary key
            tag_objects.append(new_tag)
    return tag_objects
|
<reponame>sergej-C/dl_utils<gh_stars>0
import os
from os import listdir
from os.path import isfile, join
from shutil import copyfile, copy
from glob import glob
import numpy as np
def test():
    """Smoke-test hook: print 'test'.

    Uses the parenthesized form so a single-argument print behaves
    identically on Python 2 and Python 3 (the bare `print 'test'`
    statement is a SyntaxError on Python 3).
    """
    print('test')
def mkdir_ifnotexists(path):
    """
    create a folder with the specified path if not exists
    """
    if os.path.exists(path):
        return
    mkdirs(path)
def ls(path, only_files=False):
    """Print and return the entries of *path*; only regular files when
    only_files is truthy.

    Fixes: the Python-2-only `print fs` statement is replaced with the
    parenthesized form (identical output on Python 2 for a single
    argument), and the `only_files==True` comparison uses plain truthiness.
    """
    if only_files:
        fs = [f for f in listdir(path) if isfile(join(path, f))]
    else:
        fs = [f for f in listdir(path)]
    print(fs)
    return fs
def create_sample_dirs(competition_path):
    """Create the Kaggle-style directory skeleton (valid/results/sample/
    test) under *competition_path*, tolerating folders that already exist
    (see mkdirs)."""
    current_dir = os.getcwd()
    os.chdir(competition_path)

    # Create directories
    mkdirs('data/valid')
    mkdirs('results')
    mkdirs('data/sample/train')
    mkdirs('data/sample/test')
    mkdirs('data/sample/valid')
    mkdirs('data/sample/results')
    mkdirs('test/unknown')

    # back to cwd
    os.chdir(current_dir)
def mkdirs(directory_name):
    """Create *directory_name* (including parents), silently ignoring the
    case where it already exists; any other OSError is re-raised.

    Uses errno.EEXIST instead of the magic number 17.
    """
    import errno
    try:
        os.makedirs(directory_name)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise
def cp_random_file_to_dir(from_dir, to_dir, size=-1, ext='jpg', move=False):
    """Copy (or move, when move=True) *size* randomly chosen '*.ext' files
    from *from_dir* into *to_dir*; size=-1 takes every matching file.

    The function chdirs into from_dir and restores the cwd afterwards, so
    a relative *to_dir* is resolved relative to *from_dir*.
    """
    current_dir = os.getcwd()
    print ("cwd: "+current_dir)
    os.chdir(from_dir)
    print ("going to :"+from_dir)
    print ("copy to :"+to_dir)
    g = glob('*.'+ext)
    if size==-1:
        size=len(g)
    # Random permutation so the first `size` entries are a random sample.
    shuf = np.random.permutation(g)
    if move==False:
        for i in range(size): copyfile(shuf[i], to_dir + '/' + shuf[i])
    else:
        # os.rename moves the file instead of copying it.
        for i in range(size): os.rename(shuf[i], to_dir + '/' + shuf[i])
    os.chdir(current_dir)
def create_sample_sets(data_path, val_size, train_sample_size, val_sample_size):
    """
    create validation set and training + validation sample sets

    training data is in data_path/train
    validation data is taken from data_path/train
    data_path/valid is created
    data_path/sample/train and sample/valid are created

    val_size files are MOVED out of train into valid; the two sample sets
    are COPIES, so the originals stay in place.
    """
    create_sample_dirs(data_path + '/..')

    # move data from training to validation dir
    cp_random_file_to_dir(data_path+ '/train', data_path+'/valid', move=True, size=val_size )

    # create a sample for training from train dir
    cp_random_file_to_dir(data_path+ '/train', data_path+'/sample/train/', move=False, size=train_sample_size )

    # create a sample for validation from valid dir
    cp_random_file_to_dir(data_path+ '/valid', data_path+'/sample/valid/', move=False, size=val_sample_size )
def mvfiles(expr, to_dir):
    """Copy every file matching glob pattern *expr* into *to_dir*.

    NOTE(review): despite the name this COPIES (shutil.copy) rather than
    moves; kept as-is to preserve existing behavior.
    """
    matches = glob(expr)
    for match in matches:
        copy(match, to_dir)
def create_dir_for_cat(cat, path, ext='jpg'):
    """
    create a folder with name cat + 's' and puts them all files with name cat.*.ext taken from path

    Note: chdirs into *path* and does not restore the previous cwd; the
    caller (create_categories_folders) handles restoration.
    """
    os.chdir(path)
    mkdirs(cat + 's')
    mvfiles(cat+'.*.'+ext, cat+'s')
def create_categories_folders(data_path, categories, ext='jpg'):
    """
    create folder for every category in categories (i.e. [cat, dog])
    in data_path/sample/train sample/valid valid train
    puts all files from these directories to the new ones
    files are search with wildcard such that the prefix is equal to the category name
    i.e. cat.*.ext to cats folder...

    Also gathers all test files into data_path/test/unknown/ (the single
    pseudo-class layout expected by Keras-style generators). The original
    cwd is restored before returning.
    """
    current_dir = os.getcwd()

    for c in categories:
        create_dir_for_cat(c, data_path + '/sample/train', ext=ext)
        create_dir_for_cat(c, data_path + '/sample/valid', ext=ext)
        create_dir_for_cat(c, data_path + '/valid', ext=ext)
        create_dir_for_cat(c, data_path + '/train', ext=ext)

    os.chdir(data_path + '/test')
    mvfiles('*.'+ext, 'unknown/')
    os.chdir(current_dir)
# from http://stackoverflow.com/questions/38987/how-to-merge-two-python-dictionaries-in-a-single-expression
def merge_two_dicts(x, y):
    """Given two dicts, merge them into a new dict as a shallow copy.

    Keys present in both take their value from *y*; neither input is
    modified.
    """
    merged = dict(x)
    merged.update(y)
    return merged
# from http://stackoverflow.com/questions/38987/how-to-merge-two-python-dictionaries-in-a-single-expression
def merge_dicts(*dict_args):
    """
    Given any number of dicts, shallow copy and merge into a new dict,
    precedence goes to key value pairs in latter dicts.
    """
    merged = {}
    for mapping in dict_args:
        merged.update(mapping)
    return merged
import bcolz
def save_array(fname, arr):
    """Persist *arr* to disk at *fname* as a bcolz carray."""
    carr = bcolz.carray(arr, rootdir=fname, mode='w')
    carr.flush()
def load_array(fname):
    """Load the bcolz carray stored at *fname* fully into memory."""
    return bcolz.open(fname)[:]
def array_to_ind(array):
    """Map each element of *array* to its index, e.g. ['a','b'] -> {'a': 0, 'b': 1}.

    Uses enumerate instead of the Python-2-only xrange so the helper works
    on both interpreters; duplicate elements keep the last index, matching
    the original zip-based behavior.
    """
    return dict((element, index) for index, element in enumerate(array))
|
<reponame>jowage58/cassyy
"""
Central Authentication Service (CAS) client
"""
import dataclasses
import logging
import urllib.parse
import urllib.request
import xml.etree.ElementTree
from typing import Dict, Optional, Union
logger = logging.getLogger(__name__)
def _fetch_url(url: str, timeout: float = 10.0) -> bytes:
    """GET *url* and return the raw response body (blocking, stdlib only)."""
    with urllib.request.urlopen(url, timeout=timeout) as f:
        return f.read()
class CASError(Exception):
    """Base CAS failure; *error_code* carries the CAS protocol error code
    (or a repr of the underlying transport error)."""
    def __init__(self, error_code: str, *args) -> None:
        super().__init__(error_code, *args)
        self.error_code = error_code
class CASInvalidServiceError(CASError):
    """Raised when CAS reports INVALID_SERVICE for the presented service URL."""
    # The explicit __init__ that only forwarded to super() was redundant
    # and has been removed; construction behavior is unchanged.
class CASInvalidTicketError(CASError):
    """Raised when CAS reports INVALID_TICKET for the presented ticket."""
    # The explicit __init__ that only forwarded to super() was redundant
    # and has been removed; construction behavior is unchanged.
@dataclasses.dataclass
class CASUser:
    """Authenticated CAS principal: the userid plus any attributes the CAS
    server released during ticket validation."""
    # Username from <cas:user>.
    userid: str
    # Attribute name -> value pairs from <cas:attributes>.
    attributes: Dict[str, str] = dataclasses.field(default_factory=dict)

    def asdict(self) -> Dict[str, Union[str, Dict[str, str]]]:
        """Return a plain-dict representation (recursively converted)."""
        return dataclasses.asdict(self)
class CASClient:
    """Minimal CAS client: builds login/logout URLs and validates service
    tickets against the serviceValidate endpoint."""

    # XML namespace used by CAS responses.
    CAS_NS = {'cas': 'http://www.yale.edu/tp/cas'}
    CAS_VALIDATE_ENCODING = 'utf-8'
    CAS_VALIDATE_TIMEOUT = 10.0

    def __init__(
        self,
        login_url: str,
        logout_url: str,
        validate_url: str,
    ) -> None:
        self.login_url = login_url
        self.logout_url = logout_url
        self.validate_url = validate_url

    @classmethod
    def from_base_url(
        cls,
        base_url: str,
        *,
        login_path: str = '/login',
        logout_path: str = '/logout',
        validate_path: str = '/p3/serviceValidate',
    ) -> 'CASClient':
        """Alternate constructor deriving all three endpoint URLs from one
        base URL."""
        return cls(
            login_url=urllib.parse.urljoin(base_url, login_path),
            logout_url=urllib.parse.urljoin(base_url, logout_path),
            validate_url=urllib.parse.urljoin(base_url, validate_path),
        )

    def validate(
        self,
        service_url: str,
        ticket: str,
        *,
        timeout: Optional[float] = None,
        **kwargs
    ) -> CASUser:
        """Validate *ticket* for *service_url* and return the CASUser.

        Raises CASError (or a subclass) on transport/decoding failures or
        a CAS failure response.
        """
        if timeout is None:
            timeout = self.CAS_VALIDATE_TIMEOUT
        target_validate = self.build_validate_url(service_url, ticket, **kwargs)
        logger.debug('Validating %s', target_validate)
        try:
            resp_data = _fetch_url(target_validate, timeout=timeout)
            resp_text = resp_data.decode(self.CAS_VALIDATE_ENCODING)
        except Exception as exc:
            # Wrap any transport/decoding failure in a CASError.
            raise CASError(repr(exc))
        else:
            logger.debug('Response:\n%s', resp_text)
            return self.parse_cas_response(resp_text)

    def build_login_url(
        self,
        service: str,
        *,
        callback_post: bool = False,
        **kwargs,
    ) -> str:
        """Return the CAS login URL for *service*; callback_post asks CAS
        to call back with POST instead of GET."""
        params = {'service': service, **kwargs}
        if callback_post:
            params['method'] = 'POST'
        qs = urllib.parse.urlencode(params)
        return f'{self.login_url}?{qs}'

    def build_logout_url(self, service: Optional[str] = None, **kwargs) -> str:
        """Return the CAS logout URL, optionally with a post-logout
        *service* redirect."""
        if service is None:
            if not kwargs:
                return self.logout_url
            params = kwargs
        else:
            params = {'service': service, **kwargs}
        qs = urllib.parse.urlencode(params)
        return f'{self.logout_url}?{qs}'

    def build_validate_url(self, service: str, ticket: str, **kwargs) -> str:
        """Return the serviceValidate URL for *service* and *ticket*."""
        params = {'service': service, 'ticket': ticket, **kwargs}
        qs = urllib.parse.urlencode(params)
        return f'{self.validate_url}?{qs}'

    def parse_cas_response(self, cas_response: str) -> CASUser:
        """Parse the raw XML validation response into a CASUser."""
        try:
            root = xml.etree.ElementTree.fromstring(cas_response)
        except Exception as exc:
            raise CASError('INVALID_RESPONSE', repr(exc)) from exc
        else:
            return self.parse_cas_xml(root)

    def parse_cas_xml(self, root: xml.etree.ElementTree.Element) -> CASUser:
        """Return the CASUser on success; otherwise delegate to
        parse_cas_xml_error, which always raises."""
        user_elem = root.find('cas:authenticationSuccess/cas:user',
                              self.CAS_NS)
        if user_elem is not None:
            attr_elem = root.find('cas:authenticationSuccess/cas:attributes',
                                  self.CAS_NS)
            return self.parse_cas_xml_user(user_elem, attr_elem)
        # No success element: this raises, so no return is needed here.
        self.parse_cas_xml_error(root)

    def parse_cas_xml_user(
        self,
        user_elem: xml.etree.ElementTree.Element,
        attr_elem: Optional[xml.etree.ElementTree.Element],
    ) -> CASUser:
        """Build a CASUser from the <cas:user> and optional <cas:attributes>
        elements.

        NOTE(review): Element.text is None for empty elements, so userid or
        attribute values may be None for degenerate responses.
        """
        cas_user = CASUser(userid=user_elem.text)
        if attr_elem is not None:
            # Strip the namespace prefix from each attribute tag name.
            tag_ns = '{' + self.CAS_NS['cas'] + '}'
            for e in attr_elem:
                attr_name = e.tag.replace(tag_ns, '', 1)
                cas_user.attributes[attr_name] = e.text
        return cas_user

    def parse_cas_xml_error(
        self,
        root: xml.etree.ElementTree.Element,
    ) -> None:
        """Raise the CASError subclass matching the failure response.

        Always raises: CASInvalidTicketError / CASInvalidServiceError for
        the corresponding codes, otherwise CASError (code 'Unknown' when
        no failure element is present).
        """
        error_code = 'Unknown'
        error_elem = root.find('cas:authenticationFailure', self.CAS_NS)
        if error_elem is not None:
            error_code = error_elem.attrib.get('code', error_code)
            error_text = error_elem.text
            if error_code == 'INVALID_TICKET':
                raise CASInvalidTicketError(error_code, error_text)
            elif error_code == 'INVALID_SERVICE':
                raise CASInvalidServiceError(error_code, error_text)
        raise CASError(error_code)

    def __repr__(self) -> str:
        return (
            'CASClient('
            f'login_url={self.login_url!r}, '
            f'logout_url={self.logout_url!r}, '
            f'validate_url={self.validate_url!r}'
            ')'
        )
|
<filename>protlearn/features/moran.py
# Author: <NAME> <<EMAIL>>
import numpy as np
import pandas as pd
from ..utils.validation import check_input, check_alpha, check_natural
import pkg_resources
PATH = pkg_resources.resource_filename(__name__, 'data/')
# default indices of AAIndex1 (Xiao et al., 2015)
default = ['CIDH920105', 'BHAR880101', 'CHAM820101', 'CHAM820102',
'CHOC760101', 'BIGC670101', 'CHAM810101', 'DAYM780201']
def moran(X, *, d=1, properties=default, start=1, end=None):
    """Moran's I based on AAIndex1.

    Moran's I autocorrelation descriptors are defined based on the distribution
    of AAIndex1-based amino acid properties along the sequence. All indices are
    standardized before computing the descriptors. For the exact formula, please
    refer to the documentation at https://protlearn.readthedocs.io/.

    Parameters
    ----------
    X : string, fasta, or a list thereof
        Dataset of amino acid sequences.

    properties : list
        List of strings denoting AAIndex1 indices.

    d : int, default=1
        Represents the lag. Must be smaller than sequence length. Maximum: 30.

    start : int, default=1
        Determines the starting point of the amino acid sequence. This number is
        based on one-based indexing.

    end : int, default=None
        Determines the end point of the amino acid sequence. Similarly to start,
        this number is based on one-based indexing.

    Returns
    -------
    arr : ndarray of shape (n_samples, n_properties)
        Array containing Moran's I autocorrelation descriptors.

    References
    ----------
    Moran, P. (1950). Notes on Continuous Stochastic Phenomena. Biometrika,
    37(1/2), 17-23. doi:10.2307/2332142

    Li et al. (2007). Beyond Moran's I: testing for spatial
    dependence based on the spatial autoregressive model. Geogr. Anal. 39,
    357-375.

    Xiao et al. (2015). protr/ProtrWeb: R package and web server for generating
    various numerical representation schemes of protein sequences.
    Bioinformatics 31 (11), 1857-1859

    Examples
    --------
    >>> from protlearn.features import moran
    >>> seqs = ['ARKLY', 'EERKPGL']
    >>> moranI = moran(seqs)
    """
    # input handling
    X = check_input(X)
    min_len = min([len(seq) for seq in X])
    if d > 30:
        raise ValueError('Maximum lag parameter is 30!')
    if d >= min_len:
        raise ValueError('Lag parameter d must be smaller than sequence length!')

    # load data
    df = pd.read_csv(PATH+'aaindex1.csv').set_index('Description')
    df = df.reindex(sorted(df.columns), axis=1)
    # Bug fix: select the user-supplied `properties` rather than the
    # module-level `default`, which made the `properties` argument a no-op.
    data = np.asarray(df.loc[properties])

    # list of amino acids (IUPAC standard)
    amino_acids = 'ACDEFGHIKLMNPQRSTVWY'
    aadict = {amino_acids[i]: i for i in range(20)}

    # standardization (zero mean, unit variance per property)
    for i in range(data.shape[0]):
        data[i,:] = [(j-np.mean(data[i,:]))/np.std(data[i,:]) for j in data[i,:]]

    # calculate Moran's I
    arr = np.zeros((len(X), len(properties)))
    for i, seq in enumerate(X):
        check_alpha(seq)  # check if alphabetical
        check_natural(seq)  # check for unnatural amino acids
        seq = seq[start-1:end]  # positional information
        eq1 = 1/(len(seq)-d)
        for j in range(len(properties)):
            p = [data[j, aadict[aa]] for aa in seq]
            p_prime = sum(p)/len(seq)
            # numerator: lag-d autocovariance; denominator: variance
            eq2 = sum([(p[i]-p_prime)*(p[i+d]-p_prime) for i in range(len(seq)-d)])
            eq3 = sum([(p[i]-p_prime)**2 for i in range(len(seq))])
            arr[i,j] = (eq1*eq2)/((1/len(seq))*eq3)

    return arr
import sys
sys.path.extend(['..'])  # make project packages (generator, config, ...) importable
import tensorflow as tf
# TF1-style session: don't log device placement, and grow GPU memory on demand
# instead of pre-allocating the whole card at startup.
config = tf.ConfigProto(log_device_placement=False)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
from generator.generate_code import *
from nltk.translate.bleu_score import corpus_bleu
from config.config import *
from base.BaseModel import *
from utils.tokenizer import *
def evaluate_model(input_path, model_path, tokenizer, max_length=48, display=False):
    '''
    Evaluate model by comparing actual vs predictions via the BLEU scoring criteria.

    :param input_path: input path containing images + gui code pairs to evaluate model on
    :param model_path: path to model files; must contain one *.json architecture
        file and one *.hdf5 weights file
    :param tokenizer: a Keras Tokenizer object fit on vocab
    :param max_length: context length
    :param display: bool on whether to print out DSL code predictions and actual
        labels to standard output
    :return: 4-ngram BLEU score, list of actual DSL code, list of predicted DSL code
    :raises FileNotFoundError: if the architecture or weights file is missing
    '''
    # Fail with a clear message instead of an opaque IndexError when the
    # expected model files are not present in model_path.
    json_matches = glob.glob(os.path.join(model_path, '*.json'))
    weight_matches = glob.glob(os.path.join(model_path, '*.hdf5'))
    if not json_matches or not weight_matches:
        raise FileNotFoundError(
            'Expected one *.json and one *.hdf5 file in %s' % model_path)
    with open(json_matches[0], 'r') as fh:
        model_json = fh.read()
    model = model_from_json(model_json)
    model.load_weights(weight_matches[0])
    print('Successfully loaded model and model weights...')

    images, texts = load_data(input_path)
    actual, predictions = [], []
    # Iterate images/texts in lockstep instead of indexing via range(len(...)).
    for image, text in zip(images, texts):
        predicted_code = generate_code(model, image, tokenizer, max_length, display)
        if display:
            print('\n\nActual---->\n\n' + text)
        # corpus_bleu expects, per sample, a list of reference token lists.
        actual.append([text.split()])
        predictions.append(predicted_code.split())
    bleu = corpus_bleu(actual, predictions)
    return bleu, actual, predictions
if __name__ == '__main__':
    argv = sys.argv[1:]
    if len(argv) != 1:
        print('Need to supply an argument specifying model path')
        sys.exit(1)  # non-zero exit: invocation error, not success
    model_path = argv[0]
    test_dir = '../data/test/'
    vocab_path = '../data/code.vocab'
    # NB: this rebinding shadows the imported `tokenizer` factory function;
    # kept as-is since the factory is not needed again afterwards.
    tokenizer = tokenizer(vocab_path)
    bleu, actual, predictions = evaluate_model(test_dir, model_path, tokenizer, CONTEXT_LENGTH, display=False)
    # Cumulative n-gram BLEU scores (BLEU-1..BLEU-4). BLEU-4 is the standard
    # headline metric; computing each score once avoids running corpus_bleu a
    # second time just to write the report file.
    ngram_weights = [
        ('BLEU-1', (1.0, 0, 0, 0)),
        ('BLEU-2', (0.5, 0.5, 0, 0)),
        ('BLEU-3', (0.3, 0.3, 0.3, 0)),
        ('BLEU-4', (0.25, 0.25, 0.25, 0.25)),
    ]
    scores = [(label, corpus_bleu(actual, predictions, weights=w))
              for label, w in ngram_weights]
    for label, score in scores:
        print('%s: %f' % (label, score))
    bleu_score_path = os.path.join(model_path, 'bleu_score.txt')
    with open(bleu_score_path, 'w') as fh:
        fh.write('Test set dir: %s\n' % test_dir)
        for label, score in scores:
            fh.write('%s: %f \n' % (label, score))
|
<reponame>aisportsbets/pygrid
from base64 import b64encode, b64decode
from json import dumps
from syft import deserialize
from syft.core.store.storeable_object import StorableObject
from syft.core.store import Dataset
from syft.core.common import UID
from flask import current_app as app
import torch as th
import pytest
import jwt
import os
from src.main.core.database.store_disk import (
DiskObjectStore,
create_storable,
)
from src.main.core.database.bin_storage.metadata import StorageMetadata, get_metadata
from src.main.core.database.bin_storage.bin_obj import BinObject
from src.main.core.datasets.dataset_ops import create_dataset
from src.main.core.database import *
ENCODING = "UTF-8"
# Expected stdlib error text for malformed JSON (implicit string concatenation).
JSON_DECODE_ERR_MSG = (
    "Expecting property name enclosed in " "double quotes: line 1 column 2 (char 1)"
)
# Role tuples: positional args for create_role() — a name plus 7 boolean
# permission flags.
owner_role = ("Owner", True, True, True, True, True, True, True)
user_role = ("User", False, False, False, False, False, False, False)
admin_role = ("Administrator", True, True, True, True, False, False, True)
# User tuples: positional args for create_user().  NOTE(review): the meaning of
# each field (email, salt/hash, key, role id) is inferred from position only —
# confirm against create_user's signature.
user1 = (
    "<EMAIL>",
    "BDEB6E8EE39B6C70835993486C9E65DC",
    "]GBF[R>GX[9Cmk@DthFT!mhloUc%[f",
    "fd062d885b24bda173f6aa534a3418bcafadccecfefe2f8c6f5a8db563549ced",
    1,
)
user2 = (
    "<EMAIL>",
    "2amt5MXKdLhEEL8FiQLcl8Mp0FNhZI6",
    "$2b$12$rj8MnLcKBxAgL7GUHrYn6O",
    "acfc10d15d7ec9f7cd05a312489af2794619c6f11e9af34671a5f33da48c1de2",
    2,
)
user3 = (
    "<EMAIL>",
    "2amt5MXKdLhEEL8FiQLcl8Mp0FNhZI6",
    "$2b$12$rj8MnLcKBxAgL7GUHrYn6O",
    "acfc10d15d7ec9f7cd05a312489af2794619c6f11e9af34671a5f33da48c1de2",
    3,
)
# Pre-built storable tensor objects shared by the tests below.
storable = create_storable(
    _id=UID(),
    data=th.Tensor([1, 2, 3, 4]),
    description="Dummy tensor",
    tags=["dummy", "tensor"],
)
storable2 = create_storable(
    _id=UID(),
    data=th.Tensor([-1, -2, -3, -4]),
    description="Negative Dummy tensor",
    tags=["negative", "dummy", "tensor"],
)
storable3 = create_storable(
    _id=UID(),
    data=th.Tensor([11, 22, 33, 44]),
    description="NewDummy tensor",
    tags=["new", "dummy", "tensor"],
)
# CSV-style tensor payloads as they would arrive over the HTTP API.
tensor1 = {
    "content": "1, 2, 3, 4\n10, 20, 30, 40",
    "manifest": "Suspendisse et fermentum lectus",
    "description": "Dummy tensor",
    "tags": ["dummy", "tensor"],
}
tensor2 = {
    "content": "-1, -2, -3, -4,\n-100, -200, -300, -400",
    "manifest": "Suspendisse et fermentum lectus",
    "description": "Negative Dummy tensor",
    "tags": ["negative", "dummy", "tensor"],
}
tensor3 = {
    "content": "11, 22, 33, 44\n111, 222, 333, 444",
    "manifest": "Aenean at dictum ipsum",
    "description": "NewDummy tensor",
    "tags": ["new", "dummy", "tensor"],
}
# A full dataset payload; tests take .copy()s of the tensors so mutations in
# one test don't leak into another.
dataset = {
    "name": "Dummy Dataset",
    "description": "Neque porro quisquam",
    "manifest": "Sed vehicula mauris non turpis sollicitudin congue.",
    "tags": ["#hashtag", "#dummy", "#original"],
    "created_at": "05/12/2018",
    "tensors": {"train": tensor1.copy(), "test": tensor2.copy()},
}
@pytest.fixture
def cleanup(database):
    """Best-effort teardown: wipe every table this test module touches.

    The code below the ``yield`` runs after the test body. A failure rolls the
    session back instead of failing the test, preserving best-effort semantics.
    """
    yield
    try:
        database.session.query(User).delete()
        database.session.query(Role).delete()
        database.session.query(Group).delete()
        database.session.query(UserGroup).delete()
        database.session.query(BinObject).delete()
        database.session.query(JsonObject).delete()
        database.session.query(StorageMetadata).delete()
        database.session.commit()
    except Exception:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # `Exception` keeps the deliberate best-effort cleanup without that.
        database.session.rollback()
def test_create_dataset(client, database, cleanup):
    """POST /data-centric/datasets stores two BinObjects plus a JsonObject
    whose metadata round-trips the submitted payload."""
    # Seed roles/users so the JWT below maps to an authorized user (id=1).
    new_role = create_role(*owner_role)
    database.session.add(new_role)
    new_role = create_role(*user_role)
    database.session.add(new_role)
    new_user = create_user(*user1)
    database.session.add(new_user)
    new_user = create_user(*user2)
    database.session.add(new_user)
    database.session.commit()
    token = jwt.encode({"id": 1}, app.config["SECRET_KEY"])
    headers = {
        "token": token.decode("UTF-8"),
    }
    # Read two CSV fixtures that live next to this test file.
    # NOTE(review): file handles are intentionally rebound without close();
    # acceptable in a short-lived test, but `with open(...)` would be tidier.
    folder = os.path.dirname(__file__)
    file1 = open(f"{folder}/mtcars_train.csv", "rb")
    file1 = file1.read().decode("utf-8")
    file2 = open(f"{folder}/mtcars_test.csv", "rb")
    file2 = file2.read().decode("utf-8")
    payload = {
        "name": "Cars dataset",
        "description": " ... ",
        "manifest": "Columns: mpg,cyl,disp,hp,drat,wt,qsec,vs,am,gear,carb",
        "tags": ["#hashtag", "#diabetes"],
        "created_at": "05/12/2020",
        "tensors": {
            "train": {"content": file1, "manifest": ""},
            "test": {"content": file2, "manifest": ""},
        },
    }
    result = client.post(
        "/data-centric/datasets",
        headers=headers,
        data=dumps(payload),
        content_type="multipart/form-data",
    )
    assert result.status_code == 200
    _id = result.get_json().get("id", None)
    # One BinObject per uploaded tensor, linked via DatasetGroup rows.
    storables = (
        database.session.query(DatasetGroup.bin_object).filter_by(dataset=_id).all()
    )
    assert storables is not None
    assert len(storables) == 2
    storables = [el[0] for el in storables]  # unwrap single-column result rows
    assert database.session.query(BinObject).get(storables[0]) is not None
    assert database.session.query(BinObject).get(storables[1]) is not None
    # The JSON metadata record must exist and echo the submitted fields.
    assert database.session.query(JsonObject).get(_id) is not None
    assert database.session.query(JsonObject).get(_id).binary is not None
    _json = database.session.query(JsonObject).get(_id).binary
    assert _json["id"] == _id
    assert _json["tags"] == payload["tags"]
    assert _json["manifest"] == payload["manifest"]
    assert _json["created_at"] == payload["created_at"]
def test_get_all_datasets_metadata(client, database, cleanup):
    """GET /data-centric/datasets lists metadata for every stored dataset."""
    # Seed roles/users so the JWT below maps to an authorized user (id=1).
    new_role = create_role(*owner_role)
    database.session.add(new_role)
    new_role = create_role(*user_role)
    database.session.add(new_role)
    new_user = create_user(*user1)
    database.session.add(new_user)
    new_user = create_user(*user2)
    database.session.add(new_user)
    database.session.commit()
    new_dataset = {
        "name": "Dummy Dataset 1",
        "description": "Lorem ipsum dolor",
        "manifest": "Etiam vestibulum velit a tellus aliquet varius",
        "tags": ["#hashtag", "#dummy"],
        "created_at": "05/12/2019",
        "tensors": {"train": tensor2.copy()},
    }
    # NOTE(review): `storage` is never used afterwards — presumably the
    # DiskObjectStore constructor has a needed side effect; confirm.
    storage = DiskObjectStore(database)
    df_json1 = create_dataset(dataset)
    df_json2 = create_dataset(new_dataset)
    token = jwt.encode({"id": 1}, app.config["SECRET_KEY"])
    headers = {
        "token": token.decode("UTF-8"),
    }
    result = client.get(
        "/data-centric/datasets", headers=headers, content_type="application/json"
    )
    assert result.status_code == 200
    # Both datasets must appear in the listing, field by field.
    assert df_json1["id"] in [el["id"] for el in result.get_json()]
    assert df_json1["description"] in [el["description"] for el in result.get_json()]
    assert df_json1["manifest"] in [el["manifest"] for el in result.get_json()]
    assert df_json2["id"] in [el["id"] for el in result.get_json()]
    assert df_json2["description"] in [el["description"] for el in result.get_json()]
    assert df_json2["manifest"] in [el["manifest"] for el in result.get_json()]
def test_get_specific_dataset_metadata(client, database, cleanup):
    """GET /data-centric/datasets/<id> returns that dataset's metadata."""
    # Seed roles/users so the JWT below maps to an authorized user (id=1).
    new_role = create_role(*owner_role)
    database.session.add(new_role)
    new_role = create_role(*user_role)
    database.session.add(new_role)
    new_user = create_user(*user1)
    database.session.add(new_user)
    new_user = create_user(*user2)
    database.session.add(new_user)
    database.session.commit()
    storage = DiskObjectStore(database)
    df_metadata = create_dataset(dataset)
    token = jwt.encode({"id": 1}, app.config["SECRET_KEY"])
    headers = {
        "token": token.decode("UTF-8"),
    }
    result = client.get(
        "/data-centric/datasets/{}".format(df_metadata["id"]),
        headers=headers,
        content_type="application/json",
    )
    assert result.status_code == 200
    # The endpoint must echo the stored metadata field by field.
    assert result.get_json()["id"] == df_metadata["id"]
    assert result.get_json()["tags"] == df_metadata["tags"]
    assert result.get_json()["name"] == df_metadata["name"]
    assert result.get_json()["manifest"] == df_metadata["manifest"]
    assert result.get_json()["description"] == df_metadata["description"]
def test_update_dataset(client, database, cleanup):
    """PUT /data-centric/datasets/<id> replaces the stored metadata and
    leaves exactly one DatasetGroup link (the new single 'train' tensor)."""
    # Seed roles/users so the JWT below maps to an authorized user (id=1).
    new_role = create_role(*owner_role)
    database.session.add(new_role)
    new_role = create_role(*user_role)
    database.session.add(new_role)
    new_user = create_user(*user1)
    database.session.add(new_user)
    new_user = create_user(*user2)
    database.session.add(new_user)
    database.session.commit()
    new_dataset = {
        "name": "Dummy Dataset 1",
        "description": "Lorem ipsum dolor",
        "manifest": "Etiam vestibulum velit a tellus aliquet varius",
        "tags": ["#tensor", "#dummy1"],
        "created_at": "19/06/1972",
        "tensors": {"train": tensor2.copy()},
    }
    storage = DiskObjectStore(database)
    df_json1 = create_dataset(dataset)
    token = jwt.encode({"id": 1}, app.config["SECRET_KEY"])
    headers = {
        "token": token.decode("UTF-8"),
    }
    # Precondition: the original dataset and its metadata exist.
    assert (
        database.session.query(DatasetGroup).filter_by(dataset=df_json1["id"]).all()
        is not None
    )
    assert database.session.query(JsonObject).get(df_json1["id"]) is not None
    assert database.session.query(JsonObject).get(df_json1["id"]).binary == df_json1
    result = client.put(
        "/data-centric/datasets/{}".format(df_json1["id"]),
        data=dumps(new_dataset),
        headers=headers,
        content_type="application/json",
    )
    assert result.status_code == 200
    assert result.get_json()["id"] == df_json1["id"]
    assert (
        database.session.query(DatasetGroup).filter_by(dataset=df_json1["id"]).all()
        is not None
    )
    # new_dataset has a single tensor, so exactly one link must remain.
    assert (
        len(
            database.session.query(DatasetGroup).filter_by(dataset=df_json1["id"]).all()
        )
        == 1
    )
    assert database.session.query(JsonObject).get(df_json1["id"]) is not None
    metadata = database.session.query(JsonObject).get(df_json1["id"])
    assert metadata is not None
    metadata = metadata.binary
    # Stored metadata must now reflect the replacement payload.
    assert metadata["description"] == new_dataset["description"]
    assert metadata["manifest"] == new_dataset["manifest"]
    assert metadata["created_at"] == new_dataset["created_at"]
    assert metadata["tags"] == new_dataset["tags"]
    assert metadata["name"] == new_dataset["name"]
def test_delete_dataset(client, database, cleanup):
    """DELETE /data-centric/datasets/<id> removes the metadata, the linked
    BinObjects, and all DatasetGroup links."""
    # Seed roles/users so the JWT below maps to an authorized user (id=1).
    new_role = create_role(*owner_role)
    database.session.add(new_role)
    new_role = create_role(*user_role)
    database.session.add(new_role)
    new_user = create_user(*user1)
    database.session.add(new_user)
    new_user = create_user(*user2)
    database.session.add(new_user)
    database.session.commit()
    storage = DiskObjectStore(database)
    df_json1 = create_dataset(dataset)
    _id = df_json1["id"]
    token = jwt.encode({"id": 1}, app.config["SECRET_KEY"])
    headers = {
        "token": token.decode("UTF-8"),
    }
    # Precondition: links, binaries, and metadata all exist before deletion.
    assert database.session.query(DatasetGroup).filter_by(dataset=_id).all() is not None
    storable_ids = (
        database.session.query(DatasetGroup.bin_object).filter_by(dataset=_id).all()
    )
    storable_ids = [x[0] for x in storable_ids]  # unwrap single-column rows
    assert database.session.query(JsonObject).get(_id) is not None
    assert database.session.query(JsonObject).get(_id).binary is not None
    assert (
        database.session.query(JsonObject).get(_id).binary["description"]
        == dataset["description"]
    )
    assert database.session.query(JsonObject).get(_id).binary["tags"] == dataset["tags"]
    result = client.delete(
        "/data-centric/datasets/{}".format(_id),
        headers=headers,
        content_type="application/json",
    )
    assert result.status_code == 204
    # NOTE(review): the two asserts below use `db` (star import) where the
    # rest of the test uses the `database` fixture — confirm both refer to
    # the same session/engine.
    for strbl_id in storable_ids:
        assert db.session.query(BinObject).filter_by(id=strbl_id).first() is None
        assert db.session.query(ObjectMetadata).filter_by(id=strbl_id).first() is None
    assert database.session.query(DatasetGroup).filter_by(dataset=_id).all() == []
    assert database.session.query(JsonObject).get(_id) is None
|
<reponame>py-az-cli/py-az-cli<gh_stars>0
'''
Manage kusto pool with synapse
'''
from .... pyaz_utils import _call_az
def list_sku(name, resource_group, workspace_name):
    '''
    Returns the SKUs available for the provided resource.

    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace
    '''
    # locals() here is exactly the declared parameters (nothing else has been
    # bound), which _call_az maps onto the CLI flags.
    return _call_az("az synapse kusto pool list-sku", locals())
def list(resource_group, workspace_name):
    '''
    List all Kusto pools.

    Required Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace
    '''
    # NOTE: this name shadows the builtin `list` by design — the module
    # mirrors `az synapse kusto pool list` one-to-one.
    return _call_az("az synapse kusto pool list", locals())
def show(name, resource_group, workspace_name):
    '''
    Gets a Kusto pool.

    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace
    '''
    # locals() == the declared parameters; _call_az turns them into CLI flags.
    return _call_az("az synapse kusto pool show", locals())
def delete(name, resource_group, workspace_name, no_wait=None, yes=None):
    '''
    Deletes a Kusto pool.

    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace

    Optional Parameters:
    - no_wait -- Do not wait for the long-running operation to finish.
    - yes -- Do not prompt for confirmation.
    '''
    # locals() == the declared parameters; None-valued optionals are expected
    # to be dropped by _call_az.
    return _call_az("az synapse kusto pool delete", locals())
def list_follower_database(name, resource_group, workspace_name):
    '''
    Returns a list of databases that are owned by this Kusto Pool and were followed by another Kusto Pool.

    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace
    '''
    # locals() == the declared parameters; _call_az turns them into CLI flags.
    return _call_az("az synapse kusto pool list-follower-database", locals())
def list_language_extension(name, resource_group, workspace_name):
    '''
    Returns a list of language extensions that can run within KQL queries.

    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace
    '''
    # locals() == the declared parameters; _call_az turns them into CLI flags.
    return _call_az("az synapse kusto pool list-language-extension", locals())
def start(name, resource_group, workspace_name, no_wait=None):
    '''
    Starts a Kusto pool.

    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace

    Optional Parameters:
    - no_wait -- Do not wait for the long-running operation to finish.
    '''
    # locals() == the declared parameters; _call_az turns them into CLI flags.
    return _call_az("az synapse kusto pool start", locals())
def stop(name, resource_group, workspace_name, no_wait=None):
    '''
    Stops a Kusto pool.

    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace

    Optional Parameters:
    - no_wait -- Do not wait for the long-running operation to finish.
    '''
    # locals() == the declared parameters; _call_az turns them into CLI flags.
    return _call_az("az synapse kusto pool stop", locals())
def wait(name, resource_group, workspace_name, created=None, custom=None, deleted=None, exists=None, interval=None, timeout=None, updated=None):
    '''
    Place the CLI in a waiting state until a condition of the synapse kusto pool is met.

    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace

    Optional Parameters:
    - created -- wait until created with 'provisioningState' at 'Succeeded'
    - custom -- Wait until the condition satisfies a custom JMESPath query. E.g. provisioningState!='InProgress', instanceView.statuses[?code=='PowerState/running']
    - deleted -- wait until deleted
    - exists -- wait until the resource exists
    - interval -- polling interval in seconds
    - timeout -- maximum wait in seconds
    - updated -- wait until updated with provisioningState at 'Succeeded'
    '''
    # locals() == the declared parameters; _call_az turns them into CLI flags.
    return _call_az("az synapse kusto pool wait", locals())
def create(name, resource_group, sku, workspace_name, enable_purge=None, enable_streaming_ingest=None, if_match=None, if_none_match=None, location=None, no_wait=None, optimized_autoscale=None, tags=None, workspace_uid=None):
    '''
    Create a Kusto pool.

    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - sku -- The SKU of the kusto pool.
    - workspace_name -- The name of the workspace

    Optional Parameters:
    - enable_purge -- A boolean value that indicates if the purge operations are enabled.
    - enable_streaming_ingest -- A boolean value that indicates if the streaming ingest is enabled.
    - if_match -- The ETag of the Kusto Pool. Omit this value to always overwrite the current Kusto Pool. Specify the last-seen ETag value to prevent accidentally overwriting concurrent changes.
    - if_none_match -- Set to '*' to allow a new Kusto Pool to be created, but to prevent updating an existing Kusto Pool. Other values will result in a 412 Pre-condition Failed response.
    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
    - no_wait -- Do not wait for the long-running operation to finish.
    - optimized_autoscale -- Optimized auto scale definition.
    - tags -- space-separated tags: key[=value] [key[=value] ...]. Use '' to clear existing tags.
    - workspace_uid -- The workspace unique identifier.
    '''
    # locals() == the declared parameters; _call_az turns them into CLI flags.
    return _call_az("az synapse kusto pool create", locals())
def update(name, resource_group, workspace_name, enable_purge=None, enable_streaming_ingest=None, if_match=None, no_wait=None, optimized_autoscale=None, sku=None, tags=None, workspace_uid=None):
    '''
    Update a Kusto Kusto Pool.

    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace

    Optional Parameters:
    - enable_purge -- A boolean value that indicates if the purge operations are enabled.
    - enable_streaming_ingest -- A boolean value that indicates if the streaming ingest is enabled.
    - if_match -- The ETag of the Kusto Pool. Omit this value to always overwrite the current Kusto Pool. Specify the last-seen ETag value to prevent accidentally overwriting concurrent changes.
    - no_wait -- Do not wait for the long-running operation to finish.
    - optimized_autoscale -- Optimized auto scale definition.
    - sku -- The SKU of the kusto pool.
    - tags -- space-separated tags: key[=value] [key[=value] ...]. Use '' to clear existing tags.
    - workspace_uid -- The workspace unique identifier.
    '''
    # locals() == the declared parameters; _call_az turns them into CLI flags.
    return _call_az("az synapse kusto pool update", locals())
def add_language_extension(name, resource_group, workspace_name, no_wait=None, value=None):
    '''
    Add a list of language extensions that can run within KQL queries.

    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace

    Optional Parameters:
    - no_wait -- Do not wait for the long-running operation to finish.
    - value -- The list of language extensions.
    '''
    # locals() == the declared parameters; _call_az turns them into CLI flags.
    return _call_az("az synapse kusto pool add-language-extension", locals())
def detach_follower_database(attached_database_configuration_name, kusto_pool_resource_id, name, resource_group, workspace_name, no_wait=None):
    '''
    Detaches all followers of a database owned by this Kusto Pool.

    Required Parameters:
    - attached_database_configuration_name -- Resource name of the attached database configuration in the follower cluster.
    - kusto_pool_resource_id -- Resource id of the cluster that follows a database owned by this cluster.
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace

    Optional Parameters:
    - no_wait -- Do not wait for the long-running operation to finish.
    '''
    # locals() == the declared parameters; _call_az turns them into CLI flags.
    return _call_az("az synapse kusto pool detach-follower-database", locals())
def remove_language_extension(name, resource_group, workspace_name, no_wait=None, value=None):
    '''
    Remove a list of language extensions that can run within KQL queries.

    Required Parameters:
    - name -- The name of the Kusto pool.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- The name of the workspace

    Optional Parameters:
    - no_wait -- Do not wait for the long-running operation to finish.
    - value -- The list of language extensions.
    '''
    # locals() == the declared parameters; _call_az turns them into CLI flags.
    return _call_az("az synapse kusto pool remove-language-extension", locals())
|
<gh_stars>0
import random
class Igralec:
    """A player: a display name plus a single-character board symbol."""

    def __init__(self, ime, simbol):
        self.ime = ime        # player name
        self.simbol = simbol  # board symbol, e.g. 'X' or 'O'

    @staticmethod
    def Preveri_simbol(simbol1, simbol2):
        """Return True iff both symbols are exactly one character long.

        Originally defined without ``self``/``@staticmethod``, so it was only
        callable as ``Igralec.Preveri_simbol(a, b)``; the decorator keeps that
        call style working and additionally makes instance calls valid.
        """
        return len(simbol1) == 1 and len(simbol2) == 1
class Igra :
    """Tic-tac-toe game state for two players.

    The board ``TTT`` has 10 slots; index 0 is unused so cells are addressed
    1..9 (bottom row 1-3, middle 4-6, top 7-9, as drawn by Igralna_povrsina).
    """
    def __init__(self, name1, name2, simbol1, simbol2) :
        self.name1 = name1        # player 1 name
        self.name2 = name2        # player 2 name
        self.simbol1 = simbol1    # player 1 symbol
        self.simbol2 = simbol2    # player 2 symbol
        self.score1 = 0           # player 1 games won
        self.score2 = 0           # player 2 games won
        self.stevec = 0           # moves played in the current game
        self.TTT = [' ',' ',' ',' ',' ',' ',' ',' ',' ',' '] # 10 slots; index 0 unused (board is 1-indexed)
    def Kdo_zacne(self) :
        """Pick a random starter; return (starter_name, other_name)."""
        if random.randint(0,1) == 0 :
            return self.name1, self.name2
        else :
            return self.name2, self.name1
    def Vstavi_simbol(self, simbol, indeks) :
        """Place `simbol` at 1-based cell `indeks` and count the move."""
        self.simbol = simbol
        self.indeks = int(indeks)
        self.stevec += 1
        self.TTT[self.indeks] = simbol
    def Preveri_zmago(self) :
        """Return True (and credit the winner's score, reset the move
        counter) if either player has three in a row; None otherwise."""
        # rows: cells (1,2,3), (4,5,6), (7,8,9)
        for k in range(1, 8, 3) :
            if (self.TTT[k] == self.TTT[k+1] == self.TTT[k+2] == self.simbol1) :
                self.score1 += 1
                self.stevec = 0
                return True
            if (self.TTT[k] == self.TTT[k+1] == self.TTT[k+2] == self.simbol2) :
                self.score2 += 1
                self.stevec = 0
                return True
        # columns: cells (1,4,7), (2,5,8), (3,6,9)
        for l in range(1, 4) :
            if (self.TTT[l] == self.TTT[l+3] == self.TTT[l+6] == self.simbol1) :
                self.score1 += 1
                self.stevec = 0
                return True
            if (self.TTT[l] == self.TTT[l+3] == self.TTT[l+6] == self.simbol2) :
                self.score2 += 1
                self.stevec = 0
                return True
        # diagonals: (1,5,9) and (3,5,7)
        d = 1
        if (self.TTT[d] == self.TTT[d+4] == self.TTT[d+8] == self.simbol1) or (self.TTT[d+2] == self.TTT[d+4] == self.TTT[d+6] == self.simbol1) :
            self.score1 += 1
            self.stevec = 0
            return True
        if (self.TTT[d] == self.TTT[d+4] == self.TTT[d+8] == self.simbol2) or (self.TTT[d+2] == self.TTT[d+4] == self.TTT[d+6] == self.simbol2) :
            self.score2 += 1
            self.stevec = 0
            return True
    def Preveri(self, x) :
        """Return True if cell `x` is already occupied."""
        self.x = x
        if self.TTT[self.x] != ' ':
            return True
        else :
            return False
    def Preveri_rezultat(self) :
        """Return True when all 9 moves are played (draw check)."""
        if self.stevec == 9 :
            return True
        else :
            return False
    def Igralna_povrsina(self) :
        """Print the board; row 7-8-9 on top, 1-2-3 at the bottom."""
        print('---------------\n',
        '| ' + str(self.TTT[7]) + ' | ' + str(self.TTT[8]) + ' | ' + str(self.TTT[9]) + ' | \n',
        '--------------\n',
        '| ' + str(self.TTT[4]) + ' | ' + str(self.TTT[5]) + ' | ' + str(self.TTT[6]) + ' | \n',
        '--------------\n',
        '| ' + str(self.TTT[1]) + ' | ' + str(self.TTT[2]) + ' | ' + str(self.TTT[3]) + ' | \n',
        '--------------')
    def Boljsi(self) :
        """Print who currently leads on score (Slovenian messages)."""
        if self.score1 == self.score2 :
            return print("Rezultat je izenačen.")
        if self.score1 > self.score2 :
            return print("{} vodi".format(self.name1), end=" ")
        else:
            return print("{} vodi".format(self.name2), end=" ")
    def Rezultat(self) :
        """Print the running score as 'score1 : score2'."""
        return print(" {} : {}".format(str(self.score1),str(self.score2)))
    def Sprazni(self) :
        """Clear the board for a new game (scores are kept)."""
        for i in range(10) :
            self.TTT[i] = ' '
    def Zmaga(self) :
        """Announce the winner by re-scanning the board (same patterns as
        Preveri_zmago, but prints instead of scoring)."""
        # rows
        for k in range(1, 8, 3) :
            if (self.TTT[k] == self.TTT[k+1] == self.TTT[k+2] == self.simbol1) :
                return print("{} je zmagal/a. HURA HURA HURA".format(self.name1))
            if (self.TTT[k] == self.TTT[k+1] == self.TTT[k+2] == self.simbol2) :
                return print("{} je zmagal/a. HURA HURA HURA".format(self.name2))
        # columns
        for l in range(1, 4, 1) :
            if (self.TTT[l] == self.TTT[l+3] == self.TTT[l+6] == self.simbol1) :
                return print("{} je zmagal/a. HURA HURA HURA".format(self.name1))
            if (self.TTT[l] == self.TTT[l+3] == self.TTT[l+6] == self.simbol2) :
                return print("{} je zmagal/a. HURA HURA HURA".format(self.name2))
        # diagonals
        d = 1
        if (self.TTT[d] == self.TTT[d+4] == self.TTT[d+8] == self.simbol1) :
            return print("{} je zmagal/a. HURA HURA HURA".format(self.name1))
        if (self.TTT[d+2] == self.TTT[d+4] == self.TTT[d+6] == self.simbol1) :
            return print("{} je zmagal/a. HURA HURA HURA".format(self.name1))
        if (self.TTT[d] == self.TTT[d+4] == self.TTT[d+8] == self.simbol2) :
            return print("{} je zmagal/a. HURA HURA HURA".format(self.name2))
        if (self.TTT[d+2] == self.TTT[d+4] == self.TTT[d+6] == self.simbol2) :
            return print("{} je zmagal/a. HURA HURA HURA".format(self.name2))
    def Igraj_ponovno(self, odgovor) :
        """Return True if `odgovor` is an affirmative 'ja' (any casing used)."""
        self.odgovor = odgovor
        if odgovor in ['JA', 'ja', 'Ja']:
            return True
        else:
            return False
|
import debug # pyflakes:ignore
import factory
import datetime
from django.conf import settings
from ietf.doc.models import Document, DocEvent, NewRevisionDocEvent, DocAlias, State, DocumentAuthor, StateDocEvent
from ietf.group.models import Group
def draft_name_generator(type_id, group, n):
    """Build a deterministic fake draft name such as
    ``draft-bogusperson-<acronym>-musings<n>`` (acronym falls back to
    'netherwhere' when no group is given)."""
    acronym = group.acronym if group else 'netherwhere'
    return '-'.join([type_id, 'bogusperson', acronym, 'musings']) + str(n)
class BaseDocumentFactory(factory.DjangoModelFactory):
    """Shared factory for ietf Document objects: fake title, rev 00,
    auto-generated name, a NewRevisionDocEvent and a DocAlias per instance."""
    class Meta:
        model = Document

    title = factory.Faker('sentence',nb_words=6)
    rev = '00'
    std_level_id = None
    intended_std_level_id = None
    # NOTE(review): evaluated once at class-definition (import) time, so every
    # instance shares the same timestamp; factory.LazyFunction would give a
    # fresh value per instance — confirm which behaviour is intended.
    time = datetime.datetime.now()
    expires = factory.LazyAttribute(lambda o: o.time+datetime.timedelta(days=settings.INTERNET_DRAFT_DAYS_TO_EXPIRE))

    @factory.lazy_attribute_sequence
    def name(self, n):
        # e.g. draft-bogusperson-<group>-musings<n>
        return draft_name_generator(self.type_id,self.group,n)

    # Every document gets an initial revision event and a primary alias.
    newrevisiondocevent = factory.RelatedFactory('ietf.doc.factories.NewRevisionDocEventFactory','doc')
    alias = factory.RelatedFactory('ietf.doc.factories.DocAliasFactory','document')

    @factory.post_generation
    def other_aliases(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
        # extracted: iterable of extra alias names to attach.
        if create and extracted:
            for alias in extracted:
                obj.docalias_set.create(name=alias)

    @factory.post_generation
    def states(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
        # extracted: iterable of (state_type_id, state_slug) pairs.
        if create and extracted:
            for (state_type_id,state_slug) in extracted:
                obj.set_state(State.objects.get(type_id=state_type_id,slug=state_slug))

    @factory.post_generation
    def authors(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
        # extracted: ordered iterable of Person objects to add as authors.
        if create and extracted:
            order = 0
            for person in extracted:
                DocumentAuthor.objects.create(document=obj, person=person, email=person.email(), order=order)
                order += 1

    @factory.post_generation
    def relations(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
        # extracted: iterable of (relationship_id, DocAlias-or-Document) pairs.
        if create and extracted:
            for (rel_id,docalias) in extracted:
                if isinstance(docalias,Document):
                    docalias = docalias.docalias_set.first()
                obj.relateddocument_set.create(relationship_id=rel_id,target=docalias)

    @classmethod
    def _after_postgeneration(cls, obj, create, results=None):
        """Save again the instance if creating and at least one hook ran."""
        if create and results:
            # Some post-generation hooks ran, and may have modified us.
            obj._has_an_event_so_saving_is_allowed = True
            obj.save()
#TODO remove this - rename BaseDocumentFactory to DocumentFactory
class DocumentFactory(BaseDocumentFactory):
    """Plain draft document belonging to the 'none' group."""
    type_id = 'draft'
    group = factory.SubFactory('ietf.group.factories.GroupFactory',acronym='none')
class IndividualDraftFactory(BaseDocumentFactory):
    """Individual (non-WG) draft; defaults to the 'active' draft state."""
    type_id = 'draft'
    group = factory.SubFactory('ietf.group.factories.GroupFactory',acronym='none')

    @factory.post_generation
    def states(obj, create, extracted, **kwargs):
        # extracted: optional iterable of (state_type_id, state_slug) pairs;
        # falls back to draft/active when none are supplied.
        if not create:
            return
        if extracted:
            for (state_type_id,state_slug) in extracted:
                obj.set_state(State.objects.get(type_id=state_type_id,slug=state_slug))
        else:
            obj.set_state(State.objects.get(type_id='draft',slug='active'))
class IndividualRfcFactory(IndividualDraftFactory):
    """Individual draft published as an RFC: adds an rfcNNNN alias and
    defaults to the 'rfc' draft state."""
    alias2 = factory.RelatedFactory('ietf.doc.factories.DocAliasFactory','document',name=factory.Sequence(lambda n: 'rfc%04d'%(n+1000)))

    @factory.post_generation
    def states(obj, create, extracted, **kwargs):
        # extracted: optional iterable of (state_type_id, state_slug) pairs;
        # falls back to draft/rfc when none are supplied.
        if not create:
            return
        if extracted:
            for (state_type_id,state_slug) in extracted:
                obj.set_state(State.objects.get(type_id=state_type_id,slug=state_slug))
        else:
            obj.set_state(State.objects.get(type_id='draft',slug='rfc'))
class WgDraftFactory(BaseDocumentFactory):
    """Working-group draft in the IETF stream; defaults to active + wg-doc."""
    type_id = 'draft'
    group = factory.SubFactory('ietf.group.factories.GroupFactory',type_id='wg')
    stream_id = 'ietf'

    @factory.post_generation
    def states(obj, create, extracted, **kwargs):
        # extracted: optional iterable of (state_type_id, state_slug) pairs;
        # default is both draft/active and draft-stream-ietf/wg-doc.
        if not create:
            return
        if extracted:
            for (state_type_id,state_slug) in extracted:
                obj.set_state(State.objects.get(type_id=state_type_id,slug=state_slug))
        else:
            obj.set_state(State.objects.get(type_id='draft',slug='active'))
            obj.set_state(State.objects.get(type_id='draft-stream-ietf',slug='wg-doc'))
class WgRfcFactory(WgDraftFactory):
    """WG draft published as an RFC: rfcNNNN alias, std level 'ps',
    default state draft/rfc."""
    alias2 = factory.RelatedFactory('ietf.doc.factories.DocAliasFactory','document',name=factory.Sequence(lambda n: 'rfc%04d'%(n+1000)))
    std_level_id = 'ps'

    @factory.post_generation
    def states(obj, create, extracted, **kwargs):
        # extracted: optional iterable of (state_type_id, state_slug) pairs;
        # falls back to draft/rfc when none are supplied.
        if not create:
            return
        if extracted:
            for (state_type_id,state_slug) in extracted:
                obj.set_state(State.objects.get(type_id=state_type_id,slug=state_slug))
        else:
            obj.set_state(State.objects.get(type_id='draft',slug='rfc'))
class CharterFactory(BaseDocumentFactory):
    """Charter document named after its WG; also wires itself (or an
    explicitly passed charter) onto the group."""
    type_id = 'charter'
    group = factory.SubFactory('ietf.group.factories.GroupFactory',type_id='wg')
    name = factory.LazyAttribute(lambda o: 'charter-ietf-%s'%o.group.acronym)

    @factory.post_generation
    def set_group_charter_document(obj, create, extracted, **kwargs):
        # extracted: optional Document to use as the group charter instead of obj.
        if not create:
            return
        obj.group.charter = extracted or obj
        obj.group.save()
class ConflictReviewFactory(BaseDocumentFactory):
    """Conflict-review ('conflrev') document linked to the draft it reviews."""
    type_id='conflrev'
    @factory.post_generation
    def review_of(obj, create, extracted, **kwargs):
        # Relate the review to the document under review; when no document
        # is supplied, create a fresh individual-submission draft to review.
        if not create:
            return
        if extracted:
            obj.relateddocument_set.create(relationship_id='conflrev',target=extracted.docalias_set.first())
        else:
            obj.relateddocument_set.create(relationship_id='conflrev',target=DocumentFactory(type_id='draft',group=Group.objects.get(type_id='individ')).docalias_set.first())
    @factory.post_generation
    def states(obj, create, extracted, **kwargs):
        # NOTE: unlike the draft factories above, 'extracted' here is an
        # iterable of State objects, not (type_id, slug) pairs.
        if not create:
            return
        if extracted:
            for state in extracted:
                obj.set_state(state)
        else:
            obj.set_state(State.objects.get(type_id='conflrev',slug='iesgeval'))
class DocAliasFactory(factory.DjangoModelFactory):
    """DocAlias whose name defaults to the target document's name."""
    class Meta:
        model = DocAlias
    document = factory.SubFactory('ietf.doc.factories.DocumentFactory')
    @factory.lazy_attribute
    def name(self):
        # Mirror the document name unless the caller overrides it.
        return self.document.name
class DocEventFactory(factory.DjangoModelFactory):
    """Generic document event; defaults to an 'added_comment' on a new doc."""
    class Meta:
        model = DocEvent
    type = 'added_comment'
    by = factory.SubFactory('ietf.person.factories.PersonFactory')
    doc = factory.SubFactory(DocumentFactory)
    desc = factory.Faker('sentence',nb_words=6)
    @factory.lazy_attribute
    def rev(self):
        # Record the document's revision at the time of the event.
        return self.doc.rev
class NewRevisionDocEventFactory(DocEventFactory):
    """Event recording that a new revision of a document was submitted."""
    class Meta:
        model = NewRevisionDocEvent
    type = 'new_revision'
    rev = '00'
    @factory.lazy_attribute
    def desc(self):
        # Human-readable summary combining the doc name and revision.
        return 'New version available %s-%s'%(self.doc.name,self.rev)
class StateDocEventFactory(DocEventFactory):
    """Event recording a state change, defaulting to the IESG state machine."""
    class Meta:
        model = StateDocEvent
    type = 'changed_state'
    state_type_id = 'draft-iesg'
    @factory.post_generation
    def state(obj, create, extracted, **kwargs):
        # 'extracted' is an optional (state type_id, slug) pair; defaults
        # to AD evaluation. The explicit save() persists the state field
        # set after the object was created.
        if not create:
            return
        if extracted:
            (state_type_id, state_slug) = extracted
            obj.state = State.objects.get(type_id=state_type_id,slug=state_slug)
        else:
            obj.state = State.objects.get(type_id='draft-iesg',slug='ad-eval')
        obj.save()
|
# This Python module is part of the PyRate software package.
#
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
This Python module contains tests for the linrate.py PyRate module.
"""
import os
import shutil
import tempfile
import unittest
from numpy import eye, array, ones
import numpy as np
from numpy.testing import assert_array_almost_equal
import pyrate.core.orbital
import tests.common
from pyrate.core import shared, ref_phs_est as rpe, config as cf, covariance as vcm_module
from pyrate.core.linrate import linear_rate
from pyrate import process, prepifg, conv2tif
from tests.common import (SML_TEST_DIR, prepare_ifgs_without_phase,
TEST_CONF_ROIPAC, pre_prepare_ifgs, remove_tifs)
def default_params():
    """Return the default parameter dictionary used by the linear-rate tests."""
    return dict(pthr=3, nsig=3, maxsig=2, parallel=1, processes=8)
class SinglePixelIfg(object):
    """Minimal stand-in for an interferogram holding a single pixel."""
    def __init__(self, timespan, phase):
        # Wrap the scalar phase in a 1x1 array to mimic real phase_data.
        self.phase_data = array([[phase]])
        self.time_span = timespan
class LinearRateTests(unittest.TestCase):
    """
    Tests the weighted least squares algorithm for determining
    the best fitting velocity
    """
    def setUp(self):
        # One single-pixel ifg per (time span, phase) observation.
        observations = [(0.1, 0.5), (0.7, 3.5), (0.8, 4),
                        (0.5, 2.5), (0.7, 3.5), (0.2, 1)]
        self.ifgs = [SinglePixelIfg(span, phs) for span, phs in observations]
    def test_linear_rate(self):
        # Simple test with one pixel and equal weighting
        expected_rate = array([[5.0]])
        expected_error = array([[0.836242010007091]])
        expected_samples = array([[5]])
        vcmt = eye(6, 6)
        mst = ones((6, 1, 1))
        mst[4] = 0  # drop the fifth observation from the MST
        rate, error, samples = linear_rate(self.ifgs, default_params(),
                                           vcmt, mst)
        assert_array_almost_equal(rate, expected_rate)
        assert_array_almost_equal(error, expected_error)
        assert_array_almost_equal(samples, expected_samples)
class LegacyEqualityTest(unittest.TestCase):
    """
    Tests equality with legacy data
    """
    @classmethod
    def setUpClass(cls):
        # Run the full preprocessing pipeline once for the whole class:
        # conversion, prepifg, orbit correction, reference phase, VCM, and
        # finally the linear rate in serial and two parallel modes.
        params = cf.get_config_params(TEST_CONF_ROIPAC)
        cls.temp_out_dir = tempfile.mkdtemp()
        params[cf.OUT_DIR] = cls.temp_out_dir
        params[cf.TMPDIR] = os.path.join(params[cf.OUT_DIR], cf.TMPDIR)
        shared.mkdir_p(params[cf.TMPDIR])
        conv2tif.main(params)
        prepifg.main(params)
        params[cf.REF_EST_METHOD] = 2
        xlks, _, crop = cf.transform_params(params)
        base_ifg_paths = cf.original_ifg_paths(
            params[cf.IFG_FILE_LIST], params[cf.OBS_DIR])
        dest_paths = cf.get_dest_paths(base_ifg_paths, crop, params, xlks)
        print(f"base_ifg_paths={base_ifg_paths}")
        print(f"dest_paths={dest_paths}")
        # start run_pyrate copy
        ifgs = pre_prepare_ifgs(dest_paths, params)
        mst_grid = tests.common.mst_calculation(dest_paths, params)
        refx, refy = process._ref_pixel_calc(dest_paths, params)
        # Estimate and remove orbit errors
        pyrate.core.orbital.remove_orbital_error(ifgs, params)
        ifgs = prepare_ifgs_without_phase(dest_paths, params)
        for ifg in ifgs:
            ifg.close()
        _, ifgs = process._ref_phase_estimation(dest_paths, params, refx, refy)
        ifgs[0].open()
        r_dist = vcm_module.RDist(ifgs[0])()
        ifgs[0].close()
        # Maximum variance per interferogram, used to weight the VCM.
        maxvar = [vcm_module.cvd(i, params, r_dist)[0] for i in dest_paths]
        for ifg in ifgs:
            ifg.open()
        vcmt = vcm_module.get_vcmt(ifgs, maxvar)
        # NOTE(review): each ifg is closed and immediately reopened here —
        # presumably to flush state to disk before the rate calculation;
        # confirm this round-trip is intentional.
        for ifg in ifgs:
            ifg.close()
            ifg.open()
        # Calculate linear rate map
        params[cf.PARALLEL] = 1
        cls.rate, cls.error, cls.samples = tests.common.calculate_linear_rate(
            ifgs, params, vcmt, mst_mat=mst_grid)
        params[cf.PARALLEL] = 2
        cls.rate_2, cls.error_2, cls.samples_2 = \
            tests.common.calculate_linear_rate(ifgs, params, vcmt,
                                               mst_mat=mst_grid)
        params[cf.PARALLEL] = 0
        # Calculate linear rate map
        cls.rate_s, cls.error_s, cls.samples_s = \
            tests.common.calculate_linear_rate(ifgs, params, vcmt,
                                               mst_mat=mst_grid)
        # Legacy reference results shipped with the test data.
        linrate_dir = os.path.join(SML_TEST_DIR, 'linrate')
        cls.rate_container = np.genfromtxt(
            os.path.join(linrate_dir, 'stackmap.csv'), delimiter=',')
        cls.error_container = np.genfromtxt(
            os.path.join(linrate_dir, 'errormap.csv'), delimiter=',')
        cls.samples_container = np.genfromtxt(
            os.path.join(linrate_dir, 'coh_sta.csv'), delimiter=',')
        for ifg in ifgs:
            ifg.close()
    @classmethod
    def tearDownClass(cls):
        # Remove the temporary output dir and any tifs created in OBS_DIR.
        shutil.rmtree(cls.temp_out_dir)
        params = cf.get_config_params(TEST_CONF_ROIPAC)
        remove_tifs(params[cf.OBS_DIR])
    def test_linear_rate_full_parallel(self):
        """
        python multiprocessing by rows vs serial
        """
        np.testing.assert_array_almost_equal(
            self.rate, self.rate_s, decimal=3)
    def test_linrate_error_parallel(self):
        """
        python multiprocessing by rows vs serial
        """
        np.testing.assert_array_almost_equal(
            self.error, self.error_s, decimal=3)
    def test_linrate_samples_parallel(self):
        """
        python multiprocessing by rows vs serial
        """
        np.testing.assert_array_almost_equal(
            self.samples, self.samples_s, decimal=3)
    def test_linrate_full_parallel_pixel(self):
        """
        python multiprocessing by pixel vs serial
        """
        np.testing.assert_array_almost_equal(
            self.rate_2, self.rate_s, decimal=3)
    def test_linrate_error_parallel_pixel(self):
        """
        python multiprocessing by pixel vs serial
        """
        np.testing.assert_array_almost_equal(
            self.error_2, self.error_s, decimal=3)
    def test_linrate_samples_parallel_pixel(self):
        """
        python multiprocessing pixel level vs serial
        """
        np.testing.assert_array_almost_equal(
            self.samples_2, self.samples_s, decimal=3)
    def test_linear_rate(self):
        """
        Compare with legacy data
        """
        np.testing.assert_array_almost_equal(
            self.rate_s, self.rate_container, decimal=3)
    def test_linrate_error(self):
        """
        Compare with legacy data
        """
        np.testing.assert_array_almost_equal(
            self.error_s, self.error_container, decimal=3)
    def test_linrate_samples(self):
        """
        Compare with legacy data
        """
        np.testing.assert_array_almost_equal(
            self.samples_s, self.samples_container, decimal=3)
# Allow running this test module directly (e.g. `python test_linrate.py`).
if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/python
"""
Configure filebrowser service
"""
import os
import sys
import json
import bcrypt
import logging
import coloredlogs
import argparse
from urllib.parse import quote, urljoin
from subprocess import run, call
import functions as func
### Enable logging
# Log to stdout so container/orchestrator log collectors pick it up.
logging.basicConfig(
    format='%(asctime)s [%(levelname)s] %(message)s',
    level=logging.INFO,
    stream=sys.stdout)
log = logging.getLogger(__name__)
### Enable argument parsing
# All CLI values arrive as JSON blobs and are decoded by argparse itself.
parser = argparse.ArgumentParser()
parser.add_argument('--opts', type=json.loads, help='Set script arguments')
parser.add_argument('--env', type=json.loads, help='Set script environment')
parser.add_argument('--user', type=json.loads, help='Load user settings')
parser.add_argument('--settings', type=json.loads, help='Load script settings')
args, unknown = parser.parse_known_args()
if unknown:
    log.error("Unknown arguments " + str(unknown))
### Load arguments
cli_opts = args.opts
cli_env = args.env
cli_user = args.user
cli_settings = args.settings
### Set log level
verbosity = cli_opts.get("verbosity")
log.setLevel(verbosity)
# Setup colored console logs
coloredlogs.install(fmt='%(asctime)s [%(levelname)s] %(message)s', level=verbosity, logger=log)
### Get envs
proxy_base_url = cli_env.get("PROXY_BASE_URL")
caddy_virtual_base_url = cli_env.get("CADDY_VIRTUAL_BASE_URL")
#fb_port = cli_env.get("FB_PORT")
#fb_base_url = cli_env.get("FB_BASE_URL")
#fb_root_dir = cli_env.get("FB_ROOT_DIR")
# Filebrowser settings come from the per-user config, not the environment.
fb_port = cli_user.get("filebrowser").get("port")
fb_base_url = cli_user.get("filebrowser").get("base_url")
fb_root_dir = cli_user.get("filebrowser").get("root_dir")
### Get user settings
user_name = cli_user.get("name")
user_group = cli_user.get("group")
user_password = cli_user.get("password")
user_home = cli_user.get("dirs").get("home").get("path")
### Clean up envs
application = "filebrowser"
proxy_base_url = func.clean_url(proxy_base_url)
host_base_url = func.clean_url(caddy_virtual_base_url)
fb_base_url = func.clean_url(fb_base_url)
### Set final base url
# NOTE(review): urljoin's result depends on trailing slashes in the cleaned
# URLs — confirm func.clean_url guarantees the expected form.
system_base_url = urljoin(host_base_url, proxy_base_url)
full_base_url = urljoin(system_base_url, fb_base_url)
log.info(f"{application} base URL: '{full_base_url}'")
### Set config and data paths
config_dir = os.path.join(user_home, ".config", application)
if not os.path.exists(config_dir):
    os.makedirs(config_dir)
db_path = os.path.join(user_home, f"{application}.db")
### Generate password hash
# NOTE(review): '<PASSWORD>' tokens below are redaction placeholders left by
# a credentials scrubber; restore the real expressions before running.
password = <PASSWORD>()
salt = bcrypt.gensalt()
hashed_password = bcrypt.hashpw(password, salt).decode('utf-8')
# NOTE(review): logging credentials (even hashed) is a security risk —
# consider removing these two lines.
log.info(f"{application} password: '{<PASSWORD>}'")
log.info(f"{application} hashed password: '{<PASSWORD>}'")
### Create config template
config_file = {
    "port": fb_port,
    "baseURL": full_base_url,
    "address": "",
    "log": "stdout",
    "database": db_path,
    "root": fb_root_dir,
    "username": user_name,
    "password": <PASSWORD>
}
### Write config file
config_path = os.path.join(config_dir, "settings.json")
config_json = json.dumps(config_file, indent = 4)
with open(config_path, "w") as f:
    f.write(config_json)
# fix permissions
log.info(f"setting permissions on '{config_dir}' to '{user_name}:{user_group}'")
func.recursive_chown(config_dir, user_name, user_group)
### Display final config
log.debug(f"{application} config: '{config_path}'")
log.debug(func.capture_cmd_stdout(f'cat {config_path}', cli_env))
# Combo helpers independent of GUI framework - these operate on
# SelectionCallbackProperty objects.
from __future__ import absolute_import, division, print_function
import weakref
from glue.core import Data, Subset
from glue.core.hub import HubListener
from glue.core.message import (DataReorderComponentMessage,
ComponentsChangedMessage,
DataCollectionAddMessage,
DataCollectionDeleteMessage,
DataUpdateMessage,
DataRenameComponentMessage)
from glue.external.echo import delay_callback, ChoiceSeparator
from glue.external.six import string_types
__all__ = ['ComponentIDComboHelper', 'ManualDataComboHelper',
'DataCollectionComboHelper', 'ComboHelper', 'BaseDataComboHelper']
def unique_data_iter(datasets):
    """
    Return a list with only Data objects, with duplicates removed, but
    preserving the original order.
    """
    unique = []
    for item in datasets:
        # Subset-like items are resolved to their parent Data object.
        data = item if isinstance(item, Data) else item.data
        if data not in unique:
            unique.append(data)
    return unique
class ComboHelper(HubListener):
    """
    Base class for any combo helper represented by a SelectionCallbackProperty.
    This stores the state and selection property and exposes the ``state``,
    ``selection`` and ``choices`` properties.
    Parameters
    ----------
    state : :class:`~glue.core.state_objects.State`
        The state to which the selection property belongs
    selection_property : :class:`~glue.external.echo.SelectionCallbackProperty`
        The selection property representing the combo.
    """
    def __init__(self, state, selection_property):
        # Hold the state weakly so the helper does not keep it alive.
        self._state = weakref.ref(state)
        self.selection_property = selection_property
    @property
    def state(self):
        """
        The state to which the selection property belongs.
        """
        # Dereference the weakref; returns None once the state is gone.
        return self._state()
    @property
    def selection(self):
        """
        The current selected value.
        """
        return getattr(self.state, self.selection_property)
    @selection.setter
    def selection(self, selection):
        return setattr(self.state, self.selection_property, selection)
    @property
    def choices(self):
        """
        The current valid choices for the combo.
        """
        # Choices are stored on the class-level callback property, keyed by
        # the state instance.
        prop = getattr(type(self.state), self.selection_property)
        return prop.get_choices(self.state)
    @choices.setter
    def choices(self, choices):
        # Delay callbacks so listeners fire once after the choices change.
        with delay_callback(self.state, self.selection_property):
            prop = getattr(type(self.state), self.selection_property)
            prop.set_choices(self.state, choices)
    @property
    def display(self):
        """
        The current display function for the combo (the function that relates
        the Python objects to the display label)
        """
        prop = getattr(type(self.state), self.selection_property)
        return prop.get_display_func(self.state)
    @display.setter
    def display(self, display):
        prop = getattr(type(self.state), self.selection_property)
        return prop.set_display_func(self.state, display)
    def _on_rename(self, msg):
        # If a component ID is renamed, we don't need to refresh because the
        # list of actual component IDs is the same as before. However, we do
        # need to trigger a refresh of any GUI combos that use this, so we
        # make the property notify a change. However, if we are inside a
        # delay_callback block, the property will not be enabled, and notify()
        # won't have any effect, in which case we set the 'force_next_sync'
        # option which means that when exiting from the delay_callback block,
        # this property will show up as having changed
        prop = getattr(type(self.state), self.selection_property)
        if prop.enabled(self.state):
            prop.notify(self.state, self.selection, self.selection)
        else:
            prop.force_next_sync(self.state)
class ComponentIDComboHelper(ComboHelper):
    """
    The purpose of this class is to set up a combo (represented by a
    SelectionCallbackProperty) showing componentIDs for one or more datasets, and to
    update these componentIDs if needed, for example if new components are added
    to a dataset, or if componentIDs are renamed. This is a GUI
    framework-independent implementation.
    Parameters
    ----------
    state : :class:`~glue.core.state_objects.State`
        The state to which the selection property belongs
    selection_property : :class:`~glue.external.echo.SelectionCallbackProperty`
        The selection property representing the combo.
    data_collection : :class:`~glue.core.data_collection.DataCollection`, optional
        The data collection to which the datasets belong - if specified,
        this is used to remove datasets from the combo when they are removed
        from the data collection.
    data : :class:`~glue.core.data.Data`, optional
        If specified, set up the combo for this dataset only and don't allow
        datasets to be added/removed
    numeric : bool, optional
        Show numeric components
    categorical : bool, optional
        Show categorical components
    pixel_coord : bool, optional
        Show pixel coordinate components
    world_coord : bool, optional
        Show world coordinate components
    derived : bool, optional
        Show derived components
    none : bool or str, optional
        Add an entry that means `None`. If a string, this is the display string
        that will be shown for the `None` entry, otherwise an empty string is
        shown.
    """
    def __init__(self, state, selection_property,
                 data_collection=None, data=None,
                 numeric=True, categorical=True,
                 pixel_coord=False, world_coord=False, derived=True, none=False):
        super(ComponentIDComboHelper, self).__init__(state, selection_property)
        # A string 'none' doubles as the display label for the None entry;
        # a plain bool only toggles the entry on or off.
        if isinstance(none, string_types):
            self._none = True
            self._none_label = none
        else:
            self._none = none
            self._none_label = ''
        def display_func_label(cid):
            if cid is None:
                return self._none_label
            else:
                return cid.label
        self.display = display_func_label
        self._numeric = numeric
        self._categorical = categorical
        self._pixel_coord = pixel_coord
        self._world_coord = world_coord
        self._derived = derived
        # With an explicit dataset the helper is locked to it; otherwise
        # datasets are managed via append_data/remove_data.
        if data is None:
            self._manual_data = False
            self._data = []
        else:
            self._manual_data = True
            self._data = [data]
        self._data_collection = data_collection
        if data_collection is None:
            self.hub = None
        else:
            if data_collection.hub is None:
                raise ValueError("Hub on data collection is not set")
            else:
                self.hub = data_collection.hub
        if data is not None:
            self.refresh()
    def clear(self):
        # Drop all datasets and empty the combo choices.
        self._data.clear()
        self.refresh()
    @property
    def numeric(self):
        # Whether numeric components are listed; setters re-derive choices.
        return self._numeric
    @numeric.setter
    def numeric(self, value):
        self._numeric = value
        self.refresh()
    @property
    def categorical(self):
        return self._categorical
    @categorical.setter
    def categorical(self, value):
        self._categorical = value
        self.refresh()
    @property
    def pixel_coord(self):
        return self._pixel_coord
    @pixel_coord.setter
    def pixel_coord(self, value):
        self._pixel_coord = value
        self.refresh()
    @property
    def world_coord(self):
        return self._world_coord
    @world_coord.setter
    def world_coord(self, value):
        self._world_coord = value
        self.refresh()
    @property
    def derived(self):
        return self._derived
    @derived.setter
    def derived(self, value):
        self._derived = value
        self.refresh()
    @property
    def none(self):
        return self._none
    @none.setter
    def none(self, value):
        # Same string-or-bool convention as in __init__.
        if isinstance(value, string_types):
            self._none = True
            self._none_label = value
        else:
            self._none = value
            self._none_label = ''
        self.refresh()
    def append_data(self, data, refresh=True):
        if self._manual_data:
            raise Exception("Cannot change data in ComponentIDComboHelper "
                            "initialized from a single dataset")
        # Subsets are resolved to their parent dataset.
        if isinstance(data, Subset):
            data = data.data
        # Adopt the dataset's hub on first use; afterwards all datasets must
        # share the same hub.
        if self.hub is None:
            if data.hub is not None:
                self.hub = data.hub
        elif data.hub is not self.hub:
            raise ValueError("Data Hub is different from current hub")
        if data not in self._data:
            self._data.append(data)
            if refresh:
                self.refresh()
    def remove_data(self, data):
        if self._manual_data:
            raise Exception("Cannot change data in ComponentIDComboHelper "
                            "initialized from a single dataset")
        if data in self._data:
            self._data.remove(data)
            self.refresh()
    def set_multiple_data(self, datasets):
        """
        Add multiple datasets to the combo in one go (and clear any previous datasets).
        Parameters
        ----------
        datasets : list
            The list of :class:`~glue.core.data.Data` objects to add
        """
        if self._manual_data:
            raise Exception("Cannot change data in ComponentIDComboHelper "
                            "initialized from a single dataset")
        # list.clear() does not exist on Python 2; fall back to slicing.
        try:
            self._data.clear()
        except AttributeError:  # PY2
            self._data[:] = []
        for data in unique_data_iter(datasets):
            self.append_data(data, refresh=False)
        self.refresh()
    @property
    def hub(self):
        return self._hub
    @hub.setter
    def hub(self, value):
        # Subscribing happens as a side effect of assigning a hub.
        self._hub = value
        if value is not None:
            self.register_to_hub(value)
    def refresh(self, *args):
        # Rebuild the full choices list: optional None entry, then per
        # dataset a labelled separator (when showing several datasets)
        # followed by main, derived, and coordinate component sections.
        choices = []
        if self._none:
            choices.append(None)
        for data in self._data:
            # Only derived components owned by this dataset are listed.
            derived_components = [cid for cid in data.derived_components if cid.parent is data]
            if len(self._data) > 1:
                if data.label is None or data.label == '':
                    choices.append(ChoiceSeparator('Untitled Data'))
                else:
                    choices.append(ChoiceSeparator(data.label))
            cids = [ChoiceSeparator('Main components')]
            for cid in data.primary_components:
                if cid not in data.coordinate_components:
                    comp = data.get_component(cid)
                    if ((comp.numeric and self.numeric) or
                        (comp.categorical and self.categorical)):
                        cids.append(cid)
            if len(cids) > 1:
                # Keep the 'Main components' separator only when at least
                # one other section will be shown; otherwise drop it.
                if self.pixel_coord or self.world_coord or (self.derived and len(derived_components) > 0):
                    choices += cids
                else:
                    choices += cids[1:]
            if self.numeric and self.derived:
                cids = [ChoiceSeparator('Derived components')]
                for cid in derived_components:
                    cids.append(cid)
                if len(cids) > 1:
                    choices += cids
            if self.pixel_coord or self.world_coord:
                cids = [ChoiceSeparator('Coordinate components')]
                if self.pixel_coord:
                    cids += data.pixel_component_ids
                if self.world_coord:
                    cids += data.world_component_ids
                if len(cids) > 1:
                    choices += cids
        # Triggers the ComboHelper.choices setter (and its callbacks).
        self.choices = choices
    def _filter_msg(self, msg):
        # NOTE(review): appears unused here — the subscriptions below use
        # inline lambdas instead; confirm before removing.
        return msg.data in self._data or msg.sender in self._data_collection
    def register_to_hub(self, hub):
        hub.subscribe(self, DataRenameComponentMessage,
                      handler=self._on_rename,
                      filter=lambda msg: msg.sender in self._data)
        hub.subscribe(self, DataReorderComponentMessage,
                      handler=self.refresh,
                      filter=lambda msg: msg.sender in self._data)
        hub.subscribe(self, ComponentsChangedMessage,
                      handler=self.refresh,
                      filter=lambda msg: msg.sender in self._data)
        if self._data_collection is not None:
            hub.subscribe(self, DataCollectionDeleteMessage,
                          handler=self._remove_data)
    def _remove_data(self, msg):
        self.remove_data(msg.data)
    def unregister(self, hub):
        hub.unsubscribe_all(self)
class BaseDataComboHelper(ComboHelper):
    """
    This is a base class for helpers for combo boxes that need to show a list
    of data objects.
    Parameters
    ----------
    state : :class:`~glue.core.state_objects.State`
        The state to which the selection property belongs
    selection_property : :class:`~glue.external.echo.SelectionCallbackProperty`
        The selection property representing the combo.
    data_collection : :class:`~glue.core.data_collection.DataCollection`
        The data collection to which the datasets belong - this is needed
        because if a dataset is removed from the data collection, we want to
        remove it here.
    """
    def __init__(self, state, selection_property, data_collection=None):
        super(BaseDataComboHelper, self).__init__(state, selection_property)
        def display_func_label(cid):
            return cid.label
        self.display = display_func_label
        self._component_id_helpers = []
        # Keep dependent component combos in sync with the data selection.
        self.state.add_callback(self.selection_property, self.refresh_component_ids)
        self._data_collection = data_collection
        if data_collection is not None:
            if data_collection.hub is None:
                raise ValueError("Hub on data collection is not set")
            else:
                self.hub = data_collection.hub
        else:
            self.hub = None
    def refresh(self, *args):
        # Show the current datasets, then sync the dependent component combos.
        self.choices = [data for data in self._datasets]
        self.refresh_component_ids()
    def refresh_component_ids(self, *args):
        # Point every registered component helper at the currently selected
        # dataset (if any) and rebuild its choices.
        data = getattr(self.state, self.selection_property)
        for helper in self._component_id_helpers:
            helper.clear()
            if data is not None:
                helper.append_data(data)
            # BUGFIX: refresh must happen inside the loop so *every* helper
            # is refreshed (previously only the last one was, and an empty
            # helper list raised NameError).
            helper.refresh()
    def add_component_id_combo(self, combo):
        # Register an additional component combo tied to this data combo.
        helper = ComponentIDComboHelper(combo)
        # BUGFIX: plain list.append — lists have no append_data method.
        self._component_id_helpers.append(helper)
        # NOTE(review): self._data is not set by this class (subclasses
        # define self._datasets) — confirm this method is still in use.
        if self._data is not None:
            helper.append_data(self._data)
    @property
    def hub(self):
        return self._hub
    @hub.setter
    def hub(self, value):
        # Subscribing happens as a side effect of assigning a hub.
        self._hub = value
        if value is not None:
            self.register_to_hub(value)
    def register_to_hub(self, hub):
        # Subclasses add their subscriptions here.
        pass
    def _on_data_update(self, msg):
        # A label change only needs a rename-style notification; any other
        # attribute change requires rebuilding the choices.
        if msg.attribute == 'label':
            self._on_rename(msg)
        else:
            self.refresh()
class ManualDataComboHelper(BaseDataComboHelper):
    """
    This is a helper for combo boxes that need to show a list of data objects
    that is manually curated.
    Datasets are added and removed using the
    :meth:`~ManualDataComboHelper.append_data` and
    :meth:`~ManualDataComboHelper.remove_data` methods.
    Parameters
    ----------
    state : :class:`~glue.core.state_objects.State`
        The state to which the selection property belongs
    selection_property : :class:`~glue.external.echo.SelectionCallbackProperty`
        The selection property representing the combo.
    data_collection : :class:`~glue.core.data_collection.DataCollection`
        The data collection to which the datasets belong - this is needed
        because if a dataset is removed from the data collection, we want to
        remove it here.
    """
    def __init__(self, state, selection_property, data_collection=None):
        super(ManualDataComboHelper, self).__init__(state, selection_property,
                                                    data_collection=data_collection)
        self._datasets = []
    def set_multiple_data(self, datasets):
        """
        Add multiple datasets to the combo in one go (and clear any previous datasets).
        Parameters
        ----------
        datasets : list
            The list of :class:`~glue.core.data.Data` objects to add
        """
        # list.clear() does not exist on Python 2; fall back to slicing.
        try:
            self._datasets.clear()
        except AttributeError:  # PY2
            self._datasets[:] = []
        for data in unique_data_iter(datasets):
            self.append_data(data, refresh=False)
        self.refresh()
    def append_data(self, data, refresh=True):
        if data in self._datasets:
            return
        # Adopt the dataset's hub if we don't have one yet.
        if self.hub is None and data.hub is not None:
            self.hub = data.hub
        self._datasets.append(data)
        if refresh:
            self.refresh()
    def remove_data(self, data):
        if data not in self._datasets:
            return
        self._datasets.remove(data)
        self.refresh()
    def register_to_hub(self, hub):
        super(ManualDataComboHelper, self).register_to_hub(hub)
        # Track attribute changes on our datasets and removals from the
        # owning data collection.
        hub.subscribe(self, DataUpdateMessage,
                      handler=self._on_data_update,
                      filter=lambda msg: msg.sender in self._datasets)
        hub.subscribe(self, DataCollectionDeleteMessage,
                      handler=lambda msg: self.remove_data(msg.data),
                      filter=lambda msg: msg.sender is self._data_collection)
class DataCollectionComboHelper(BaseDataComboHelper):
    """
    This is a helper for combo boxes that need to show a list of data objects
    that is always in sync with a :class:`~glue.core.data_collection.DataCollection`.
    Parameters
    ----------
    state : :class:`~glue.core.state_objects.State`
        The state to which the selection property belongs
    selection_property : :class:`~glue.external.echo.SelectionCallbackProperty`
        The selection property representing the combo.
    data_collection : :class:`~glue.core.data_collection.DataCollection`
        The data collection with which to stay in sync
    """
    def __init__(self, state, selection_property, data_collection):
        super(DataCollectionComboHelper, self).__init__(state, selection_property,
                                                        data_collection=data_collection)
        # Use the live collection itself as the dataset sequence so the
        # combo tracks additions/removals without copying.
        self._datasets = data_collection
        self.refresh()
    def register_to_hub(self, hub):
        super(DataCollectionComboHelper, self).register_to_hub(hub)
        # Label changes funnel through _on_data_update; add/delete events
        # on the collection trigger a full refresh.
        hub.subscribe(self, DataUpdateMessage,
                      handler=self._on_data_update,
                      filter=lambda msg: msg.sender in self._datasets)
        hub.subscribe(self, DataCollectionAddMessage,
                      handler=self.refresh,
                      filter=lambda msg: msg.sender is self._datasets)
        hub.subscribe(self, DataCollectionDeleteMessage,
                      handler=self.refresh,
                      filter=lambda msg: msg.sender is self._datasets)
|
# test/test_user_store.py
import unittest
from models.user import User, UserError
from models.user_store import UserStore
from elasticsearch import Elasticsearch
import time
from itsdangerous import (TimedJSONWebSignatureSerializer
as Serializer, BadSignature, SignatureExpired)
class TestUserStore(unittest.TestCase):
    """Integration tests for UserStore against a local Elasticsearch index.
    NOTE(review): the literal '<PASSWORD>' tokens in this class are
    redaction placeholders left by a credentials scrubber; they are not
    valid Python and must be restored before this module can run.
    """
    def setUp(self):
        # Dedicated test index so production data is never touched.
        self.config = {
            "host": "localhost",
            "port": 9200,
            "user_index": "unittest-test-index"
        }
        self.remove_test_index() # make sure there is no previous test index
        self.testname = "testname"
        self.testpass = "<PASSWORD>"
        self.testuser = {"username": self.testname, "password": <PASSWORD>}
        self.user_store = UserStore(configuration=self.config)
    def remove_test_index(self):
        # Delete the test index directly via the ES client if it exists.
        es = Elasticsearch([{"host": self.config['host'], "port": self.config['port']}])
        if es.indices.exists(index=self.config["user_index"]):
            es.indices.delete(index=self.config["user_index"])
    def add_test_user(self):
        # Helper: create and index a user with the test credentials.
        user = User({"username": self.testname})
        user.hash_password(<PASSWORD>)
        return self.user_store.add_user_to_index(user)
    def tearDown(self):
        self.remove_test_index() # make sure to remove test index
    def test_user_can_be_initialised(self):
        self.user_store = UserStore(self.config)
    def test_store_cannot_add_user_to_index_without_password(self):
        # Indexing a user without a password hash must raise UserError.
        user = User({"username": self.testname})
        error = None
        try:
            self.user_store.add_user_to_index(user)
        except UserError as e:
            error = e
        self.assertNotEqual(error, None)
        self.assertEqual(error.message, "Cannot store user without a password")
    def test_store_can_add_user_to_index_with_password(self):
        user = User({"username": self.testname})
        user.hash_password(<PASSWORD>)
        self.user_store.add_user_to_index(user)
        newuser = self.user_store.get_user_from_index(self.testname)
        self.assertEqual(newuser.username, self.testname)
        self.assertEqual(newuser.password_hash, user.password_hash)
    def test_store_cannot_delete_user_from_index_without_password(self):
        self.add_test_user()
        user = User({"username": self.testname})
        error = None
        try:
            self.user_store.delete_user_from_index(user)
        except UserError as e:
            error = e
        self.assertNotEqual(error, None)
        self.assertEqual(error.message, "Cannot delete user without a password")
    def test_store_can_delete_user_from_index_with_password(self):
        user = User({"username": self.testname})
        user.hash_password(<PASSWORD>)
        self.user_store.add_user_to_index(user)
        self.user_store.delete_user_from_index(user)
        bool_exists = self.user_store.user_exists(self.testname)
        self.assertEqual(bool_exists, False)
    def test_store_can_check_user_does_not_exists(self):
        val = self.user_store.user_exists(self.testname)
        self.assertFalse(val)
    def test_store_can_check_user_does_exists(self):
        user = self.add_test_user()
        self.assertTrue(self.user_store.user_exists(user.user_id))
    def test_store_can_register_new_user(self):
        user = self.user_store.register_user(self.testname, self.testpass)
        self.assertTrue(self.user_store.user_exists(user.user_id))
        retrieved_user = self.user_store.get_user_from_index(username=self.testname)
        self.assertEqual(user.username, retrieved_user.username)
    def test_store_can_get_existing_user(self):
        self.add_test_user()
        user = self.user_store.get_user(self.testname)
        self.assertEqual(self.testname, user.username)
    def test_store_cannot_update_incorrect_password(self):
        # Updating with a wrong current password must raise UserError.
        self.add_test_user()
        user = self.user_store.get_user_from_index(username=self.testname)
        error = None
        try:
            self.user_store.update_password(user.username, "<PASSWORD>", "<PASSWORD>")
        except UserError as e:
            error = e
        self.assertNotEqual(error, None)
        self.assertEqual(error.message, "Incorrect password")
    def test_store_can_update_password(self):
        self.add_test_user()
        user = self.user_store.get_user_from_index(username=self.testname)
        self.user_store.update_password(user.username, self.testpass, "<PASSWORD>")
        user = self.user_store.get_user_from_index(username=self.testname)
        self.assertTrue(user.verify_password("<PASSWORD>"))
    def test_store_cannot_delete_user_without_password_hash(self):
        self.add_test_user()
        user = User({"username": self.testname})
        error = None
        try:
            self.user_store.delete_user(user)
        except UserError as e:
            error = e
        self.assertNotEqual(error, None)
        self.assertEqual(error.message, "Cannot delete user without a password")
    def test_store_can_delete_user(self):
        user = self.add_test_user()
        self.user_store.delete_user(user)
        self.assertEqual(self.user_store.user_exists(self.testname), False)
    def test_user_store_can_generate_auth_token(self):
        # The token payload must round-trip through the store's serializer.
        user = self.add_test_user()
        token = self.user_store.generate_auth_token(user.user_id, expiration=600)
        s = Serializer(self.user_store.secret_key)
        data = s.loads(token)
        self.assertEqual(user.user_id, data["user_id"])
    def test_user_auth_token_can_expire(self):
        # Tiny expiration plus a sleep forces SignatureExpired on load.
        user = self.add_test_user()
        token = self.user_store.generate_auth_token(user.user_id, expiration=0.1)
        time.sleep(1)
        s = Serializer(self.user_store.secret_key)
        error = None
        try:
            s.loads(token)
        except SignatureExpired as err:
            error = err
        self.assertNotEqual(error, None)
    def test_user_object_can_verify_auth_token(self):
        user = self.add_test_user()
        token = self.user_store.generate_auth_token(user.user_id, expiration=0.1)
        verified_user = self.user_store.verify_auth_token(token)
        self.assertEqual(verified_user.user_id, user.user_id)
|
import codecs
import csv
import argparse
import re
# Output files produced by generate_files().
METRIC_FILE = 'logs/chatbot_metrics.txt'
CLASSES_FILE = 'logs/class_correspondances.csv'
PROPERTIES_FILE = 'logs/property_correspondances.csv'
# Matches the shortest text between square brackets, e.g. "[INFO]" -> "INFO".
# Raw string: '\[' in a plain literal is an invalid escape sequence
# (DeprecationWarning since Python 3.6, slated to become a SyntaxError).
UNDER_BRACKETS_RE = re.compile(r'\[(.*?)\]')
# Timestamp layout used in the log lines.
DATE_FORMAT = '%Y-%m-%d %H:%M:%S,%f'
# Column orders for the two CSV reports; also used as the extraction key
# order when building each row.
CSV_PROPERTIES_HEADER = ['domain_field',
                         'domain_class',
                         'property_uri',
                         'field_range',
                         'decision',
                         'field_type',
                         'field_is_facet']
CSV_CLASSES_HEADER = ['field_name',
                     'class_uri',
                     'decision',
                     'field_type',
                     'field_is_facet']
CSV_DELIMITER = '|'
REPLACE_DELIMITER = ' '
# NOTE(review): "TREMPLATE" is a typo for "TEMPLATE"; the name is kept so any
# other code referencing it keeps working.
METRIC_FILE_TREMPLATE = '''nb_semantized_dataset: {}
nb_canceled_semantization: {}
nb_failed_semantization: {}
avg_server_time: {}sec
avg_client_time: {}sec
avg_total_time: {}sec'''
def generate_files(datasets):
    """Write the class/property correspondance CSVs and the metrics summary.

    :param datasets: aggregate dict built by parse_line() with keys
        'nb_semantization_begin', 'nb_semantization_finished',
        'nb_semantization_failed' and 'datasets' (per-dataset timings and
        class/property correspondances).
    """
    server_time_avgs = []
    client_time_avgs = []
    # newline='' lets csv.writer control line endings itself, as required by
    # the csv module docs (codecs.open produced doubled newlines on Windows).
    with open(PROPERTIES_FILE, "w", newline="") as properties_file, \
            open(CLASSES_FILE, "w", newline="") as classes_file:
        properties_file_writer = csv.writer(properties_file, delimiter=CSV_DELIMITER)
        properties_file_writer.writerow(CSV_PROPERTIES_HEADER)
        classes_file_writer = csv.writer(classes_file, delimiter=CSV_DELIMITER)
        classes_file_writer.writerow(CSV_CLASSES_HEADER)
        # Dataset ids are not needed here, only the per-dataset payloads.
        for dataset_semantization in datasets['datasets'].values():
            # Metrics update: average this dataset's timings when both exist.
            if dataset_semantization['server_time'] and dataset_semantization['client_time']:
                server_time_avgs.append(sum(dataset_semantization['server_time'])/len(dataset_semantization['server_time']))
                client_time_avgs.append(sum(dataset_semantization['client_time'])/len(dataset_semantization['client_time']))
            # CSV update: one row per correspondance, columns in header order.
            for class_correspondance in dataset_semantization['correspondances']['classes']:
                classes_file_writer.writerow(
                    [class_correspondance[column] for column in CSV_CLASSES_HEADER])
            for property_correspondance in dataset_semantization['correspondances']['properties']:
                properties_file_writer.writerow(
                    [property_correspondance[column] for column in CSV_PROPERTIES_HEADER])
    # Semantizations that started but never finished were aborted by the user.
    nb_aborted_semantization = datasets['nb_semantization_begin'] - datasets['nb_semantization_finished']
    with open(METRIC_FILE, 'w') as metric_file:
        # NOTE(review): when no timing data exists the metric file is left
        # empty (truncated) — preserved historical behaviour.
        if server_time_avgs and client_time_avgs:
            avg_server_time = sum(server_time_avgs)/len(server_time_avgs)
            avg_client_time = sum(client_time_avgs)/len(client_time_avgs)
            avg_total_time = avg_server_time + avg_client_time
            metric_file.write(METRIC_FILE_TREMPLATE.format(datasets['nb_semantization_finished'],
                                                           nb_aborted_semantization,
                                                           datasets['nb_semantization_failed'],
                                                           avg_server_time,
                                                           avg_client_time,
                                                           avg_total_time))
def parse_line(line, datasets):
    """Parse one log line and update the *datasets* aggregate in place.

    Dispatches on the line content; the bracketed tokens extracted with
    UNDER_BRACKETS_RE are positional — index 3 is the dataset identifier,
    index 5 onward are event-specific fields.
    """
    strings_under_brackets = re.findall(UNDER_BRACKETS_RE, line)
    if strings_under_brackets:
        # Token 3 is the "<dataset>@<domain>" identifier present on every line.
        dataset_id = strings_under_brackets[3]
        if not datasets['datasets'].get(dataset_id):
            datasets['datasets'][dataset_id] = {'server_time': [], 'client_time':[], 'correspondances': {'properties': [], 'classes': []}}
        if 'Starting semantization' in line:
            # Starting semantization:
            # e.g:[2018-12-13 08:34:09,391] [INFO] [correspondance_api] [roman-emperors@public] Starting semantization
            datasets['nb_semantization_begin'] += 1
        elif 'semantization complete' in line:
            # Semantization complete:
            # e.g:[2018-12-13 09:41:12,713] [INFO] [correspondance_api] [roman-emperors@public] semantization complete
            datasets['nb_semantization_finished'] += 1
        elif 'No correspondances found' in line:
            # No classes found for this dataset
            # e.g:[2018-12-14 09:43:56,440] [INFO] [correspondance_api] [roman-emperors@public] No correspondances found
            datasets['nb_semantization_failed'] += 1
        elif '[Time]' in line:
            # Semantization time:
            # e.g:[2019-04-18 10:22:17,209] [INFO] [correspondance_api] [roman-emperors@public] [Time]...
            # server_time:[42] client_time:[35]
            datasets['datasets'][dataset_id]['server_time'].append(int(strings_under_brackets[5]))
            datasets['datasets'][dataset_id]['client_time'].append(int(strings_under_brackets[6]))
        elif len(strings_under_brackets) == 12:
            # Property correspondance — identified purely by its token count.
            # e.g:[2018-12-13 09:41:12,680] [INFO] [correspondance_api] [roman-emperors@public]...
            # [Property] [CONFIRMED] field_domain:[name] class_domain[Royalty]...
            # -- uri:[http://www.loc.gov/mads/rdf/v1#birthPlace] --> field_range:[birth_cty]
            # field_type:[text] field_is_facet:[False]
            property_correspondance = {'decision': strings_under_brackets[5],
                                       'domain_field': strings_under_brackets[6],
                                       'domain_class': strings_under_brackets[7],
                                       'property_uri': strings_under_brackets[8],
                                       'field_range': strings_under_brackets[9],
                                       'field_type': strings_under_brackets[10],
                                       'field_is_facet': strings_under_brackets[11]}
            datasets['datasets'][dataset_id]['correspondances']['properties'].append(property_correspondance)
        elif len(strings_under_brackets) == 10:
            # Class correspondance — also identified by token count.
            # e.g: [2018-12-13 09:41:12,680] [INFO] [correspondance_api] [roman-emperors@public] [Class] [CONFIRMED]...
            # field:[birth_cty] uri:[http://dbpedia.org/ontology/Settlement]
            # field_type:[text] field_is_facet:[False]
            class_correspondance = {'decision': strings_under_brackets[5],
                                    'field_name': strings_under_brackets[6],
                                    'class_uri': strings_under_brackets[7],
                                    'field_type': strings_under_brackets[8],
                                    'field_is_facet': strings_under_brackets[9]}
            datasets['datasets'][dataset_id]['correspondances']['classes'].append(class_correspondance)
def main():
    """Parse CLI arguments, analyse the given log file and generate the reports."""
    parser = argparse.ArgumentParser(prog='log_analyser', description='return statistics on chatbot usage')
    parser.add_argument('log_file_path', metavar='FP', type=str, nargs='+',
                        help='Path to the log file to analyse')
    args = parser.parse_args()
    datasets = {'nb_semantization_begin': 0, 'nb_semantization_finished': 0, 'nb_semantization_failed': 0, 'datasets': {}}
    # Only the first path is analysed, as before.
    with open(args.log_file_path[0]) as fp:
        for raw_line in fp:
            parse_line(raw_line.strip(), datasets)
    generate_files(datasets)
# Allow use both as a script and as an importable module.
if __name__ == "__main__":
    main()
|
# Databricks notebook source exported at Mon, 14 Mar 2016 03:21:05 UTC
# MAGIC %md
# MAGIC **SOURCE:** This is from the Community Edition of databricks and has been added to this databricks shard at [/#workspace/scalable-data-science/xtraResources/edXBigDataSeries2015/CS100-1x](/#workspace/scalable-data-science/xtraResources/edXBigDataSeries2015/CS100-1x) as extra resources for the project-focussed course [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/) that is prepared by [<NAME>](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [<NAME>](https://www.linkedin.com/in/sivanand), and *supported by* [](https://databricks.com/)
# MAGIC and
# MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome).
# COMMAND ----------
# MAGIC %md
# MAGIC <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-nd/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/">Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License</a>.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Module Three Lectures
# COMMAND ----------
# MAGIC %md
# MAGIC ### Lecture 5: Semi-Structured Data
# COMMAND ----------
displayHTML('https://youtube.com/embed/qzMs9Sq_DHw')
# COMMAND ----------
displayHTML('https://youtube.com/embed/pMSGGZVSwqo')
# COMMAND ----------
displayHTML('https://youtube.com/embed/NJyBQ-cQ3Ac')
# COMMAND ----------
displayHTML('https://youtube.com/embed/G_67yUxdDbU')
# COMMAND ----------
displayHTML('https://youtube.com/embed/Llof8ZgCHFE')
# COMMAND ----------
displayHTML('https://youtube.com/embed/KjzoBzCxHMs')
# COMMAND ----------
displayHTML('https://youtube.com/embed/25YMAapjJgw')
# COMMAND ----------
displayHTML('https://youtube.com/embed/otrnf8MQ8S8')
# COMMAND ----------
displayHTML('https://youtube.com/embed/8vpmMbmUAiA')
# COMMAND ----------
displayHTML('https://youtube.com/embed/Wc7zJG-N2B8')
# COMMAND ----------
displayHTML('https://youtube.com/embed/c2MFJI_NWVw')
# COMMAND ----------
# MAGIC %md
# MAGIC ### Lecture 6: Structured Data
# COMMAND ----------
displayHTML('https://youtube.com/embed/lODYQTgyqLk')
# COMMAND ----------
displayHTML('https://youtube.com/embed/BZuv__KF4qU')
# COMMAND ----------
displayHTML('https://youtube.com/embed/khFzRxjk2Tg')
# COMMAND ----------
displayHTML('https://youtube.com/embed/tAepBMlGvak')
# COMMAND ----------
displayHTML('https://youtube.com/embed/XAyWtVtBTlI')
# COMMAND ----------
displayHTML('https://youtube.com/embed/Zp0EF2Dghik')
# COMMAND ----------
displayHTML('https://youtube.com/embed/iAqgcaKERHM')
# COMMAND ----------
displayHTML('https://youtube.com/embed/kaX4I2jENJc')
# COMMAND ----------
displayHTML('https://youtube.com/embed/tBsNkJyFr2w') |
from typing import Optional
import torch
from torch.nn import functional as F
def aa_to_rotmat(theta: torch.Tensor):
    """
    Convert axis-angle representation to rotation matrix.
    Goes through an intermediate unit quaternion.
    Args:
        theta (torch.Tensor): Tensor of shape (B, 3) containing axis-angle representations.
    Returns:
        torch.Tensor: Corresponding rotation matrices with shape (B, 3, 3).
    """
    # Rotation angle per batch element; the epsilon avoids dividing by a zero norm.
    angle = torch.norm(theta + 1e-8, p=2, dim=1, keepdim=True)
    axis = torch.div(theta, angle)
    half_angle = angle * 0.5
    # Unit quaternion (w, x, y, z) = (cos(a/2), sin(a/2) * axis).
    quat = torch.cat([torch.cos(half_angle), torch.sin(half_angle) * axis], dim=1)
    return quat_to_rotmat(quat)
def quat_to_rotmat(quat: torch.Tensor) -> torch.Tensor:
    """
    Convert quaternion representation to rotation matrix.
    Args:
        quat (torch.Tensor) of shape (B, 4); 4 <===> (w, x, y, z).
    Returns:
        torch.Tensor: Corresponding rotation matrices with shape (B, 3, 3).
    """
    # Normalize so non-unit quaternions are accepted.
    q = quat / quat.norm(p=2, dim=1, keepdim=True)
    w, x, y, z = q.unbind(dim=1)
    ww, xx, yy, zz = w * w, x * x, y * y, z * z
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z
    # Standard quaternion-to-matrix formula, row-major.
    entries = [ww + xx - yy - zz, 2 * xy - 2 * wz,   2 * wy + 2 * xz,
               2 * wz + 2 * xy,   ww - xx + yy - zz, 2 * yz - 2 * wx,
               2 * xz - 2 * wy,   2 * wx + 2 * yz,   ww - xx - yy + zz]
    return torch.stack(entries, dim=1).view(quat.size(0), 3, 3)
def rot6d_to_rotmat(x: torch.Tensor) -> torch.Tensor:
    """
    Convert 6D rotation representation to 3x3 rotation matrix.
    Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019
    Args:
        x (torch.Tensor): (B,6) Batch of 6-D rotation representations.
    Returns:
        torch.Tensor: Batch of corresponding rotation matrices with shape (B,3,3).
    """
    x = x.reshape(-1,2,3).permute(0, 2, 1).contiguous()
    a1 = x[:, :, 0]
    a2 = x[:, :, 1]
    # Gram-Schmidt: b1 = normalized a1, b2 = a2 minus its projection on b1.
    b1 = F.normalize(a1)
    b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1)
    # dim=1 must be explicit: torch.cross without dim is deprecated and picks
    # the FIRST dimension of size 3 — for a batch of exactly 3 rotations that
    # was dim 0 (the batch axis), yielding wrong results.
    b3 = torch.cross(b1, b2, dim=1)
    return torch.stack((b1, b2, b3), dim=-1)
def perspective_projection(points: torch.Tensor,
                           translation: torch.Tensor,
                           focal_length: torch.Tensor,
                           camera_center: Optional[torch.Tensor] = None,
                           rotation: Optional[torch.Tensor] = None) -> torch.Tensor:
    """
    Computes the perspective projection of a set of 3D points.
    Args:
        points (torch.Tensor): Tensor of shape (B, N, 3) containing the input 3D points.
        translation (torch.Tensor): Tensor of shape (B, 3) containing the 3D camera translation.
        focal_length (torch.Tensor): Tensor of shape (B, 2) containing the focal length in pixels.
        camera_center (torch.Tensor): Tensor of shape (B, 2) containing the camera center in pixels.
        rotation (torch.Tensor): Tensor of shape (B, 3, 3) containing the camera rotation.
    Returns:
        torch.Tensor: Tensor of shape (B, N, 2) containing the projection of the input points.
    """
    batch_size = points.shape[0]
    device, dtype = points.device, points.dtype
    # Default extrinsics: identity rotation, principal point at the origin.
    if rotation is None:
        rotation = torch.eye(3, device=device, dtype=dtype).unsqueeze(0).expand(batch_size, -1, -1)
    if camera_center is None:
        camera_center = torch.zeros(batch_size, 2, device=device, dtype=dtype)
    # Intrinsic camera matrix K.
    K = torch.zeros([batch_size, 3, 3], device=device, dtype=dtype)
    K[:, 0, 0] = focal_length[:, 0]
    K[:, 1, 1] = focal_length[:, 1]
    K[:, 2, 2] = 1.
    K[:, :-1, -1] = camera_center
    # World -> camera frame: x_cam = R @ x + t  (batched via matmul with R^T).
    cam_points = torch.matmul(points, rotation.transpose(1, 2))
    cam_points = cam_points + translation.unsqueeze(1)
    # Perspective divide by depth.
    cam_points = cam_points / cam_points[:, :, -1].unsqueeze(-1)
    # Camera intrinsics -> pixel coordinates; drop the homogeneous component.
    pixel_points = torch.matmul(cam_points, K.transpose(1, 2))
    return pixel_points[:, :, :-1]
<reponame>Stegallo/adventofcode
from .common import AoCDay
class Day(AoCDay):
    """Solver for puzzle day 16: parses field rules, "your ticket" and
    "nearby tickets" sections, then computes the ticket-scanning error rate
    (part 1) and the product of the "departure" fields (part 2)."""
    def __init__(self):
        super().__init__(16)
    def _preprocess_input(self):
        # Keep the raw input lines; sections are separated by blank lines.
        self.__input = [i for i in self._input_data]
    def _calculate_1(self):
        """Sum every nearby-ticket value that satisfies no rule at all."""
        rules = {}
        others = []
        rules_flag = True
        other = False
        for i in self.__input:
            # A blank line ends the rules section.
            if i == "":
                rules_flag = False
            if i == "your ticket:":
                continue
            if i == "nearby tickets:":
                other = True
                continue
            if rules_flag:
                # "name: a-b or c-d" -> rules["name"] = ["a-b", "c-d"]
                rules[i[: i.index(":")]] = i[i.index(":") + 2 :].split(" or ")
            if other:
                others.append(i.split(","))
        result = 0
        for i in others:
            for j in i:
                valid = False
                # A value is valid if it falls into either range of any rule.
                for k, v in rules.items():
                    for c in range(2):
                        a = int(v[c].split("-")[0])
                        b = int(v[c].split("-")[1])
                        if a <= int(j) <= b:
                            valid = True
                if not valid:
                    result += int(j)
        return result
    def _calculate_2(self):
        """Deduce which field sits at each position, then multiply the
        "departure*" values of your own ticket."""
        rules = {}
        yours = []
        others = []
        rules_flag = True
        your = False
        other = False
        for i in self.__input:
            if i == "":
                rules_flag = False
            if i == "your ticket:":
                your = True
                continue
            if i == "nearby tickets:":
                other = True
                continue
            if rules_flag:
                rules[i[: i.index(":")]] = i[i.index(":") + 2 :].split(" or ")
            if your:
                # The single line after "your ticket:".
                yours = i.split(",")
                your = False
            if other:
                others.append(i.split(","))
        # Discard tickets containing any value valid under no rule.
        valid_others = [i for i in others if is_valid(i, rules)]
        # locations[p] = candidate field names for ticket position p;
        # start with every rule as a candidate.
        locations = []
        for p in valid_others[0]:
            locations.append([r for r in rules])
        for i in valid_others:
            for p, j in enumerate(i):
                # All rules this value could belong to.
                canbe = []
                for k, v in rules.items():
                    for c in range(2):
                        a = int(v[c].split("-")[0])
                        b = int(v[c].split("-")[1])
                        if a <= int(j) <= b:
                            canbe.append(k)
                # Intersect the running candidates with this ticket's options.
                new_poss_loc = []
                for l in locations[p]:
                    if l not in canbe:
                        ...
                    else:
                        new_poss_loc.append(l)
                locations[p] = new_poss_loc
        # Constraint propagation: a position pinned to one field removes that
        # field from every other position. 100 passes is an upper bound on
        # the fixpoint iteration, not a tuned value.
        iters = 0
        while iters < 100:
            for ci, i in enumerate(locations):
                if len(i) == 1:
                    for cj, j in enumerate(locations):
                        if ci == cj:
                            continue
                        x = []
                        for k in j:
                            if i[0] == k:
                                ...
                            else:
                                x.append(k)
                        locations[cj] = x
            iters += 1
        result = 1
        for c, i in enumerate(locations):
            # Assumes propagation converged: each position has exactly one name.
            if "departure" in i[0]:
                result *= int(yours[c])
        return result
def is_valid(i, rules):
    """Return True if every field value on ticket *i* satisfies at least one
    rule range in *rules* ({name: ["a-b", "c-d", ...]}).

    Fixes the original's NameError on an empty ticket (`valid` was returned
    without ever being bound) and generalizes from exactly two ranges per
    rule to any number; an empty ticket is vacuously valid.
    """
    for j in i:
        value = int(j)
        satisfied = False
        for ranges in rules.values():
            for rng in ranges:
                low, high = (int(part) for part in rng.split("-"))
                if low <= value <= high:
                    satisfied = True
        if not satisfied:
            # One value matching no rule invalidates the whole ticket.
            return False
    return True
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from matplotlib.ticker import EngFormatter, ScalarFormatter
def clip(value_before_switch, value_after_switch, t_switch, t):
    """
    Logical step function of time: yields value_before_switch up to and
    including t_switch, value_after_switch afterwards.
    """
    return value_before_switch if t <= t_switch else value_after_switch
class Clipper:
    """
    Class helper. Rather than using clip(var, var1, t_switch, t), defines
    var as a function of the time: var(t).
    """
    def __init__(self, value_before_switch, value_after_switch,
                 trigger_value):
        self.value_before_switch = value_before_switch
        self.value_after_switch = value_after_switch
        self.trigger_value = trigger_value

    def __call__(self, t):
        # Step behaviour: the switch happens strictly after trigger_value.
        if t <= self.trigger_value:
            return self.value_before_switch
        return self.value_after_switch
def make_patch_spines_invisible(ax):
    """
    Hide an axes' background patch and all of its spines while keeping the
    frame enabled. Helper from the matplotlib gallery
    ("Multiple Yaxis With Spines").
    """
    ax.set_frame_on(True)
    ax.patch.set_visible(False)
    for spine in ax.spines.values():
        spine.set_visible(False)
def plot_world_state(world2, title=None, dist_spines=0.09):
    """
    Plots world state from a World2 instance.

    Draws the five World2 series (P, POLR, CI, QL, NR) against time on one
    figure, each with its own left-hand y-axis offset by *dist_spines*
    axes-fraction units.

    :param world2: object exposing .time and the series .p/.polr/.ci/.ql/.nr
        (assumed equal-length sequences — TODO confirm against World2).
    :param title: optional figure title, right-aligned at the top.
    :param dist_spines: horizontal spacing between the stacked y-spines.
    """
    fig, host = plt.subplots(figsize=(7, 4))
    # One host axis plus four twins sharing the x-axis.
    axs = [host, ]
    for i in range(4):
        axs.append(host.twinx())
    fig.subplots_adjust(left=dist_spines*2)
    # Move every twin's spine to the left, fanned out by dist_spines.
    for i, ax in enumerate(axs[1:]):
        ax.spines["left"].set_position(("axes", -(i + 1)*dist_spines))
        ax.spines["left"].set_visible(True)
        ax.yaxis.set_label_position('left')
        ax.yaxis.set_ticks_position('left')
    ps = []
    for ax, color, label, ydata in zip(axs,
                                       ["black", "#e7298a", "#d95f02",
                                        "#7570b3", "#1b9e77"],
                                       ["P", "POLR ", "CI ", "QL ", "NR "],
                                       [world2.p, world2.polr, world2.ci,
                                        world2.ql, world2.nr]):
        ps.append(ax.plot(world2.time, ydata, color=color, label=label)[0])
    axs[0].grid(1)
    axs[0].set_xlim(world2.time[0], world2.time[-1])
    # Fixed per-series scales (population, pollution ratio, capital, quality
    # of life, natural resources).
    axs[0].set_ylim(0, 8e9)
    axs[1].set_ylim(0, 40)
    axs[2].set_ylim(0, 20e9)
    axs[3].set_ylim(0, 2)
    axs[4].set_ylim(0, 1000e9)
    for ax_, formatter_ in zip(axs,
                               [EngFormatter(places=0, sep="\N{THIN SPACE}"),
                                EngFormatter(places=0, sep="\N{THIN SPACE}"),
                                EngFormatter(places=0, sep="\N{THIN SPACE}"),
                                ScalarFormatter(),
                                EngFormatter(places=0, sep="\N{THIN SPACE}")]):
        ax_.tick_params(axis='y', rotation=90)
        ax_.yaxis.set_major_locator(plt.MaxNLocator(4))
        ax_.yaxis.set_major_formatter(formatter_)
    tkw = dict(size=4, width=1.5)
    axs[0].set_xlabel("time [years]")
    axs[0].tick_params(axis='x', **tkw)
    # Color each axis' label and ticks to match its line.
    for i, (ax, p) in enumerate(zip(axs, ps)):
        ax.set_ylabel(p.get_label(), rotation="horizontal")
        ax.yaxis.label.set_color(p.get_color())
        ax.tick_params(axis='y', colors=p.get_color(), **tkw)
        ax.yaxis.set_label_coords(-i*dist_spines, 1.01)
    if title is not None:
        fig.suptitle(title, x=0.95, ha="right", fontsize=10)
    plt.tight_layout()
|
<reponame>kcotar/Gaia_clusters_potential
import numpy as np
from astropy.table import Table
from copy import deepcopy
class ISOCHRONES():
    """
    Wrapper around a table of stellar isochrones: selects the isochrone
    nearest to a requested metallicity/age and derives stellar masses and
    HR-diagram magnitudes from it.
    """
    def __init__(self, file_path, photo_system='UBVRIJHK'):
        """
        :param file_path: path to an isochrone table readable by astropy's Table.read
        :param photo_system: Can be UBVRIJHK or Gaia
        """
        self.data_all = Table.read(file_path)
        # Selected-isochrone state; populated by select_isochrone().
        self.isochrone_meh = None
        self.isochrone_age = None
        self.isochrone_data = None
        self.system = photo_system
    def _is_isochrone_selected(self):
        """
        :return: True once select_isochrone() has been called.
        """
        return self.isochrone_data is not None
    def select_isochrone(self, meh, age):
        """
        Determine isochrone that fits best to the selected input parameters
        (nearest grid point in metallicity, then in age).
        :param meh: requested [M/H]
        :param age: requested age (same units as the table's 'Age' column)
        :return:
        """
        meh_uniq = np.unique(self.data_all['MHini'])
        self.isochrone_meh = meh_uniq[np.argmin(np.abs(meh_uniq - meh))]
        age_uniq = np.unique(self.data_all['Age'])
        self.isochrone_age = age_uniq[np.argmin(np.abs(age_uniq - age))]
        self.isochrone_data = self.data_all[np.logical_and(self.data_all['MHini'] == self.isochrone_meh,
                                                           self.data_all['Age'] == self.isochrone_age)]
        # Mass lost since the initial mass, used as a filter in get_hr_magnitudes_data().
        self.isochrone_data['Mloss'] = self.isochrone_data['Mini'] - self.isochrone_data['Mass']
    # NOTE(review): "detemine" is a typo for "determine"; the name is kept so
    # existing callers keep working.
    def detemine_stellar_mass(self, parsec_dist, teff=None, logg=None,
                              gmag=None, gbpmag=None, grpmag=None):
        """
        Determine mass off observed star based on its input parameters - photometric and spectroscopic physical.
        Currently only Gmag is used (teff/logg/gbpmag/grpmag are accepted but
        ignored — experimental first try for dr51).
        :param teff:
        :param logg:
        :param gmag: apparent G magnitude
        :param gbpmag:
        :param grpmag:
        :return: interpolated stellar mass (table units, presumably solar masses — confirm)
        """
        if not self._is_isochrone_selected():
            raise ValueError('Isochrone not selected')
        # selection based on Gmag only - experimental first try for dr51
        if gmag is None:
            raise ValueError('Gmag not given')
        # Convert apparent to absolute magnitude using the distance modulus.
        gmag_abs = gmag - 2.5*np.log10((parsec_dist/10.)**2)
        # Two isochrone points nearest in Gmag, then linear interpolation.
        idx_iso = np.argsort(np.abs(self.isochrone_data['Gmag'] - gmag_abs))[:2]
        # get point point between the nearest ones
        d_frac = (self.isochrone_data['Gmag'][idx_iso[0]] - gmag_abs) / (self.isochrone_data['Gmag'][idx_iso[0]] - self.isochrone_data['Gmag'][idx_iso[1]])
        mass = self.isochrone_data['Mass'][idx_iso[0]] - d_frac * (self.isochrone_data['Mass'][idx_iso[0]] - self.isochrone_data['Mass'][idx_iso[1]])
        return mass
    def get_hr_magnitudes_data(self, max_Mini=None, max_Mloss=None, cluster_dist=100):
        """
        Return (color, magnitude) arrays for an HR/color-magnitude diagram.
        :param max_Mini: in solar mass
        :param max_Mloss: in solar mass
        :param cluster_dist: in parsecs
        :return: (x_data, y_data) — color index and distance-corrected magnitude
        """
        isochrone_data_sub = deepcopy(self.isochrone_data)
        # select the maximum initial stellar mass
        if max_Mini is not None:
            isochrone_data_sub = isochrone_data_sub[isochrone_data_sub['Mini'] < max_Mini]
        # select the maximum stellar mass loss
        if max_Mloss is not None:
            isochrone_data_sub = isochrone_data_sub[isochrone_data_sub['Mloss'] < max_Mloss]
        # correct magnitudes for cluster distance, as they are given as absolute
        # magnitudes (@10pc) in the isochrone table
        if self.system == 'UBVRIJHK':
            b_mag = isochrone_data_sub['Bmag'] + 2.5*np.log10((cluster_dist/10.)**2)
            v_mag = isochrone_data_sub['Vmag'] + 2.5*np.log10((cluster_dist/10.)**2)
            x_data = b_mag - v_mag
            y_data = v_mag
        elif self.system == 'Gaia':
            # Color index from the two Gaia bands needs no distance correction.
            x_data = isochrone_data_sub['G_BPmag'] - isochrone_data_sub['G_RPmag']
            y_data = isochrone_data_sub['Gmag'] + 2.5 * np.log10((cluster_dist / 10.) ** 2)
        return x_data, y_data
|
<reponame>Bensonlmx/data.gov.sg-visualisations-using-pandas-matplotlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Median rent of 4-room HDB flats by town for 2020, plotted as a scatter chart.
df = pd.read_csv('/Users/benson/Desktop/Upskilling/SP/IT8701 Introduction to Programming for Data Science/CA2/median-rent-by-town-and-flat-type.csv', sep=',')

# Keep only rows whose quarter starts with "2020".
re2020 = '^2020'
df1 = df[df['quarter'].str.contains(re2020)]

# (town value as stored in the dataset, label shown on the chart).
# NOTE: 'Clementi' was previously mislabelled 'Clement' on the chart.
TOWNS = [
    ('ANG MO KIO', 'Ang Mo Kio'),
    ('BEDOK', 'Bedok'),
    ('BISHAN', 'Bishan'),
    ('BUKIT BATOK', 'Bukit Batok'),
    ('BUKIT MERAH', 'Bukit Merah'),
    ('BUKIT PANJANG', 'Bukit Panjang'),
    ('CENTRAL', 'Central'),
    ('CHOA CHU KANG', 'Choa Chu Kang'),
    ('CLEMENTI', 'Clementi'),
    ('GEYLANG', 'Geylang'),
    ('HOUGANG', 'Hougang'),
    ('JURONG EAST', 'Jurong East'),
    ('JURONG WEST', 'Jurong West'),
    ('KALLANG/WHAMPOA', 'Kallang/Whampoa'),
    ('MARINE PARADE', 'Marine Parade'),
    ('PASIR RIS', 'Pasir Ris'),
    ('PUNGGOL', 'Punggol'),
    ('QUEENSTOWN', 'Queenstown'),
    ('SEMBAWANG', 'Sembawang'),
    ('SENGKANG', 'Sengkang'),
    ('SERANGOON', 'Serangoon'),
    ('TAMPINES', 'Tampines'),
    ('TOA PAYOH', 'Toa Payoh'),
    ('WOODLANDS', 'Woodlands'),
    ('YISHUN', 'Yishun'),
]


def _median_rent(town):
    """Median of the 2020 quarterly median rents for 4-room flats in *town*."""
    subset = df1[(df1.flat_type == '4-RM') & (df1.town == town)]
    return subset['median_rent'].median()


medians = {town: _median_rent(town) for town, _ in TOWNS}
# Kept from the original script: spot-check one town's value on stdout.
print(medians['MARINE PARADE'])

data = [medians[town] for town, _ in TOWNS]
labels = [label for _, label in TOWNS]
plt.xticks(range(len(data)), labels, rotation=90)
plt.xlabel('Town')
plt.ylabel('Median rent (S$)')
plt.title('Median rent of 4-room HDB flat by town in 2020')
plt.scatter(range(len(data)), data)
plt.grid(color='#95a5a6', linestyle='--', linewidth=2, axis='y', alpha=0.7)
plt.show()
|
import random
import threading
import functools
from collections import Counter
from src.gamemodes import game_mode, GameMode
from src.messages import messages
from src.containers import UserList, UserDict
from src.decorators import command, handle_error
from src.functions import get_players, change_role
from src.status import add_dying
from src.events import EventListener
from src import channels
@game_mode("sleepy", minp=10, maxp=24, likelihood=5)
class SleepyMode(GameMode):
"""A small village has become the playing ground for all sorts of supernatural beings."""
    def __init__(self, arg=""):
        """Configure roles, nightmare odds and the per-player maze state."""
        super().__init__(arg)
        # Cumulative role additions keyed by player-count thresholds.
        self.ROLE_GUIDE = {
            10: ["wolf", "werecrow", "traitor", "cultist", "seer", "prophet", "priest", "dullahan", "cursed villager", "blessed villager"],
            12: ["wolf(2)", "vigilante"],
            15: ["wolf(3)", "detective", "vengeful ghost"],
            18: ["wolf(4)", "harlot", "monster"],
            21: ["wolf(5)", "village drunk", "monster(2)", "gunner"],
        }
        # Per-candidate chance of a nightmare each night, capped at NIGHTMARE_MAX victims.
        self.NIGHTMARE_CHANCE = 1/5
        self.NIGHTMARE_MAX = 1
        # Chance a player turns role on the priest's death (see happy_fun_times).
        self.TURN_CHANCE = 3/5
        # Make sure priest is always prophet AND blessed, and that drunk is always gunner
        self.SECONDARY_ROLES["blessed villager"] = ["priest"]
        self.SECONDARY_ROLES["prophet"] = ["priest"]
        self.SECONDARY_ROLES["gunner"] = ["village drunk"]
        self.EVENTS = {
            "dullahan_targets": EventListener(self.dullahan_targets),
            "transition_night_begin": EventListener(self.setup_nightmares),
            "chk_nightdone": EventListener(self.prolong_night),
            "transition_day_begin": EventListener(self.nightmare_kill),
            "del_player": EventListener(self.happy_fun_times),
            "revealroles": EventListener(self.on_revealroles),
            "night_idled": EventListener(self.on_night_idled)
        }
        # Players currently trapped in the nightmare minigame.
        self.having_nightmare = UserList()
        # Direction commands are PM-only, night-only, and restricted to
        # players having a nightmare; registered lazily in startup().
        cmd_params = dict(chan=False, pm=True, playing=True, phases=("night",),
                          users=self.having_nightmare, register=False)
        self.north_cmd = command("north", **cmd_params)(functools.partial(self.move, "n"))
        self.east_cmd = command("east", **cmd_params)(functools.partial(self.move, "e"))
        self.south_cmd = command("south", **cmd_params)(functools.partial(self.move, "s"))
        self.west_cmd = command("west", **cmd_params)(functools.partial(self.move, "w"))
        # Per-player maze state: the correct 3-step path, two decoy paths,
        # current progress, last direction taken and candidate paths still matched.
        self.correct = UserDict()
        self.fake1 = UserDict()
        self.fake2 = UserDict()
        self.step = UserDict()
        self.prev_direction = UserDict()
        self.start_direction = UserDict()
        self.on_path = UserDict()
def startup(self):
super().startup()
self.north_cmd.register()
self.east_cmd.register()
self.south_cmd.register()
self.west_cmd.register()
def teardown(self):
super().teardown()
self.north_cmd.remove()
self.east_cmd.remove()
self.south_cmd.remove()
self.west_cmd.remove()
self.having_nightmare.clear()
self.correct.clear()
self.fake1.clear()
self.fake2.clear()
self.step.clear()
self.prev_direction.clear()
self.start_direction.clear()
self.on_path.clear()
def dullahan_targets(self, evt, var, dullahan, max_targets):
evt.data["targets"].update(var.ROLES["priest"])
    def setup_nightmares(self, evt, var):
        """At nightfall, possibly give up to NIGHTMARE_MAX players a nightmare.

        Each pick rolls NIGHTMARE_CHANCE independently; the actual nightmare
        starts 60 seconds into the night via a daemon Timer.
        """
        pl = get_players()
        for i in range(self.NIGHTMARE_MAX):
            if not pl:
                break
            if random.random() < self.NIGHTMARE_CHANCE:
                with var.WARNING_LOCK:
                    # Remove the target so a later iteration can't pick them twice.
                    target = random.choice(pl)
                    pl.remove(target)
                    t = threading.Timer(60, self.do_nightmare, (var, target, var.NIGHT_COUNT))
                    t.daemon = True
                    t.start()
    @handle_error
    def do_nightmare(self, var, target, night):
        """Timer callback: drop *target* into the nightmare maze minigame.

        Bails out silently if night *night* already ended or *target* left
        the game while the timer was pending.
        """
        if var.PHASE != "night" or var.NIGHT_COUNT != night:
            return
        if target not in get_players():
            return
        self.having_nightmare.append(target)
        target.send(messages["sleepy_nightmare_begin"])
        target.send(messages["sleepy_nightmare_navigate"])
        # Three-step direction sequences: one correct path and two decoys.
        self.correct[target] = [None, None, None]
        self.fake1[target] = [None, None, None]
        self.fake2[target] = [None, None, None]
        directions = ["n", "e", "s", "w"]
        self.step[target] = 0
        self.prev_direction[target] = None
        opposite = {"n": "s", "e": "w", "s": "n", "w": "e"}
        for i in range(3):
            corrdir = directions[:]
            f1dir = directions[:]
            f2dir = directions[:]
            # A path never doubles back on its previous step; the first step
            # never goes south (the player conceptually starts facing north).
            if i > 0:
                corrdir.remove(opposite[self.correct[target][i-1]])
                f1dir.remove(opposite[self.fake1[target][i-1]])
                f2dir.remove(opposite[self.fake2[target][i-1]])
            else:
                corrdir.remove("s")
                f1dir.remove("s")
                f2dir.remove("s")
            self.correct[target][i] = random.choice(corrdir)
            self.fake1[target][i] = random.choice(f1dir)
            self.fake2[target][i] = random.choice(f2dir)
        self.prev_direction[target] = "n"
        self.start_direction[target] = "n"
        self.on_path[target] = set()
        self.nightmare_step(target)
    def nightmare_step(self, target):
        """Describe *target*'s current maze position and resolve completion.

        After three successful steps the outcome depends on which candidate
        path the player actually followed: the correct path wakes them, a
        decoy path restarts the maze.
        """
        # FIXME: hardcoded English
        # Available exits exclude the direction just come from (no backtracking).
        if self.prev_direction[target] == "n":
            directions = "north, east, and west"
        elif self.prev_direction[target] == "e":
            directions = "north, east, and south"
        elif self.prev_direction[target] == "s":
            directions = "east, south, and west"
        elif self.prev_direction[target] == "w":
            directions = "north, south, and west"
        else:
            # wat? reset them
            self.step[target] = 0
            self.prev_direction[target] = self.start_direction[target]
            self.on_path[target] = set()
            directions = "north, east, and west"
        if self.step[target] == 0:
            target.send(messages["sleepy_nightmare_0"].format(directions))
        elif self.step[target] == 1:
            target.send(messages["sleepy_nightmare_1"].format(directions))
        elif self.step[target] == 2:
            target.send(messages["sleepy_nightmare_2"].format(directions))
        elif self.step[target] == 3:
            if "correct" in self.on_path[target]:
                # Escaped: wake up and leave the nightmare list.
                target.send(messages["sleepy_nightmare_wake"])
                self.having_nightmare.remove(target)
            elif "fake1" in self.on_path[target]:
                # Dead end: reset progress and re-describe the start.
                target.send(messages["sleepy_nightmare_fake_1"])
                self.step[target] = 0
                self.on_path[target] = set()
                self.prev_direction[target] = self.start_direction[target]
                self.nightmare_step(target)
            elif "fake2" in self.on_path[target]:
                target.send(messages["sleepy_nightmare_fake_2"])
                self.step[target] = 0
                self.on_path[target] = set()
                self.prev_direction[target] = self.start_direction[target]
                self.nightmare_step(target)
def move(self, direction, var, wrapper, message):
    """Handle one movement command ('n'/'e'/'s'/'w') inside the nightmare maze.

    Tracks simultaneously which of the three generated paths
    (correct/fake1/fake2) the player's moves are still consistent with;
    a move advances the step only if it matches at least one viable path.
    """
    opposite = {"n": "s", "e": "w", "s": "n", "w": "e"}
    target = wrapper.source
    # Backtracking (reversing the previous move) is never a valid exit.
    if self.prev_direction[target] == opposite[direction]:
        wrapper.pm(messages["sleepy_nightmare_invalid_direction"])
        return
    advance = False
    step = self.step[target]
    # A path stays viable only if the player was already on it (or this is
    # step 0) and this move matches that path's direction for this step.
    if ("correct" in self.on_path[target] or step == 0) and self.correct[target][step] == direction:
        self.on_path[target].add("correct")
        advance = True
    else:
        self.on_path[target].discard("correct")
    if ("fake1" in self.on_path[target] or step == 0) and self.fake1[target][step] == direction:
        self.on_path[target].add("fake1")
        advance = True
    else:
        self.on_path[target].discard("fake1")
    if ("fake2" in self.on_path[target] or step == 0) and self.fake2[target][step] == direction:
        self.on_path[target].add("fake2")
        advance = True
    else:
        self.on_path[target].discard("fake2")
    if advance:
        self.step[target] += 1
        self.prev_direction[target] = direction
    else:
        # Dead end: restart the maze from the beginning.
        self.step[target] = 0
        self.on_path[target] = set()
        self.prev_direction[target] = self.start_direction[target]
        wrapper.pm(messages["sleepy_nightmare_restart"])
    self.nightmare_step(target)
def prolong_night(self, evt, var):
    """Keep every nightmare-afflicted player counted among the night roles."""
    for afflicted in self.having_nightmare:
        evt.data["nightroles"].append(afflicted)
def on_night_idled(self, evt, var, player):
    """Suppress idle warning points for players trapped in a nightmare."""
    # Night idling is involuntary while the nightmare minigame runs, so the
    # default warning-point handling is skipped for affected players.
    if player not in self.having_nightmare:
        return
    evt.prevent_default = True
def nightmare_kill(self, evt, var):
    """At daybreak, kill every player who is still having a nightmare."""
    alive = get_players()
    for victim in self.having_nightmare:
        # Skip anyone who already left the game or died some other way.
        if victim not in alive:
            continue
        add_dying(var, victim, "bot", "night_kill")
        victim.send(messages["sleepy_nightmare_death"])
    self.having_nightmare.clear()
def happy_fun_times(self, evt, var, player, all_roles, death_triggers):
    """On the priest's death, possibly corrupt seers/harlots/cultists.

    Each seer/harlot/cultist independently turns into its 'evil'
    counterpart with probability TURN_CHANCE, and the set of possible
    role-count statistics is widened to reflect the new uncertainty.
    """
    if death_triggers and evt.params.main_role == "priest":
        channels.Main.send(messages["sleepy_priest_death"])
        mapping = {"seer": "doomsayer", "harlot": "succubus", "cultist": "demoniac"}
        for old, new in mapping.items():
            turn = [p for p in get_players((old,)) if random.random() < self.TURN_CHANCE]
            for t in turn:
                # messages: sleepy_doomsayer_turn, sleepy_succubus_turn, sleepy_demoniac_turn
                change_role(var, t, old, new, message="sleepy_{0}_turn".format(new))
            # Rebuild ROLE_STATS keeping each existing possibility and adding
            # the variant where one `old` role became one `new` role.
            newstats = set()
            for rs in var.ROLE_STATS:
                d = Counter(dict(rs))
                newstats.add(rs)
                if old in d and d[old] >= 1:
                    d[old] -= 1
                    d[new] += 1
                    newstats.add(frozenset(d.items()))
            var.ROLE_STATS = frozenset(newstats)
def on_revealroles(self, evt, var):
    """Append the nightmare-afflicted players to the !revealroles output."""
    if not self.having_nightmare:
        return
    line = messages["sleepy_revealroles"].format(self.having_nightmare)
    evt.data["output"].append(line)
|
from onegov.ballot import ElectionCollection
from onegov.ballot import ElectionCompoundCollection
from onegov.ballot import VoteCollection
from onegov.election_day.collections import DataSourceCollection
from onegov.election_day.collections import DataSourceItemCollection
from onegov.election_day.collections import EmailSubscriberCollection
from onegov.election_day.collections import ScreenCollection
from onegov.election_day.collections import SmsSubscriberCollection
from onegov.election_day.collections import UploadTokenCollection
from onegov.election_day.layouts import ManageDataSourceItemsLayout
from onegov.election_day.layouts import ManageDataSourcesLayout
from onegov.election_day.layouts import ManageElectionCompoundsLayout
from onegov.election_day.layouts import ManageElectionsLayout
from onegov.election_day.layouts import ManageScreensLayout
from onegov.election_day.layouts import ManageSubscribersLayout
from onegov.election_day.layouts import ManageUploadTokensLayout
from onegov.election_day.layouts import ManageVotesLayout
from tests.onegov.election_day.common import DummyRequest
def test_manage_layouts(session):
    """Exercise every Manage*Layout: its model link, menu tree and breadcrumbs.

    The menu is a list of (title, link, active, submenu) tuples; optional
    entries (Wabsti data sources, Subscribers) only appear once the matching
    principal flags (wabsti_import, sms/email notification) are enabled.
    """
    # Votes
    layout = ManageVotesLayout(
        VoteCollection(session),
        DummyRequest()
    )
    assert layout.manage_model_link == 'VoteCollection/archive'
    assert layout.menu == [
        ('Votes', 'VoteCollection/archive', True, []),
        ('Elections', '', False, [
            ('Elections', 'ElectionCollection/archive', False, []),
            (
                'Compounds of elections',
                'ElectionCompoundCollection/archive',
                False,
                []
            )
        ]),
        ('Import configuration', '', False, [
            ('Upload tokens', 'UploadTokenCollection/archive', False, []),
        ]),
        ('Screens', 'ScreenCollection/archive', False, [])
    ]
    assert layout.breadcrumbs == [
        ('Manage', 'VoteCollection/archive', 'unavailable'),
        ('Votes', 'VoteCollection/archive', '')
    ]

    # ... with full menu
    layout = ManageVotesLayout(
        VoteCollection(session),
        DummyRequest()
    )
    # Enabling all principal features adds the Wabsti and Subscribers entries.
    layout.principal.sms_notification = 'http://example.com'
    layout.principal.email_notification = True
    layout.principal.wabsti_import = True
    assert layout.menu == [
        ('Votes', 'VoteCollection/archive', True, []),
        ('Elections', '', False, [
            ('Elections', 'ElectionCollection/archive', False, []),
            (
                'Compounds of elections',
                'ElectionCompoundCollection/archive',
                False,
                []
            )
        ]),
        ('Import configuration', '', False, [
            ('Upload tokens', 'UploadTokenCollection/archive', False, []),
            ('Wabsti data sources', 'DataSourceCollection/archive', False, []),
        ]),
        ('Subscribers', '', False, [
            ('SMS subscribers', 'SmsSubscriberCollection/archive', False, []),
            (
                'Email subscribers',
                'EmailSubscriberCollection/archive',
                False,
                []
            ),
            (
                'Trigger notifications',
                'DummyPrincipal/trigger-notifications',
                False,
                []
            )
        ]),
        ('Screens', 'ScreenCollection/archive', False, [])
    ]
    assert layout.breadcrumbs == [
        ('Manage', 'VoteCollection/archive', 'unavailable'),
        ('Votes', 'VoteCollection/archive', '')
    ]

    # Elections
    layout = ManageElectionsLayout(
        ElectionCollection(session),
        DummyRequest()
    )
    assert layout.manage_model_link == 'ElectionCollection/archive'
    assert layout.menu == [
        ('Votes', 'VoteCollection/archive', False, []),
        ('Elections', '', True, [
            ('Elections', 'ElectionCollection/archive', True, []),
            (
                'Compounds of elections',
                'ElectionCompoundCollection/archive',
                False,
                []
            )
        ]),
        ('Import configuration', '', False, [
            ('Upload tokens', 'UploadTokenCollection/archive', False, []),
        ]),
        ('Screens', 'ScreenCollection/archive', False, [])
    ]
    assert layout.breadcrumbs == [
        ('Manage', 'VoteCollection/archive', 'unavailable'),
        ('Elections', 'ElectionCollection/archive', '')
    ]

    # Election compounds
    layout = ManageElectionCompoundsLayout(
        ElectionCompoundCollection(session),
        DummyRequest()
    )
    assert layout.manage_model_link == 'ElectionCompoundCollection/archive'
    assert layout.menu == [
        ('Votes', 'VoteCollection/archive', False, []),
        ('Elections', '', True, [
            ('Elections', 'ElectionCollection/archive', False, []),
            (
                'Compounds of elections',
                'ElectionCompoundCollection/archive',
                True,
                []
            )
        ]),
        ('Import configuration', '', False, [
            ('Upload tokens', 'UploadTokenCollection/archive', False, []),
        ]),
        ('Screens', 'ScreenCollection/archive', False, [])
    ]
    assert layout.breadcrumbs == [
        ('Manage', 'VoteCollection/archive', 'unavailable'),
        ('Compounds of elections', 'ElectionCompoundCollection/archive', '')
    ]

    # Upload tokens
    layout = ManageUploadTokensLayout(
        UploadTokenCollection(session),
        DummyRequest()
    )
    assert layout.manage_model_link == 'UploadTokenCollection/archive'
    assert layout.menu == [
        ('Votes', 'VoteCollection/archive', False, []),
        ('Elections', '', False, [
            ('Elections', 'ElectionCollection/archive', False, []),
            (
                'Compounds of elections',
                'ElectionCompoundCollection/archive',
                False,
                []
            )
        ]),
        ('Import configuration', '', True, [
            ('Upload tokens', 'UploadTokenCollection/archive', True, []),
        ]),
        ('Screens', 'ScreenCollection/archive', False, [])
    ]
    assert layout.breadcrumbs == [
        ('Manage', 'VoteCollection/archive', 'unavailable'),
        ('Upload tokens', 'UploadTokenCollection/archive', '')
    ]

    # Wabsti data sources
    layout = ManageDataSourcesLayout(
        DataSourceCollection(session),
        DummyRequest()
    )
    layout.principal.wabsti_import = True
    assert layout.manage_model_link == 'DataSourceCollection/archive'
    assert layout.menu == [
        ('Votes', 'VoteCollection/archive', False, []),
        ('Elections', '', False, [
            ('Elections', 'ElectionCollection/archive', False, []),
            (
                'Compounds of elections',
                'ElectionCompoundCollection/archive',
                False,
                []
            )
        ]),
        ('Import configuration', '', True, [
            ('Upload tokens', 'UploadTokenCollection/archive', False, []),
            ('Wabsti data sources', 'DataSourceCollection/archive', True, []),
        ]),
        ('Screens', 'ScreenCollection/archive', False, [])
    ]
    assert layout.breadcrumbs == [
        ('Manage', 'VoteCollection/archive', 'unavailable'),
        ('Wabsti data sources', 'DataSourceCollection/archive', '')
    ]

    # Data source items
    layout = ManageDataSourceItemsLayout(
        DataSourceItemCollection(session, 'source'),
        DummyRequest()
    )
    layout.principal.wabsti_import = True
    assert layout.manage_model_link == 'DataSourceItemCollection/source'
    assert layout.menu == [
        ('Votes', 'VoteCollection/archive', False, []),
        ('Elections', '', False, [
            ('Elections', 'ElectionCollection/archive', False, []),
            (
                'Compounds of elections',
                'ElectionCompoundCollection/archive',
                False,
                []
            )
        ]),
        ('Import configuration', '', True, [
            ('Upload tokens', 'UploadTokenCollection/archive', False, []),
            ('Wabsti data sources', 'DataSourceCollection/archive', True, []),
        ]),
        ('Screens', 'ScreenCollection/archive', False, [])
    ]
    assert layout.breadcrumbs == [
        ('Manage', 'VoteCollection/archive', 'unavailable'),
        ('Wabsti data sources', 'DataSourceCollection/archive', ''),
        ('Mappings', 'DataSourceItemCollection/source', '')
    ]

    # Email subscribers
    layout = ManageSubscribersLayout(
        EmailSubscriberCollection(session),
        DummyRequest()
    )
    layout.principal.email_notification = True
    assert layout.manage_model_link == 'EmailSubscriberCollection/archive'
    assert layout.menu == [
        ('Votes', 'VoteCollection/archive', False, []),
        ('Elections', '', False, [
            ('Elections', 'ElectionCollection/archive', False, []),
            (
                'Compounds of elections',
                'ElectionCompoundCollection/archive',
                False,
                []
            )
        ]),
        ('Import configuration', '', False, [
            ('Upload tokens', 'UploadTokenCollection/archive', False, []),
        ]),
        ('Subscribers', '', True, [
            (
                'Email subscribers',
                'EmailSubscriberCollection/archive',
                True,
                []
            ),
            (
                'Trigger notifications',
                'DummyPrincipal/trigger-notifications',
                False,
                []
            )
        ]),
        ('Screens', 'ScreenCollection/archive', False, [])
    ]
    assert layout.breadcrumbs == [
        ('Manage', 'VoteCollection/archive', 'unavailable'),
        ('Email subscribers', 'EmailSubscriberCollection/archive', '')
    ]

    # SMS subscribers
    layout = ManageSubscribersLayout(
        SmsSubscriberCollection(session),
        DummyRequest()
    )
    layout.principal.sms_notification = 'http://example.com'
    assert layout.manage_model_link == 'SmsSubscriberCollection/archive'
    assert layout.menu == [
        ('Votes', 'VoteCollection/archive', False, []),
        ('Elections', '', False, [
            ('Elections', 'ElectionCollection/archive', False, []),
            (
                'Compounds of elections',
                'ElectionCompoundCollection/archive',
                False,
                []
            )
        ]),
        ('Import configuration', '', False, [
            ('Upload tokens', 'UploadTokenCollection/archive', False, []),
        ]),
        ('Subscribers', '', True, [
            ('SMS subscribers', 'SmsSubscriberCollection/archive', True, []),
            (
                'Trigger notifications',
                'DummyPrincipal/trigger-notifications',
                False,
                []
            )
        ]),
        ('Screens', 'ScreenCollection/archive', False, [])
    ]
    assert layout.breadcrumbs == [
        ('Manage', 'VoteCollection/archive', 'unavailable'),
        ('SMS subscribers', 'SmsSubscriberCollection/archive', '')
    ]

    # Screens
    layout = ManageScreensLayout(
        ScreenCollection(session),
        DummyRequest()
    )
    assert layout.manage_model_link == 'ScreenCollection/archive'
    assert layout.menu == [
        ('Votes', 'VoteCollection/archive', False, []),
        ('Elections', '', False, [
            ('Elections', 'ElectionCollection/archive', False, []),
            (
                'Compounds of elections',
                'ElectionCompoundCollection/archive',
                False,
                []
            )
        ]),
        ('Import configuration', '', False, [
            ('Upload tokens', 'UploadTokenCollection/archive', False, []),
        ]),
        ('Screens', 'ScreenCollection/archive', True, [])
    ]
    assert layout.breadcrumbs == [
        ('Manage', 'VoteCollection/archive', 'unavailable'),
        ('Screens', 'ScreenCollection/archive', '')
    ]

    # Admin
    # A secret (admin) request appends the Administration entry to the menu.
    layout = ManageScreensLayout(None, DummyRequest(is_secret=True))
    assert layout.menu[-1] == (
        'Administration',
        '',
        False,
        [
            (
                'Update archived results',
                'DummyPrincipal/update-results',
                False,
                []
            ),
            (
                'Clear cache',
                'DummyPrincipal/clear-cache',
                False,
                []
            )
        ]
    )
|
import pandas as pd
from utils.storage import load_frame, dump_frame, DATA_PATH, check_if_stepframe, check_if_vecframe
def daySplitter(step_name, data_path=DATA_PATH):
    """
    Split a stepframe into per-day vectors and save the result as a vecframe.

    Each day's block of step counts is transposed so that every output row
    holds one user's full vector for that day ('desc' column = day number).

    :param step_name: name of the stepframe to load
    :param data_path: optional, path to the data folder
    """
    stepframe = load_frame(step_name, data_path)
    check_if_stepframe(stepframe)
    # Number of rows belonging to day 0 == length of one day's vector.
    vec_len = stepframe.loc[stepframe.day == 0].shape[0]
    columns = ['user', 'desc'] + list(range(vec_len))
    vfs = []
    for day in stepframe.day.unique():
        # Columns 4..4+999 hold the per-user step counts; transpose so each
        # row becomes one user's vector for this day.
        vf = stepframe[stepframe.day == day].iloc[:, 4:999 + 4].T.astype('int32')
        vf.columns = list(range(vec_len))
        # pd.np and np.int were removed in pandas 2.0 / NumPy 1.24; use an
        # explicit int64 conversion of the index instead.
        vf['user'] = vf.index.to_numpy(dtype='int64')
        vf['desc'] = day
        vfs.append(vf)
    vecframe = pd.concat(vfs, sort=False, ignore_index=True)
    vecframe = vecframe[columns]
    vecframe.columns = vecframe.columns.astype(str)
    check_if_vecframe(vecframe)
    # Pass data_path through so a custom data folder is honoured on save too.
    dump_frame(vecframe, '{}_dsp'.format(step_name), data_path)
def make_weekly_vecframe(step_name, vec_name='{}_week', data_path=DATA_PATH):
    '''
    Transforms a stepframe into a vecframe without splitting the data.
    'desc' will always be 0.

    :param step_name: name of the stepframe
    :param vec_name: name under which vecframe will be saved
    :param data_path: optional, path to data folder
    :return: None
    '''
    # BUG FIX: previously loaded from the DATA_PATH constant, silently
    # ignoring the data_path argument.
    stepframe = load_frame(step_name, data_path)
    # Columns '0' onwards hold the step counts; transpose so rows are users.
    vecframe = stepframe.loc[:, '0':].transpose()
    vecframe.columns = [str(col) for col in vecframe.columns]
    vecframe['user'] = vecframe.index
    vecframe['user'] = vecframe['user'].apply(int)
    vecframe['desc'] = [0] * vecframe.shape[0]
    # Reorder so the two meta columns ('user', 'desc') come first.
    cols = list(vecframe.columns)
    vecframe = vecframe[cols[-2:] + cols[:-2]]
    check_if_vecframe(vecframe)
    dump_frame(vecframe, vec_name.format(step_name), data_path)
def processTime(epochsfile, step_name, data_path=DATA_PATH):
    """
    Read an epochs CSV and annotate each row with integer day/hour/minute
    bucket indices (epochs are 15-second buckets: 4 per minute).

    :param epochsfile: CSV filename, appended to data_path
    :param step_name: base name under which the result frame is saved
    :param data_path: optional, path to data folder
    """
    epochs = pd.read_csv(data_path + epochsfile)
    epochs = epochs[['step_count']]
    # Use floor division: these must be integer bucket indices.  The original
    # code relied on Python 2's truncating integer '/', which becomes float
    # division under Python 3 and breaks downstream groupby calls.
    epochs['day'] = epochs.index.to_series().apply(lambda x: x // (4 * 60 * 24))
    epochs['hour'] = epochs.index.to_series().apply(lambda x: (x // (4 * 60)) % 24)
    epochs['minute'] = epochs.index.to_series().apply(lambda x: (x // 4) % 60)
    dump_frame(epochs, '{}_epochs'.format(step_name))
def aggregateTime(step_name, emb_name, by, units=1, data_path=DATA_PATH):
    """
    Aggregate per-user step counts over equal time buckets and save the frame.

    :param step_name: name of the stepframe to load
    :param emb_name: name under which the aggregated frame is saved
    :param by: bucket granularity: 'day', 'hour' or 'minute'
    :param units: number of consecutive buckets to merge into one
    :param data_path: optional, path to data folder
    :raises ValueError: if ``by`` is not a supported granularity
    """
    df = load_frame(step_name, data_path)
    print(" This func supports EQUAL BUCKETS ONLY")
    # Columns 4 onwards hold the per-user step counts.
    if by == 'day':
        groupeddf = df.groupby(['day']).apply(lambda x: x.iloc[:, 4:].sum()).reset_index()
    elif by == 'hour':
        groupeddf = df.groupby(['day', 'hour']).apply(lambda x: x.iloc[:, 4:].sum()).reset_index()
    elif by == 'minute':
        groupeddf = df.groupby(['day', 'hour', 'minute']).apply(lambda x: x.iloc[:, 4:].sum()).reset_index()
    else:
        # Previously an unknown `by` fell through to a NameError; fail clearly.
        raise ValueError("unsupported granularity: {!r}".format(by))
    if units > 1:
        # Merge every `units` consecutive rows, then rescale the (summed)
        # time-index columns so they count merged buckets again.
        ret_df = groupeddf.groupby(groupeddf.index // units).sum()
        ret_df['day'] = ret_df.day.apply(lambda x: int(x / units))
        if 'hour' in ret_df.columns:
            ret_df['hour'] = ret_df.hour.apply(lambda x: int(x / units))
        if 'minute' in ret_df.columns:
            ret_df['minute'] = ret_df.minute.apply(lambda x: int(x / units))
    else:
        ret_df = groupeddf
    # Pass data_path through so a custom data folder is honoured on save too.
    dump_frame(ret_df, emb_name, data_path)
def get_stats(group):
    """Return min/max/median/mean/std of *group* as a plain dict."""
    stat_names = ("min", "max", "median", "mean", "std")
    return {name: getattr(group, name)() for name in stat_names}
def statistics(step_name, emb_name, by, units=1, data_path=DATA_PATH):
    """
    Compute per-bucket summary statistics (min/max/median/mean/std) and save.

    :param step_name: name of the stepframe to load
    :param emb_name: name under which the statistics frame is saved
    :param by: bucket granularity: 'hour' or 'minute'
    :param units: number of consecutive buckets to merge into one
    :param data_path: optional, path to data folder
    :raises ValueError: if ``by`` is not a supported granularity
    """
    df = load_frame(step_name, data_path)
    print("WARNING! This func supports EQUAL BUCKETS ONLY")
    if by == 'hour':
        groupeddf = df.groupby(['day', 'hour']).apply(get_stats).reset_index()
    elif by == 'minute':
        groupeddf = df.groupby(['day', 'hour', 'minute']).apply(get_stats).reset_index()
    else:
        # Previously an unknown `by` fell through to a NameError; fail clearly.
        raise ValueError("unsupported granularity: {!r}".format(by))
    if units > 1:
        # NOTE(review): get_stats yields dicts, so .sum() over the merged rows
        # may not behave meaningfully for the stats column - confirm intended
        # use of `units > 1` with this function.
        ret_df = groupeddf.groupby(groupeddf.index // units).sum()
        ret_df['day'] = ret_df.day.apply(lambda x: int(x / units))
        if 'hour' in ret_df.columns:
            ret_df['hour'] = ret_df.hour.apply(lambda x: int(x / units))
        if 'minute' in ret_df.columns:
            ret_df['minute'] = ret_df.minute.apply(lambda x: int(x / units))
    else:
        ret_df = groupeddf
    dump_frame(ret_df, name=emb_name)
|
<gh_stars>1-10
from __future__ import division
import os, gc
import pandas as pd
import ipywidgets as widgets
from seaborn import get_dataset_names
from IPython.display import display
from glob import glob
from contextlib import suppress
class DataFrameLoader(object):
    """Jupyter widget that selects data files and loads them into one DataFrame.

    Supports h5 (chunked pytables with dtype downcasting, falling back to a
    plain read), csv, xlsx, json and html inputs; all values are rounded to
    three decimals.
    """

    def __init__(self, filename=""):
        self.path = os.getcwd()
        self.files = []
        self._standard_datasets = widgets.ToggleButtons(
            value="tips",
            options=get_dataset_names(),
            disabled=False,
            description="Load a standard dataset:"
        )
        self._standard_datasets.style.button_width = '100px'
        self._standard_datasets.style.description_width = '150px'
        self._filenameText = widgets.Text(
            layout=widgets.Layout(width='95%', height='30px'),
            value=filename,
            description="or choose a custom one:"
        )
        self._filenameText.style.description_width = '150px'
        self._refreshBtn = widgets.Button(
            description="Refresh files ...",
            button_style='info',
            layout=widgets.Layout(width='95%')
        )
        self._refreshBtn.on_click(self.refreshFileList)
        self._multiSel = widgets.SelectMultiple(
            options=self.files,
            description="Files: ",
            layout=widgets.Layout(height='400px', width='95%')
        )
        self._multiSel.style.description_width = '150px'
        self._loadBtn = widgets.Button(
            description="Load files ...",
            button_style='danger',
            layout=widgets.Layout(width='95%')
        )
        self._loadBtn.on_click(self.load)
        # Loaded data; replaced wholesale by load().
        self.data = pd.DataFrame()

    def _updatePaths(self):
        """Refresh self.files from the (glob) pattern in the filename box."""
        self.files = glob(str(self._filenameText.value), recursive=True)
        self._multiSel.options = self.files

    def widget(self):
        """Return the assembled VBox widget for display in a notebook."""
        self._updatePaths()
        box = widgets.VBox([self._standard_datasets, self._filenameText, self._refreshBtn, self._multiSel, self._loadBtn])
        return box

    def refreshFileList(self, button):
        """Button callback: re-scan the filesystem for matching files."""
        self._updatePaths()

    def load(self, button):
        """Button callback: load all selected files into self.data."""
        # Drop the old frame first so its memory can be reclaimed before the
        # new (potentially large) concatenation is built.
        del self.data
        gc.collect()
        self.data = pd.concat(
            [self.load_single_dataset(p) for p in self._multiSel.value],
            sort=False,
        )
        gc.collect()

    def load_single_dataset(self, datasetpath):
        """Load one file, dispatching on its extension; values rounded to 3dp.

        Raises Exception for unsupported extensions.
        """
        ftype = str(datasetpath).split('.')[-1]
        if ftype == 'h5':
            def single_column_int_downcast(col):
                # Keep a signed dtype only where negatives exist, to
                # minimise memory.
                if col.min() < 0:
                    return pd.to_numeric(col, downcast="signed")
                return pd.to_numeric(col, downcast="unsigned")

            try:
                # Try to read as pytables format in chunks - memory efficient.
                chunks = []
                for tempdf in pd.read_hdf(datasetpath, mode='r', chunksize=1000000):
                    # Downcast float64/int64 columns to save memory.
                    floatdf = tempdf.select_dtypes(include=['float64'])
                    intdf = tempdf.select_dtypes(include=['int64'])
                    tempdf[floatdf.columns] = floatdf.apply(pd.to_numeric, downcast="float")
                    tempdf[intdf.columns] = intdf.apply(single_column_int_downcast)
                    chunks.append(tempdf)
                    gc.collect()
                # pd.concat replaces the removed (and quadratic) DataFrame.append.
                df = pd.concat(chunks, ignore_index=True) if chunks else pd.DataFrame()
                return df.round(3)
            except Exception:
                import sys
                print("WARNING: could not read as pytables hdf5 format, will now read with h5py", file=sys.stderr)
                # If it fails read as h5py format; this will blow up memory
                # if the file is too large.
                return pd.read_hdf(datasetpath, mode='r').round(3)
        elif ftype == 'csv':
            return pd.read_csv(datasetpath).round(3)
        elif ftype == 'xlsx':
            return pd.read_excel(datasetpath).round(3)
        elif ftype == 'json':
            return pd.read_json(datasetpath).round(3)
        elif ftype == 'html':
            # BUG FIX: pd.read_html returns a *list* of DataFrames; rounding
            # the list raised AttributeError.  Use the first table.
            return pd.read_html(datasetpath)[0].round(3)
        else:
            raise Exception(f"wrong filetype {ftype}")
<reponame>zutn/Simple-Catchments-Hesse
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 19 09:44:13 2019
@author: <NAME>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
import math
import matplotlib.patches as patches
def create_data():
    """Generate ten sorted exponential samples with additive Gaussian noise."""
    # np.random.seed(18)
    # Draw one exponential sample at a time (preserves the RNG draw order),
    # then sort ascending before adding the noise.
    draws = sorted(np.random.exponential() for _ in range(10))
    clean = np.array(draws)
    noise = np.random.normal(0, 1, clean.shape)
    return clean + noise
def create_exponential_from_points(x, y):
    """Creates values corresponding to an exponential function through the
    scatter of x and y. Creates 100 points, so the curve is smooth."""
    x = np.array(x)
    # NOTE(review): x.min() - x.min() is always 0, so the fitted curve always
    # starts at x = 0 regardless of the data; possibly x.min() minus some
    # margin was intended - confirm.
    x_new = np.linspace(x.min() - x.min(), x.max() + x.min(), 100)
    optimal_parameters = find_exponential_function(x, y)
    # Evaluate the fitted exponential over the dense grid.
    y_sim = exponential(pd.DataFrame(x_new), *optimal_parameters)
    x_new = pd.Series(x_new)
    return x_new, y_sim
def find_exponential_function(x, y):
    """Fit ``exponential`` to the points (x, y); return the best parameters."""
    best_params, _pcov = curve_fit(exponential, x, y)
    return best_params
def exponential(x, c, k):
    """Exponential function c * e**(k*x); element-wise on arrays/frames."""
    growth = math.e ** (k * x)
    return c * growth
def plot(signal_x, signal_y, exponential_x, exponential_y):
    """Plot the measured points, the fitted exponential and their deviations.

    Saves the figure to 'lse_example.png' and closes it.
    """
    for df in [signal_x, signal_y, exponential_x, exponential_y]:
        df.reset_index(drop=True, inplace=True)
    ax = plt.gca()
    # Draw points
    ax.plot(signal_x, signal_y, linestyle="", marker="o", color = "dimgrey", label="Measured Data")
    # Draw exponential function
    ax.plot(exponential_x, exponential_y, color="steelblue", label="Fitted Exponential Function")
    # Draw lines between exponential function and points
    # NOTE(review): df_exp is indexed by the fitted curve's x grid; .loc[i]
    # assumes every measurement x exists exactly in that float grid - confirm
    # (exact float matches may raise KeyError for other inputs).
    df_exp = pd.DataFrame(exponential_y)
    df_exp.index = exponential_x
    df_sig = pd.DataFrame(signal_y)
    df_sig.index = [float(x) for x in signal_x]
    for i in df_sig.index:
        # Attach the legend label only once (at x == 1.0) so "Deviation"
        # does not appear once per dashed line.
        if i == 1.0:
            ax.plot([i,i], [df_sig.loc[i], df_exp.loc[i]], color="dimgrey", zorder=1, linestyle="--", label="Deviation")
        else:
            ax.plot([i,i], [df_sig.loc[i], df_exp.loc[i]], color="dimgrey", zorder=1, linestyle="--")
    # Draw the rectangles
    # for i in df_sig.index:
    #     # Determine if the point is above or below the exponential function
    #     dist_sig_exp = (df_sig.loc[i] - df_exp.loc[i]).values
    #     below = True if dist_sig_exp < 0 else False
    #     if below:
    #         rect = patches.Rectangle(xy=(i, df_sig.loc[i]), width=abs(dist_sig_exp), height=abs(dist_sig_exp), facecolor="lightgrey",zorder=0, edgecolor="grey")
    #     else:
    #         rect = patches.Rectangle(xy=(i, df_exp.loc[i]), width=dist_sig_exp, height=dist_sig_exp, facecolor="lightgrey",zorder=0, edgecolor="grey")
    #     ax.add_patch(rect)
    # Make it nice
    # Hide tick labels/marks and spines for a schematic, axis-free look.
    plt.setp(ax.get_yticklabels(), alpha=0)
    plt.setp(ax.get_xticklabels(), alpha=0)
    ax.tick_params(axis=u'both', which=u'both',length=0)
    for spine in ax.spines.values():
        spine.set_visible(False)
    ax.set_xlabel(r"less $\longleftarrow$ Storage $\longrightarrow$ more", color="black", alpha=0.7)
    ax.set_ylabel(r"less $\longleftarrow$ Discharge $\longrightarrow$ more", color="black", alpha=0.7)
    legend = ax.legend()
    for text in legend.get_texts():
        text.set_color("dimgrey")
    fig = plt.gcf()
    fig.tight_layout()
    fig.set_size_inches(5,10)
    # Save it
    plt.savefig("lse_example.png", dpi=200, bbox_inches="tight")
    plt.close()
def normalize(series: pd.Series):
    """Min-max scale *series* to the range [0, 1]."""
    low = series.min()
    span = series.max() - low
    return (series - low) / span
if __name__ == "__main__":
    # Demo: fit an exponential to noisy synthetic data and plot the fit.
    signal = create_data()
    x = pd.Series([i + 1 for i in range(signal.shape[0])])
    y = pd.Series(signal)
    x_new, y_sim = create_exponential_from_points(x,y)
    plot(x,y, x_new, y_sim)
|
<reponame>jlin/inventory<filename>core/keyvalue/views.py
from django.shortcuts import render
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.http import HttpResponse
from django.http import Http404
from core.keyvalue.utils import get_aa, get_docstrings
import simplejson as json
from core.network.models import NetworkKeyValue
from core.range.models import RangeKeyValue
from core.site.models import SiteKeyValue
from mozdns.soa.models import SOAKeyValue
from core.registration.static.models import StaticRegKeyValue
from core.vlan.models import VlanKeyValue
from core.group.models import GroupKeyValue
from core.hwadapter.models import HWAdapterKeyValue
from systems.models import KeyValue as SystemKeyValue
# All KeyValue model classes whose parent objects can be edited here.
klasses = (
    NetworkKeyValue,
    RangeKeyValue,
    SiteKeyValue,
    SOAKeyValue,
    VlanKeyValue,
    SystemKeyValue,
    StaticRegKeyValue,
    GroupKeyValue,
    HWAdapterKeyValue
)

# Map lower-cased KV class name -> the model class that owns the key/values
# (resolved through the KV model's 'obj' foreign key).
kv_users = {}
for klass in klasses:
    kv_users[klass.__name__.lower()] = klass.obj.field.related.parent_model
def process_kv(kv, obj, KVClass):
    """Apply a POSTed key/value form to *obj*.

    *kv* is an iterable of (name, value) form pairs encoding three entry
    kinds: deletions ('existing_delete_*'), updates ('existing_v_<pk>') and
    new pairs ('attr_new_key_<n>' paired with 'attr_new_value_<n>').
    Deletions win over updates for the same pair.
    """
    existing_kvs = []
    delete_kvs = []
    new_kvs = []
    for k, v in kv:
        if k.startswith('existing_delete_'):
            try:
                # NOTE(review): str.strip() removes a *character set*, not a
                # prefix; this only yields the pk because pks are numeric
                # and share no characters with the prefix - confirm.
                delete_kv = KVClass.objects.get(
                    pk=k.strip('existing_delete_v_')
                )
            except KVClass.DoesNotExist:
                continue # It was deleted
            delete_kvs.append(delete_kv)
        elif k.startswith('existing_v_'):
            try:
                existing_kv = KVClass.objects.get(pk=k.strip('existing_v_'))
            except KVClass.DoesNotExist:
                continue # It was deleted
            # Skip updates for pairs the same request deletes.
            if existing_kv in delete_kvs:
                continue
            existing_kv.value = v
            existing_kvs.append(existing_kv)
        elif k.startswith('attr_new_key_'):
            k_num = k.strip('attr_new_key_')
            # Find the matching 'attr_new_value_<n>' field for this new key.
            value_k_name = 'attr_new_value_' + k_num
            for ki, vi in kv:
                if ki == value_k_name:
                    new_kv = KVClass(key=v, value=vi, obj=obj)
                    new_kvs.append(new_kv)
    return_attrs = []
    for kv in existing_kvs + new_kvs:
        try:
            kv.clean()
            kv.save()
            return_attrs.append((kv, None))
        except ValidationError, e:
            # NOTE(review): return_attrs is built but never returned, so
            # per-pair validation errors are silently discarded - confirm.
            return_attrs.append((kv, str(e)))
    for kv in delete_kvs:
        kv.delete()
def validate_keyvalue_ajax(request):
    """AJAX endpoint: validate (without saving) a key/value pair for an object.

    Expects POST fields kv_class, obj_pk, key, value, optional key_pk (an
    existing pair to re-validate) and delete_key.  Returns a JSON body of
    the form {'success': bool, 'message': str}.
    """
    kv_class = request.POST.get('kv_class', None)
    obj_pk = request.POST.get('obj_pk', None)
    key = request.POST.get('key', None)
    value = request.POST.get('value', None)
    key_pk = request.POST.get('key_pk', None)
    delete_key = request.POST.get('delete_key', None)
    # NOTE(review): this also rejects requests missing delete_key (bool(None)
    # is False) while reporting 'missing class' - confirm intended.
    if not (kv_class and bool(delete_key)):
        return HttpResponse(
            json.dumps({'success': False, 'message': 'missing class'})
        )
    if not key:
        return HttpResponse(
            json.dumps({'success': False, 'message': 'Missing key'})
        )
    if not value:
        return HttpResponse(
            json.dumps({'success': False, 'message': 'Missing value'})
        )
    try:
        obj, Klass = resolve_obj(kv_class, obj_pk)
    except ObjectDoesNotExist:
        return HttpResponse(
            json.dumps(
                {'success': False, 'message': 'Missing valid class info'}
            )
        )
    if key_pk:
        # Re-validate an existing pair, optionally with an updated value.
        try:
            kv = obj.keyvalue_set.get(pk=key_pk)
        except Klass.DoesNotExist:
            return HttpResponse(
                json.dumps(
                    {'success': False,
                     'message': "Can't find that Key Value pair."}
                )
            )
        if delete_key != 'true':
            kv.value = value
    else:
        # Validate a brand new (unsaved) pair.
        kv = Klass(key=key, value=value, obj=obj)
    try:
        kv.validate_unique()
        kv.clean()
    except ValidationError, e:
        return HttpResponse(
            json.dumps({'success': False, 'message': e.messages[0]})
        )
    return HttpResponse(json.dumps({'success': True}))
def resolve_class(obj_class):
    """Return the parent model registered for *obj_class*, or raise Http404."""
    try:
        return kv_users[obj_class]
    except KeyError:
        raise Http404()
def resolve_obj(obj_class, obj_pk):
    """Resolve (instance, KeyValue model) for a kv class name and object pk.

    Raises Http404 when the class name is unknown or the object is missing.
    """
    # Normalise once: kv_users is keyed by lower-case class names.  The old
    # code first checked the raw name, wrongly 404-ing mixed-case input that
    # the subsequent lower-cased lookup would have accepted.
    obj_class = obj_class.lower()
    if obj_class not in kv_users:
        raise Http404("Can't find this kv object")
    Klass = kv_users[obj_class]
    try:
        obj = Klass.objects.get(pk=obj_pk)
    except Klass.DoesNotExist:
        raise Http404()
    return obj, obj.keyvalue_set.model
def keyvalue(request, obj_class, obj_pk):
    """Render (and on POST, update) the key/value editor for one object.

    GET shows the current pairs; POST first applies the submitted form via
    process_kv, then re-renders the page with the refreshed pairs.
    """
    obj, KVKlass = resolve_obj(obj_class, obj_pk)
    if request.method == 'POST':
        process_kv(request.POST.items(), obj, KVKlass)
    attrs = obj.keyvalue_set.all()
    # Auto-complete options and per-key docstrings for the template.
    aa_options = get_aa(obj.keyvalue_set.model)
    docs = get_docstrings(KVKlass)
    return render(request, 'keyvalue/keyvalue.html', {
        'kv_class': obj_class,
        'obj_pk': obj.pk,
        'attrs': attrs,
        'object': obj,
        'aa_options': aa_options,
        'existing_keyvalue': attrs,
        'docs': docs,
    })
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class Competition(models.Model):
    """A tuning competition with a parameter space, budget, goal and status."""

    name = models.CharField(max_length=128, blank=False)
    introduction = models.CharField(max_length=8096, blank=False, default="")
    parameters_description = models.CharField(max_length=1024, blank=False)
    # Optimisation direction for the metric (default "MAXIMIZE").
    goal = models.CharField(max_length=128, blank=False, default="MAXIMIZE")
    # NOTE(review): presumably "budget" was intended; the field name is kept
    # as-is for schema/caller compatibility.
    computation_budge = models.IntegerField(blank=False)
    theoretical_best_metrics = models.FloatField(blank=True, null=True)
    current_best_metrics = models.FloatField(blank=True, null=True)
    status = models.CharField(max_length=128, blank=False)
    created_time = models.DateTimeField(auto_now_add=True)
    updated_time = models.DateTimeField(auto_now=True)

    def __str__(self):
        return "{}_{}_{}".format(self.name, self.parameters_description, self.goal)

    @classmethod
    def create(cls,
               name,
               introduction,
               parameters_description,
               goal,
               computation_budge,
               theoretical_best_metrics=None):
        """Create, save and return an 'Active' Competition."""
        instance = cls()
        instance.name = name
        instance.introduction = introduction
        instance.parameters_description = parameters_description
        instance.goal = goal
        instance.computation_budge = computation_budge
        instance.status = "Active"
        instance.theoretical_best_metrics = theoretical_best_metrics
        instance.save()
        return instance

    # NOTE(review): first argument of a classmethod is conventionally `cls`,
    # and `dict` shadows the builtin - rename when callers are audited.
    @classmethod
    def create_from_dict(self, dict):
        """Create a Competition from a dict of field values."""
        competition = Competition.create(dict["name"], dict["introduction"],
                                         dict["parameters_description"],
                                         dict["goal"], dict["computation_budge"])
        if "theoretical_best_metrics" in dict:
            competition.theoretical_best_metrics = dict["theoretical_best_metrics"]
            competition.save()
        return competition

    def to_json(self):
        """Return a JSON-serialisable dict of this competition's fields."""
        return {
            "id": self.id,
            "name": self.name,
            "introduction": self.introduction,
            "parameters_description": self.parameters_description,
            "goal": self.goal,
            "computation_budge": self.computation_budge,
            "theoretical_best_metrics": self.theoretical_best_metrics,
            "current_best_metrics": self.current_best_metrics,
            "status": self.status,
            "created_time": self.created_time,
            "updated_time": self.updated_time
        }
class Participation(models.Model):
    """A user's enrolment in a Competition, tracking their best metric."""

    competition = models.ForeignKey(Competition, related_name="competition")
    username = models.CharField(max_length=128, blank=False)
    email = models.CharField(max_length=128, blank=False)
    current_best_metrics = models.FloatField(blank=True, null=True)
    current_trial_count = models.IntegerField(blank=False, default=0)
    status = models.CharField(max_length=128, blank=False)
    created_time = models.DateTimeField(auto_now_add=True)
    updated_time = models.DateTimeField(auto_now=True)

    def __str__(self):
        return "{}_{}_{}".format(self.competition, self.username, self.email)

    @classmethod
    def create(cls, competition, username, email):
        """Create, save and return an 'Initialized' Participation."""
        instance = cls()
        instance.competition = competition
        instance.username = username
        instance.email = email
        instance.status = "Initialized"
        instance.save()
        return instance

    # NOTE(review): first argument of a classmethod is conventionally `cls`,
    # and `dict` shadows the builtin - rename when callers are audited.
    @classmethod
    def create_from_dict(self, dict):
        """Create a Participation from a dict of field values."""
        return Participation.create(dict["competition"], dict["username"],
                                    dict["email"])

    def to_json(self):
        """Return a JSON-serialisable dict (competition nested as JSON)."""
        return {
            "id": self.id,
            "competition": self.competition.to_json(),
            "username": self.username,
            "email": self.email,
            "current_best_metrics": self.current_best_metrics,
            "current_trial_count": self.current_trial_count,
            "status": self.status,
            "created_time": self.created_time,
            "updated_time": self.updated_time
        }
class Trial(models.Model):
    """One evaluated parameter instance submitted under a Participation."""

    participation = models.ForeignKey(
        Participation, related_name="participation")
    parameters_instance = models.CharField(max_length=1024, blank=False)
    metrics = models.FloatField(blank=True, null=True)
    status = models.CharField(max_length=128, blank=False)
    created_time = models.DateTimeField(auto_now_add=True)
    updated_time = models.DateTimeField(auto_now=True)

    def __str__(self):
        return "{}".format(self.parameters_instance)

    @classmethod
    def create(cls, participation, parameters_instance):
        """Create, save and return a 'NotExecuted' Trial."""
        instance = cls()
        instance.participation = participation
        instance.parameters_instance = parameters_instance
        instance.status = "NotExecuted"
        instance.save()
        return instance

    # NOTE(review): first argument of a classmethod is conventionally `cls`,
    # and `dict` shadows the builtin - rename when callers are audited.
    @classmethod
    def create_from_dict(self, dict):
        """Create a Trial from a dict of field values."""
        return Trial.create(dict["participation"], dict["parameters_instance"])

    def to_json(self):
        """Return a JSON-serialisable dict (participation nested as JSON)."""
        return {
            "id": self.id,
            "participation": self.participation.to_json(),
            "parameters_instance": self.parameters_instance,
            "metrics": self.metrics,
            "status": self.status,
            "created_time": self.created_time,
            "updated_time": self.updated_time
        }
|
import logging
import salt.exceptions
import saltext.vmware.utils.common as utils_common
import saltext.vmware.utils.datacenter as utils_datacenter
# pylint: disable=no-name-in-module
try:
    # pyVmomi is optional; expose availability via HAS_PYVMOMI so callers
    # can degrade gracefully instead of failing at import time.
    from pyVmomi import vim, vmodl

    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False

# Module-level logger following the standard __name__ convention.
log = logging.getLogger(__name__)
def get_hosts(
    service_instance,
    datacenter_name=None,
    host_names=None,
    cluster_name=None,
    get_all_hosts=False,
):
    """
    Return the vim.HostSystem objects for the ESXi hosts in a vCenter,
    optionally filtered by name and/or datacenter/cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    """
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            "Must specify the datacenter when specifying the cluster"
        )
    wanted_names = host_names or []
    prop_names = ["name"]
    if datacenter_name:
        container_ref = utils_datacenter.get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # "parent" is needed below to test cluster membership; retrieving
            # it only makes sense once the datacenter has been specified.
            prop_names.append("parent")
    else:
        # No datacenter given: start the search from the root folder.
        container_ref = utils_common.get_root_folder(service_instance)
    host_props = utils_common.get_mors_with_properties(
        service_instance,
        vim.HostSystem,
        container_ref=container_ref,
        property_list=prop_names,
    )
    log.trace("Retrieved hosts: %s", [entry["name"] for entry in host_props])
    selected = []
    for entry in host_props:
        if cluster_name:
            # Keep only hosts whose parent is the requested cluster.
            parent = entry["parent"]
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if utils_common.get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or entry["name"] in wanted_names:
            selected.append(entry["object"])
    return selected
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB, host_cache_manager=None):
    """
    Configures the host cache of the specified host.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in mebibytes (MiB) of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method.
    """
    hostname = utils_common.get_managed_object_name(host_ref)
    if not host_cache_manager:
        # Look the cache manager up via the host's configManager property.
        props = utils_common.get_properties_of_managed_object(
            host_ref, ["configManager.cacheConfigurationManager"]
        )
        if not props.get("configManager.cacheConfigurationManager"):
            raise salt.exceptions.VMwareObjectRetrievalError(
                "Host '{}' has no host cache".format(hostname)
            )
        host_cache_manager = props["configManager.cacheConfigurationManager"]
    log.trace(
        "Configuring the host cache on host '%s', datastore '%s', " "swap size=%s MiB",
        hostname,
        datastore_ref.name,
        swap_size_MiB,
    )
    spec = vim.HostCacheConfigurationSpec(datastore=datastore_ref, swapSize=swap_size_MiB)
    log.trace("host_cache_spec=%s", spec)
    # Map pyVmomi faults to salt exceptions; the most specific fault
    # (NoPermission) must be handled before the generic VimFault.
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            "Not enough permissions. Required privilege: " "{}".format(exc.privilegeId)
        )
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the configuration task finished.
    utils_common.wait_for_task(task, hostname, "HostCacheConfigurationTask")
    log.trace("Configured host cache on host '%s'", hostname)
    return True
def list_hosts(service_instance):
    """
    Return all hosts known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    """
    all_hosts = utils_common.list_objects(service_instance, vim.HostSystem)
    return all_hosts
|
<reponame>ubcbraincircuits/pyDynamo
from PyQt5.QtGui import QPen, QPainter, QBrush, QFont, QColor
from PyQt5.QtCore import Qt, QPointF, QRectF
import matplotlib.pyplot as plt
import numpy as np
import pydynamo_brain.util as util
from .branchToColorMap import BranchToColorMap
# Module-level shared instance mapping branches to their display colors.
_BRANCH_TO_COLOR_MAP = BranchToColorMap()
"""
White dot = point on this plane
Green dot = current point
Solid line = line with end on this plane
Dashed line = line without end on this plane
Only draw ones with Z < 3 difference, UNLESS all are drawn
"""
class DendritePainter():
    """Draws a dendrite tree onto a QPainter for the Z plane currently shown.

    Points on the current plane are drawn as circles (brush encodes UI
    state), branch segments as solid/dashed lines (see getLinePen), and
    optional ID/annotation text next to each point.
    """

    # Styling for point circles; which brush is used depends on UI state.
    NODE_CIRCLE_PEN = QPen(QBrush(Qt.black), 1, Qt.SolidLine)
    NODE_CIRCLE_BRUSH = QBrush(Qt.white)
    NODE_CIRCLE_SELECTED_BRUSH = QBrush(Qt.cyan)
    NODE_CIRCLE_MOVING_BRUSH = QBrush(Qt.red)
    NODE_CIRCLE_REPARENTING_BRUSH = QBrush(Qt.blue)
    NODE_CIRCLE_DEFAULT_RADIUS = 5
    MARKED_CIRCLE_BRUSH = QBrush(QColor(255, 108, 180))

    # Styling and layout for the ID/annotation text drawn beside points.
    ANNOTATION_PEN = QPen(QBrush(Qt.yellow), 1, Qt.SolidLine)
    ANNOTATION_FONT = QFont("Arial", 12, QFont.Bold)
    ANNOTATION_OFFSET = 20
    ANNOTATION_HEIGHT = 40
    ANNOTATION_MAX_WIDTH = 512

    def __init__(self, painter, uiState, zoomMapFunc, zoomDistFunc):
        # painter: the active QPainter to draw with.
        # uiState: holds the current point, z position and display flags.
        # zoomMapFunc: maps image (x, y) into zoomed screen coordinates.
        # zoomDistFunc: maps (dx, dy) distances into zoomed screen distances.
        self.p = painter
        self.uiState = uiState
        self.zAt = self.uiState.zAxisAt
        self.zoomMapFunc = zoomMapFunc
        self.zoomDistFunc = zoomDistFunc

    def drawTree(self, tree):
        """Draw the whole tree: branch lines first, then points on top."""
        if self.uiState.hideAll:
            return # Hidden, no paint for you.
        selectedPoint = self.uiState.currentPoint()
        selectedPointID = None if selectedPoint is None else selectedPoint.id
        for branch in tree.branches:
            self.drawBranchLines(branch)
        if tree.rootPoint is not None:
            self.drawPoint(tree.rootPoint, selectedPointID)
        for branch in tree.branches:
            self.drawBranchPoints(branch, selectedPointID)

    def drawBranchLines(self, branch):
        """Draw the line segments of one branch, starting at its parent point."""
        for i in range(len(branch.points)):
            previousPoint = branch.parentPoint if i == 0 else branch.points[i - 1]
            if previousPoint is None:
                continue # In theory should not get triggered, but added just in case.
            lastX, lastY, lastZ = self.zoomedLocation(previousPoint.location)
            thisX, thisY, thisZ = self.zoomedLocation(branch.points[i].location)
            # getLinePen returns None when the segment should not be drawn.
            linePen = self.getLinePen(branch, lastZ, thisZ)
            if linePen is not None:
                self.p.setPen(linePen)
                self.p.drawLine(lastX, lastY, thisX, thisY)

    def drawBranchPoints(self, branch, selectedPointID):
        """Draw the circles (and optional text) for every point of a branch."""
        for i in range(len(branch.points)):
            self.drawPoint(branch.points[i], selectedPointID)

    def drawPoint(self, point, selectedPointID):
        """Draw one point, but only if it lies on the current Z plane."""
        x, y, z = self.zoomedLocation(point.location)
        if round(z) == self.zAt:
            # Hilighting has been removed, keep here for backwards compatibility
            marked = point.manuallyMarked or point.hilighted
            self.drawCircleThisZ(x, y,
                point.id == selectedPointID, marked,
                self.uiState.parent().dotSize, point.radius
            )
            self.maybeDrawText(x, y, point)

    def drawCircleThisZ(self, x, y, isSelected, isMarked, fakeRadius, realRadius):
        """Draw a point circle.

        Radius priority: fakeRadius (UI dot size) if set, otherwise
        realRadius (scaled by the current zoom), otherwise the class default.
        """
        radius = fakeRadius
        resizeRadius = False
        if radius is None:
            radius = realRadius
            # Only real (data) radii are rescaled by the current zoom below.
            resizeRadius = (realRadius is not None)
        if radius is None:
            radius = self.NODE_CIRCLE_DEFAULT_RADIUS
        brushColor = self.NODE_CIRCLE_BRUSH
        if isSelected:
            brushColor = self.NODE_CIRCLE_SELECTED_BRUSH
            if self.uiState.isMoving():
                brushColor = self.NODE_CIRCLE_MOVING_BRUSH
            elif self.uiState.isReparenting():
                brushColor = self.NODE_CIRCLE_REPARENTING_BRUSH
        elif isMarked and self.uiState.showMarked:
            brushColor = self.MARKED_CIRCLE_BRUSH
        self.p.setPen(self.NODE_CIRCLE_PEN)
        self.p.setBrush(brushColor)
        if resizeRadius:
            radiusX, radiusY = self.zoomDistFunc(radius, radius)
        else:
            radiusX, radiusY = radius, radius
        self.p.drawEllipse(QPointF(x, y), radiusX, radiusY)

    def maybeDrawText(self, x, y, point):
        """Draw the point's ID and/or annotation beside it, if enabled.

        If both IDs and annotations are shown, the annotation wins.
        """
        if not self.uiState.showAnnotations and not self.uiState.showIDs:
            return
        text = ""
        if self.uiState.showIDs:
            text = point.id
        if self.uiState.showAnnotations:
            text = point.annotation
        if text == "":
            return
        self.p.setFont(self.ANNOTATION_FONT)
        self.p.setPen(self.ANNOTATION_PEN)
        textRect = QRectF(
            x + self.ANNOTATION_OFFSET, y - self.ANNOTATION_HEIGHT / 2,
            self.ANNOTATION_MAX_WIDTH, self.ANNOTATION_HEIGHT
        )
        self.p.drawText(textRect, Qt.AlignVCenter, text)

    def getLinePen(self, branch, z1, z2):
        """Choose the pen for a segment between z1 and z2, or None to skip it.

        Solid when either end is on the current plane; dotted when an end is
        near (|dz| < 3) or when the display mode draws all branches.
        """
        color = _BRANCH_TO_COLOR_MAP.colorForBranch(branch)
        same1, same2 = round(z1) == self.zAt, round(z2) == self.zAt
        near1, near2 = self.isNearZ(z1), self.isNearZ(z2)
        # branchDisplayMode: 1 = draw all branches, 2 = only the current plane.
        drawAll = (self.uiState.branchDisplayMode == 1)
        drawNear = not (self.uiState.branchDisplayMode == 2)
        if same1 or same2:
            return QPen(QBrush(color), self.uiState.parent().lineWidth, Qt.SolidLine)
        elif drawNear and (near1 or near2 or drawAll):
            return QPen(QBrush(color), self.uiState.parent().lineWidth, Qt.DotLine)
        else:
            return None

    def zoomedLocation(self, xyz):
        """Map an (x, y, z) image location to zoomed screen (x, y), keeping raw z."""
        x, y, z = xyz
        zoomedXY = self.zoomMapFunc(x, y)
        return (zoomedXY.x(), zoomedXY.y(), z)

    # HACK - utilities
    def isNearZ(self, z):
        """Whether z is within 3 planes of the currently displayed plane."""
        return abs(z - self.zAt) < 3
|
# Copyright 2017. <NAME>. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import numpy as np
import tensorflow as tf
from bmtk.simulator.mintnet.Image_Library import Image_Library
import os
import h5py
import pandas as pd
class S_Layer(object):
    """HMAX S-layer: matches C-layer band outputs against stored (imprinted)
    patch templates via an L2-normalized convolution.

    Weights are either freshly created (zeros or uniform random) or loaded
    from an HDF5 weight file keyed by node_name.
    """

    def __init__(self, node_name, C_Layer_input, grid_size, pool_size, K, file_name=None, randomize=False):
        """
        node_name: name/key of this layer (also its HDF5 group name).
        C_Layer_input: the C layer feeding this layer (provides tf_sess,
            input placeholder, per-band outputs and band shapes).
        grid_size: spatial extent of each template patch.
        pool_size: number of entries kept (non-zeroed) when imprinting.
        K: number of feature templates.
        file_name: optional HDF5 file to load/store weights.
        randomize: initialize new weights uniformly at random instead of zeros.
        """
        self.node_name = node_name
        self.input = C_Layer_input.input
        self.tf_sess = C_Layer_input.tf_sess
        # c_output is a dictionary of tensors indexed over frequency bands.
        c_output = C_Layer_input.output
        self.C_Layer_input = C_Layer_input
        self.K = K
        self.input_K = C_Layer_input.K
        self.grid_size = grid_size
        self.pool_size = pool_size
        self.band_output = {}
        self.band_shape = C_Layer_input.band_shape

        file_open = False
        if file_name is None:                       # was "==None"
            self.train_state = False
            new_weights = True
        else:
            self.weight_file = file_name
            weight_h5 = h5py.File(self.weight_file, 'a')
            file_open = True
            if self.node_name in weight_h5.keys():
                new_weights = False
                weight_data = weight_h5[self.node_name]['weights']
                # h5py: read with [()]; the .value attribute was removed in h5py 3.0.
                self.train_state = weight_h5[self.node_name]['train_state'][()]
            else:
                new_weights = True
                self.train_state = False
                weight_h5.create_group(self.node_name)
                weight_h5[self.node_name]['train_state'] = self.train_state

        # TODO: check that the weight file contents are consistent with the
        # layer parameters (input bands, grid_size, pool_size, K).
        with tf.name_scope(self.node_name):
            if new_weights:
                w_shape = np.array([self.grid_size, self.grid_size, self.input_K, self.K])
                self.w_shape = w_shape
                w_bound = np.sqrt(np.prod(w_shape[1:]))
                if randomize:
                    W = np.random.uniform(low=-1.0/w_bound, high=1.0/w_bound, size=w_shape).astype(np.float32)
                else:
                    W = np.zeros(w_shape).astype(np.float32)
                if file_name is not None:           # was "!=None"
                    weight_h5[self.node_name].create_dataset('weights', shape=w_shape, dtype=np.float32)
            else:
                # Load previously imprinted weights (see h5py note above).
                W = weight_data[()]
                self.w_shape = W.shape

            W = tf.Variable(W, trainable=False, name='W')
            W.initializer.run(session=self.tf_sess)
            self.weights = W

            for band in c_output.keys():
                # Slice the template to the band's spatial extent, then
                # convolve with the L2-normalized input (guarding /0).
                W_slice = W[:self.band_shape[band][0], :self.band_shape[band][1]]
                input_norm = tf.expand_dims(tf.reduce_sum(c_output[band]*c_output[band], [1, 2]), 1)
                input_norm = tf.expand_dims(input_norm, 1)
                normalized_input = tf.div(c_output[band], tf.maximum(tf.sqrt(input_norm), 1e-12))
                self.band_output[band] = tf.nn.conv2d(normalized_input, W_slice, strides=[1, 1, 1, 1], padding='SAME')

        self.output = self.band_output

        self.num_units = 0
        for b in self.band_shape:
            self.num_units += np.prod(self.band_shape[b])*self.K

        if file_open:
            weight_h5.close()

    def __repr__(self):
        return "S_Layer"

    def compute_output(self, X, band):
        """Run this layer's response for one band on input images X."""
        return self.tf_sess.run(self.output[band], feed_dict={self.input: X})

    def find_band_and_coords_for_imprinting_unit(self, imprinting_unit_index):
        """Map a flat unit index over all C-layer pixels to (band, y, x)."""
        cumulative_units = 0
        for band in self.C_Layer_input.output:
            units_in_next_band = int(np.prod(self.C_Layer_input.output[band].get_shape()[1:3]))
            if imprinting_unit_index < cumulative_units + units_in_next_band:
                # found the right band!
                yb, xb = self.C_Layer_input.band_shape[band]
                band_index = imprinting_unit_index - cumulative_units
                # Integer division: (y, x) must be integer grid coordinates
                # (Python 2 "/" truncated ints; "//" keeps that in Python 3).
                y = band_index // xb
                x = band_index % xb
                break
            else:
                cumulative_units += units_in_next_band
        return band, y, x

    def get_total_pixels_in_C_Layer_input(self):
        """Total number of (y, x) positions summed over all input bands."""
        band_shape = self.C_Layer_input.band_shape
        # Python 3: dict views have no .sort(); use sorted() instead.
        total = 0
        for band in sorted(band_shape.keys()):
            total += np.prod(band_shape[band])
        return total

    def get_patch_bounding_box_and_shift(self, band, y, x):
        """Clip a grid_size patch centered at (y, x) to the band's bounds.

        Returns the clipped (y, x) bounds plus the shifts needed to place
        the clipped patch into the full grid_size weight slot.
        """
        # Integer division keeps the bounds usable as array indices.
        y_lower = y - self.grid_size // 2
        y_upper = y_lower + self.grid_size
        x_lower = x - self.grid_size // 2
        x_upper = x_lower + self.grid_size
        yb, xb = self.C_Layer_input.band_shape[band]
        # Shifts compensate for clipping at the band edges.
        y_shift_lower = np.max([-y_lower, 0])
        x_shift_lower = np.max([-x_lower, 0])
        y_lower = np.max([y_lower, 0])
        y_upper = np.min([y_upper, yb])
        x_lower = np.max([x_lower, 0])
        x_upper = np.min([x_upper, xb])
        y_shift_upper = y_shift_lower + y_upper - y_lower
        x_shift_upper = x_shift_lower + x_upper - x_lower
        return y_lower, y_upper, x_lower, x_upper, y_shift_lower, y_shift_upper, x_shift_lower, x_shift_upper

    def train(self, image_dir, batch_size=100, image_shape=(256, 256)):
        """Imprint the K feature templates from random patches of random
        images in image_dir, then persist them to the weight file."""
        print("Training")
        im_lib = Image_Library(image_dir, new_size=image_shape)
        new_weights = np.zeros(self.w_shape).astype(np.float32)
        for k in range(self.K):
            if k % 10 == 0:
                print("Imprinting feature ", k)
            # Pick a random unit to imprint from.
            imprinting_unit_index = np.random.randint(self.get_total_pixels_in_C_Layer_input())
            band, y, x = self.find_band_and_coords_for_imprinting_unit(imprinting_unit_index)
            im_data = im_lib(1)
            output = self.C_Layer_input.compute_output(im_data, band)
            # Grab the response patch around the chosen unit.
            y_lower, y_upper, x_lower, x_upper, y_shift_lower, y_shift_upper, x_shift_lower, x_shift_upper = self.get_patch_bounding_box_and_shift(band, y, x)
            w_patch = output[0, y_lower:y_upper, x_lower:x_upper, :].copy()
            patch_size = np.prod(w_patch.shape)
            # Keep only pool_size randomly chosen entries; zero out the rest.
            pool_size = np.min([self.pool_size, patch_size])
            pool_mask_indices = np.random.choice(np.arange(patch_size), size=pool_size, replace=False)
            # np.bool was removed in NumPy 1.24; use the builtin bool dtype.
            pool_mask = np.zeros(patch_size, dtype=bool)
            pool_mask[pool_mask_indices] = True
            pool_mask.resize(w_patch.shape)
            pool_mask = np.logical_not(pool_mask)  # mask of the indices to zero out
            w_patch[pool_mask] = 0.0
            # The patch may be truncated at the edges; shift it into place.
            new_weights[y_shift_lower:y_shift_upper, x_shift_lower:x_shift_upper, :, k] = w_patch
        self.tf_sess.run(self.weights.assign(new_weights))
        print()
        print("Saving weights to file in ", self.weight_file)
        weight_h5 = h5py.File(self.weight_file, 'a')
        weight_h5[self.node_name]['weights'][...] = new_weights
        weight_h5[self.node_name]['train_state'][...] = True
        weight_h5.close()

    def get_compute_ops(self, unit_table=None):
        """Return (unit_table, compute_list) for the requested units.

        If unit_table has 'y'/'x' columns, single units are selected; with
        only a 'band' column, whole band tensors; otherwise all bands.
        """
        compute_list = []
        if unit_table is not None:
            for i, row in unit_table.iterrows():
                if 'y' in unit_table:
                    node, band, y, x = row['node'], int(row['band']), int(row['y']), int(row['x'])
                    compute_list.append(self.output[band][:, y, x, :])
                elif 'band' in unit_table:
                    node, band = row['node'], int(row['band'])
                    compute_list.append(self.output[band])
                else:
                    return self.get_all_compute_ops()
        else:
            return self.get_all_compute_ops()
        return unit_table, compute_list

    def get_all_compute_ops(self):
        """Build a unit table listing every band of this node and the
        matching list of output tensors."""
        # DataFrame.append was removed in pandas 2.0; build the frame in one go.
        rows = [[self.node_name, band] for band in self.band_output]
        unit_table = pd.DataFrame(rows, columns=['node', 'band'])
        compute_list = [self.output[band] for band in self.band_output]
        return unit_table, compute_list
def test_S_Layer_ouput():
    """Smoke test / demo: build an S1 -> C1 -> S2 stack, run one image
    through s2, plot the per-band responses, then imprint weights via train().

    NOTE(review): the function name keeps the original "ouput" typo because
    the __main__ guard below calls it by this exact name.
    """
    from S1_Layer import S1_Layer
    import matplotlib.pyplot as plt
    from C_Layer import C_Layer

    fig_dir = 'Figures'
    # First we need an S1 Layer
    # these parameters are taken from Serre, et al PNAS for HMAX
    freq_channel_params = [ [7,2.8,3.5],
                            [9,3.6,4.6],
                            [11,4.5,5.6],
                            [13,5.4,6.8],
                            [15,6.3,7.9],
                            [17,7.3,9.1],
                            [19,8.2,10.3],
                            [21,9.2,11.5],
                            [23,10.2,12.7],
                            [25,11.3,14.1],
                            [27,12.3,15.4],
                            [29,13.4,16.8],
                            [31,14.6,18.2],
                            [33,15.8,19.7],
                            [35,17.0,21.2],
                            [37,18.2,22.8],
                            [39,19.5,24.4]]

    orientations = np.arange(4)*np.pi/4

    input_shape = (128,192)

    s1 = S1_Layer(input_shape,freq_channel_params,orientations)

    # Now we need to define a C1 Layer
    # each entry: [S1 sub-band indices, pooling grid size, stride]
    bands = [ [[0,1], 8, 3],
              [[2,3], 10, 5],
              [[4,5], 12, 7],
              [[6,7], 14, 8],
              [[8,9], 16, 10],
              [[10,11], 18, 12],
              [[12,13], 20, 13],
              [[14,15,16], 22, 15]]

    c1 = C_Layer(s1,bands)

    grid_size = 3
    pool_size = 10
    K = 10

    s2 = S_Layer('s2',c1,grid_size,pool_size,K,file_name='S_test_file.h5',randomize=False)

    # Test s2 on an image
    # NOTE(review): hard-coded absolute path; this only runs on the
    # original author's machine.
    image_dir = '/Users/michaelbu/Code/HCOMP/SampleImages'
    im_lib = Image_Library(image_dir,new_size=input_shape)

    image_data = im_lib(1)

    fig, ax = plt.subplots(1)
    ax.imshow(image_data[0,:,:,0],cmap='gray')

    # One row of subplots per band, one column per feature k.
    fig,ax = plt.subplots(8,10)
    result = {}
    for b in range(len(bands)):
        result[b] = s2.compute_output(image_data,b)

        for k in range(K):
            ax[b,k].imshow(result[b][0,:,:,k],interpolation='nearest',cmap='gray')
            ax[b,k].axis('off')

    fig.savefig(os.path.join(fig_dir,'s2_layer.tiff'))

    plt.show()

    s2.train(image_dir,batch_size=10,image_shape=input_shape)


if __name__=='__main__':
    test_S_Layer_ouput()
|
import sys
import gym
import pylab
import random
import numpy as np
from collections import deque
import torch
from torch import nn, optim
import torch.nn.functional as F
'''
First, fill in the hyperparameters that are set to None below (changing other
parts may break things). You may copy the example in cartpole_dqn.py as-is,
but experimenting with different settings to build a better agent is also
a good idea.
'''
# Maximum number of episodes to run.
EPISODES = 2000
# DQN agent (exercise template, adapted from the cartpole example to LunarLander)
class DQNAgent:
    def __init__(self, state_size, action_size):
        """Exercise template: fill in the hyperparameters marked None below.

        Copying the values from the cartpole_dqn.py example is fine; feel
        free to tune them to build a better agent.
        """
        # Rendering does not work on Google Colab; keep this False there.
        self.render = False
        # Set to True to resume training from a previously saved model
        # (./save_model/lunarlander_dqn_trained.bin).
        self.load_model = False
        # Sizes of the state and action spaces.
        self.state_size = state_size
        self.action_size = action_size
        # DQN hyperparameters -- the None entries must be filled in.
        self.discount_factor = 0.99
        self.learning_rate = None
        self.epsilon = 1.0
        self.epsilon_decay = None
        self.epsilon_min = 0.01
        self.batch_size = 64
        self.train_start = 10000
        # Replay memory, maximum size 20000.
        self.memory = deque(maxlen=20000)
        # Online model and target model (complete build_model first,
        # otherwise construction fails here).
        self.model = self.build_model()
        self.target_model = self.build_model()
        self.optimizer = optim.Adam(
            self.model.parameters(), lr=self.learning_rate)
        # Initialize the target model with the online model's weights.
        self.update_target_model()

        if self.load_model:
            self.model.load_state_dict(torch.load(
                './save_model/lunarlander_dqn_trained.bin'))

    # Neural network: state in, Q-values out.
    def build_model(self):
        """Exercise template: implement the network (the cartpole_dqn.py
        example can be reused), then remove this ``pass``."""
        pass

    # Update the target model with the online model's weights.
    def update_target_model(self):
        self.target_model.load_state_dict(self.model.state_dict())

    # Epsilon-greedy action selection.
    def get_action(self, state):
        if np.random.rand() <= self.epsilon:
            # Random action. Fix: the cartpole original hard-coded
            # randrange(2), but LunarLander has action_size (4) actions.
            return torch.LongTensor([[random.randrange(self.action_size)]])
        else:
            # Greedy action from the model.
            return self.model(state).data.max(1)[1].view(1, 1)

    # Store the sample <s, a, r, s'> in the replay memory.
    def append_sample(self, state, action, reward, next_state, done):
        reward = torch.FloatTensor([reward])
        next_state = torch.FloatTensor([next_state])
        done = torch.FloatTensor([done])
        self.memory.append((state, action, reward, next_state, done))

    # Train the model on a random minibatch drawn from the replay memory.
    def train_model(self):
        batch = random.sample(self.memory, self.batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)
        states = torch.cat(states)
        actions = torch.cat(actions)
        rewards = torch.cat(rewards)
        next_states = torch.cat(next_states)
        dones = torch.cat(dones)
        # Q(s, a) from the online model; max_a' Q_target(s', a') from the target.
        current_q = self.model(states).gather(1, actions)
        max_next_q = self.target_model(next_states).detach().max(1)[0]
        # Bellman update target. NOTE(review): terminal transitions are not
        # masked with (1 - dones) here -- confirm whether that is intended.
        expected_q = rewards + (self.discount_factor * max_next_q)
        self.optimizer.zero_grad()
        loss = F.mse_loss(current_q.squeeze(), expected_q)
        loss.backward()
        self.optimizer.step()
if __name__ == "__main__":
    # Create the LunarLander-v2 environment.
    env = gym.make('LunarLander-v2')
    env.seed(0)
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.n

    agent = DQNAgent(state_size, action_size)

    scores, episodes = [], []

    for e in range(EPISODES):
        done = False
        score = 0
        # Reset the environment for a new episode.
        state = env.reset()

        while not done:
            if agent.render:
                env.render()

            # Choose an action for the current state.
            state = torch.FloatTensor([state])
            action = agent.get_action(state)
            # Advance the environment one timestep with the chosen action.
            next_state, reward, done, info = env.step(action.item())
            # Store the sample <s, a, r, s'> in the replay memory.
            agent.append_sample(state, action, reward, next_state, done)
            # Train every timestep once enough samples have been collected.
            if len(agent.memory) >= agent.train_start:
                agent.train_model()

            score += reward
            state = next_state

            if done:
                # At the end of each episode, sync the target model weights.
                agent.update_target_model()

                # Record and plot the score of every episode.
                scores.append(score)
                episodes.append(e)
                pylab.plot(episodes, scores, 'b')
                pylab.savefig("./save_graph/lunarlander_dqn.png")
                print("episode:", e, " score:", score, " memory length:",
                      len(agent.memory), " epsilon:", agent.epsilon)

                # Checkpoint the model every 100 episodes.
                if e != 0 and e % 100 == 0:
                    print("Saved!")
                    torch.save(agent.model.state_dict(),
                               "./save_model/lunarlander_dqn.bin")

                # Stop training when the mean score of the last 50 episodes
                # exceeds 200.
                if np.mean(scores[-min(50, len(scores)):]) > 200:
                    print("Success!")
                    torch.save(agent.model.state_dict(),
                               "./save_model/lunarlander_dqn.bin")
                    sys.exit()

        # Epsilon decay. NOTE(review): applied once per episode here --
        # confirm the intended decay cadence (per step vs per episode).
        if len(agent.memory) >= agent.train_start and agent.epsilon > agent.epsilon_min:
            agent.epsilon *= agent.epsilon_decay
|
<reponame>sherry255/locust<gh_stars>0
# -*- coding: utf-8 -*-
import csv
import json
import sys
import traceback
import gevent
import requests
from gevent import pywsgi
from locust import events, runners, stats, web
from locust.core import Locust
from locust.main import parse_options
from locust.runners import LocustRunner
from six.moves import StringIO
from .testcases import LocustTestCase
# Host and form payloads shared by the /swarm endpoint tests below.
ALTERNATIVE_HOST = 'http://localhost'
SWARM_DATA_WITH_HOST = {'locust_count': 5, 'hatch_rate': 5, 'host': ALTERNATIVE_HOST}
SWARM_DATA_WITH_NO_HOST = {'locust_count': 5, 'hatch_rate': 5}
class TestWebUI(LocustTestCase):
def setUp(self):
super(TestWebUI, self).setUp()
stats.global_stats.clear_all()
parser = parse_options()[0]
self.options = parser.parse_args([])
runners.locust_runner = LocustRunner([], self.options)
web.request_stats.clear_cache()
self._web_ui_server = pywsgi.WSGIServer(('127.0.0.1', 0), web.app, log=None)
gevent.spawn(lambda: self._web_ui_server.serve_forever())
gevent.sleep(0.01)
self.web_port = self._web_ui_server.server_port
def tearDown(self):
super(TestWebUI, self).tearDown()
runners.locust_runner = None
self._web_ui_server.stop()
def test_index(self):
self.assertEqual(200, requests.get("http://127.0.0.1:%i/" % self.web_port).status_code)
def test_stats_no_data(self):
self.assertEqual(200, requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port).status_code)
def test_stats(self):
stats.global_stats.log_request("GET", "/<html>", 120, 5612)
response = requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port)
self.assertEqual(200, response.status_code)
data = json.loads(response.text)
self.assertEqual(2, len(data["stats"])) # one entry plus Aggregated
self.assertEqual("/<html>", data["stats"][0]["name"])
self.assertEqual("/<html>", data["stats"][0]["safe_name"])
self.assertEqual("GET", data["stats"][0]["method"])
self.assertEqual(120, data["stats"][0]["avg_response_time"])
self.assertEqual("Aggregated", data["stats"][1]["name"])
self.assertEqual(1, data["stats"][1]["num_requests"])
self.assertEqual(120, data["stats"][1]["avg_response_time"])
def test_stats_cache(self):
stats.global_stats.log_request("GET", "/test", 120, 5612)
response = requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port)
self.assertEqual(200, response.status_code)
data = json.loads(response.text)
self.assertEqual(2, len(data["stats"])) # one entry plus Aggregated
# add another entry
stats.global_stats.log_request("GET", "/test2", 120, 5612)
data = json.loads(requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port).text)
self.assertEqual(2, len(data["stats"])) # old value should be cached now
web.request_stats.clear_cache()
data = json.loads(requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port).text)
self.assertEqual(3, len(data["stats"])) # this should no longer be cached
def test_stats_rounding(self):
stats.global_stats.log_request("GET", "/test", 1.39764125, 2)
stats.global_stats.log_request("GET", "/test", 999.9764125, 1000)
response = requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port)
self.assertEqual(200, response.status_code)
data = json.loads(response.text)
self.assertEqual(1, data["stats"][0]["min_response_time"])
self.assertEqual(1000, data["stats"][0]["max_response_time"])
def test_request_stats_csv(self):
stats.global_stats.log_request("GET", "/test2", 120, 5612)
response = requests.get("http://127.0.0.1:%i/stats/requests/csv" % self.web_port)
self.assertEqual(200, response.status_code)
def test_distribution_stats_csv(self):
for i in range(19):
stats.global_stats.log_request("GET", "/test2", 400, 5612)
stats.global_stats.log_request("GET", "/test2", 1200, 5612)
response = requests.get("http://127.0.0.1:%i/stats/distribution/csv" % self.web_port)
self.assertEqual(200, response.status_code)
rows = response.text.split("\n")
# check that /test2 is present in stats
row = rows[len(rows)-2].split(",")
self.assertEqual('"GET /test2"', row[0])
# check total row
total_cols = rows[len(rows)-1].split(",")
self.assertEqual('"Aggregated"', total_cols[0])
# verify that the 95%, 98%, 99% and 100% percentiles are 1200
for value in total_cols[-4:]:
self.assertEqual('1200', value)
def test_failure_stats_csv(self):
stats.global_stats.log_error("GET", "/", Exception("Error1337"))
response = requests.get("http://127.0.0.1:%i/stats/failures/csv" % self.web_port)
self.assertEqual(200, response.status_code)
def test_request_stats_with_errors(self):
stats.global_stats.log_error("GET", "/", Exception("Error1337"))
response = requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port)
self.assertEqual(200, response.status_code)
self.assertIn("Error1337", response.text)
def test_reset_stats(self):
try:
raise Exception(u"A cool test exception")
except Exception as e:
tb = sys.exc_info()[2]
runners.locust_runner.log_exception("local", str(e), "".join(traceback.format_tb(tb)))
runners.locust_runner.log_exception("local", str(e), "".join(traceback.format_tb(tb)))
stats.global_stats.log_request("GET", "/test", 120, 5612)
stats.global_stats.log_error("GET", "/", Exception("Error1337"))
response = requests.get("http://127.0.0.1:%i/stats/reset" % self.web_port)
self.assertEqual(200, response.status_code)
self.assertEqual({}, stats.global_stats.errors)
self.assertEqual({}, runners.locust_runner.exceptions)
self.assertEqual(0, stats.global_stats.get("/", "GET").num_requests)
self.assertEqual(0, stats.global_stats.get("/", "GET").num_failures)
self.assertEqual(0, stats.global_stats.get("/test", "GET").num_requests)
self.assertEqual(0, stats.global_stats.get("/test", "GET").num_failures)
def test_exceptions(self):
try:
raise Exception(u"A cool test exception")
except Exception as e:
tb = sys.exc_info()[2]
runners.locust_runner.log_exception("local", str(e), "".join(traceback.format_tb(tb)))
runners.locust_runner.log_exception("local", str(e), "".join(traceback.format_tb(tb)))
response = requests.get("http://127.0.0.1:%i/exceptions" % self.web_port)
self.assertEqual(200, response.status_code)
self.assertIn("A cool test exception", response.text)
response = requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port)
self.assertEqual(200, response.status_code)
def test_exceptions_csv(self):
try:
raise Exception("Test exception")
except Exception as e:
tb = sys.exc_info()[2]
runners.locust_runner.log_exception("local", str(e), "".join(traceback.format_tb(tb)))
runners.locust_runner.log_exception("local", str(e), "".join(traceback.format_tb(tb)))
response = requests.get("http://127.0.0.1:%i/exceptions/csv" % self.web_port)
self.assertEqual(200, response.status_code)
reader = csv.reader(StringIO(response.text))
rows = []
for row in reader:
rows.append(row)
self.assertEqual(2, len(rows))
self.assertEqual("Test exception", rows[1][1])
self.assertEqual(2, int(rows[1][0]), "Exception count should be 2")
def test_swarm_host_value_specified(self):
response = requests.post("http://127.0.0.1:%i/swarm" % self.web_port, data=SWARM_DATA_WITH_HOST)
self.assertEqual(200, response.status_code)
self.assertEqual(runners.locust_runner.host, SWARM_DATA_WITH_HOST['host'])
def test_swarm_host_value_not_specified(self):
response = requests.post("http://127.0.0.1:%i/swarm" % self.web_port, data=SWARM_DATA_WITH_NO_HOST)
self.assertEqual(200, response.status_code)
self.assertEqual(runners.locust_runner.host, None)
def test_host_value_from_locust_class(self):
class MyLocust(Locust):
host = "http://example.com"
runners.locust_runner = LocustRunner([MyLocust], options=self.options)
response = requests.get("http://127.0.0.1:%i/" % self.web_port)
self.assertEqual(200, response.status_code)
self.assertIn("http://example.com", response.content.decode("utf-8"))
self.assertNotIn("setting this will override the host on all Locust classes", response.content.decode("utf-8"))
def test_host_value_from_multiple_locust_classes(self):
    """When all Locust classes agree on a host, it is shown without warning."""
    class MyLocust(Locust):
        host = "http://example.com"
    class MyLocust2(Locust):
        host = "http://example.com"
    runners.locust_runner = LocustRunner([MyLocust, MyLocust2], options=self.options)
    response = requests.get("http://127.0.0.1:%i/" % self.web_port)
    self.assertEqual(200, response.status_code)
    self.assertIn("http://example.com", response.content.decode("utf-8"))
    # Identical hosts across classes -> no override warning.
    self.assertNotIn("setting this will override the host on all Locust classes", response.content.decode("utf-8"))
def test_host_value_from_multiple_locust_classes_different_hosts(self):
    """When Locust classes disagree on the host, show the override warning."""
    class MyLocust(Locust):
        host = None
    class MyLocust2(Locust):
        host = "http://example.com"
    runners.locust_runner = LocustRunner([MyLocust, MyLocust2], options=self.options)
    response = requests.get("http://127.0.0.1:%i/" % self.web_port)
    self.assertEqual(200, response.status_code)
    # Conflicting hosts: no single host is displayed, a warning is shown instead.
    self.assertNotIn("http://example.com", response.content.decode("utf-8"))
    self.assertIn("setting this will override the host on all Locust classes", response.content.decode("utf-8"))
|
<reponame>zhouwenfan/temp
# Author: <NAME>
import unittest
from kernel.type import TVar, Type, TFun, boolT
from kernel.term import Var, Const, Term
from kernel.thm import Thm
from kernel.extension import AxType, AxConstant, Theorem, Attribute
from logic import logic, induct
# Short aliases for commonly used term constructors.
imp = Term.mk_implies
eq = Term.mk_equals
# NOTE(review): this shadows the builtin all() for the rest of the module;
# kept as-is because the tests below depend on the name.
all = Term.mk_all
conj = logic.mk_conj
class InductTest(unittest.TestCase):
    """Tests for inductive type, function and predicate definitions.

    Each test builds the expected theory extension (type/constant axioms,
    distinctness/injectivity theorems, induction rules and attributes) and
    compares it against the output of the corresponding induct.add_* call.
    """

    def testInductNat(self):
        """Inductive type nat with constructors zero and Suc."""
        nat = Type("nat")
        nat_ext = induct.add_induct_type(
            "nat", [], [("zero", nat, []), ("Suc", TFun(nat, nat), ["n"])])
        zero = Const("zero", nat)
        S = Const("Suc", TFun(nat, nat))
        n = Var("n", nat)
        n2 = Var("n'", nat)
        x = Var("x", nat)
        P = Var("P", TFun(nat, boolT))
        # Expected extension: type axiom (arity 0), constructor constants,
        # distinctness, injectivity, induction rule and its attribute.
        res = [
            AxType("nat", 0),
            AxConstant("zero", nat),
            AxConstant("Suc", TFun(nat, nat)),
            Theorem("nat_zero_Suc_neq", Thm([], logic.neg(eq(zero, S(n))))),
            Theorem("nat_Suc_inject", Thm([], imp(eq(S(n), S(n2)), eq(n, n2)))),
            Theorem("nat_induct", Thm([], imp(P(zero), all(n, imp(P(n), P(S(n)))), P(x)))),
            Attribute("nat_induct", "var_induct")
        ]
        self.assertEqual(nat_ext.data, res)

    def testInductAdd(self):
        """Recursive function definition of plus on nat."""
        nat = Type("nat")
        plus = Const("plus", TFun(nat, nat, nat))
        zero = Const("zero", nat)
        S = Const("Suc", TFun(nat, nat))
        m = Var("m", nat)
        n = Var("n", nat)
        ext = induct.add_induct_def(
            'plus', TFun(nat, nat, nat), [
                eq(plus(zero, n), n),
                eq(plus(S(m), n), S(plus(m, n)))])
        # One defining equation per constructor case, each tagged as a
        # rewrite hint.
        res = [
            AxConstant("plus", TFun(nat, nat, nat)),
            Theorem("plus_def_1", Thm([], eq(plus(zero, n), n))),
            Attribute("plus_def_1", "hint_rewrite"),
            Theorem("plus_def_2", Thm([], eq(plus(S(m), n), S(plus(m, n))))),
            Attribute("plus_def_2", "hint_rewrite"),
        ]
        self.assertEqual(ext.data, res)

    def testInductList(self):
        """Inductive type 'a list with constructors nil and cons."""
        Ta = TVar("a")
        Tlista = Type("list", Ta)
        list_ext = induct.add_induct_type(
            "list", ["a"], [("nil", Tlista, []), ("cons", TFun(Ta, Tlista, Tlista), ["x", "xs"])])
        nil = Const("nil", Tlista)
        cons = Const("cons", TFun(Ta, Tlista, Tlista))
        x = Var("x", Ta)
        xs = Var("xs", Tlista)
        x2 = Var("x'", Ta)
        xs2 = Var("xs'", Tlista)
        P = Var("P", TFun(Tlista, boolT))
        xlist = Var("x", Tlista)
        res = [
            AxType("list", 1),
            AxConstant("nil", Tlista),
            AxConstant("cons", TFun(Ta, Tlista, Tlista)),
            Theorem("list_nil_cons_neq", Thm([], logic.neg(eq(nil, cons(x, xs))))),
            Theorem("list_cons_inject", Thm([], imp(eq(cons(x, xs), cons(x2, xs2)), conj(eq(x, x2), eq(xs, xs2))))),
            Theorem("list_induct", Thm([], imp(P(nil), all(x, all(xs, imp(P(xs), P(cons(x, xs))))), P(xlist)))),
            Attribute("list_induct", "var_induct")
        ]
        self.assertEqual(list_ext.data, res)

    def testInductProd(self):
        """Inductive product type ('a, 'b) prod with single constructor Pair."""
        Ta = TVar("a")
        Tb = TVar("b")
        Tab = Type("prod", Ta, Tb)
        prod_ext = induct.add_induct_type(
            "prod", ["a", "b"], [("Pair", TFun(Ta, Tb, Tab), ["a", "b"])])
        a = Var("a", Ta)
        b = Var("b", Tb)
        a2 = Var("a'", Ta)
        b2 = Var("b'", Tb)
        pair = Const("Pair", TFun(Ta, Tb, Tab))
        P = Var("P", TFun(Tab, boolT))
        x = Var("x", Tab)
        # Only one constructor: injectivity + induction, no distinctness theorem.
        res = [
            AxType("prod", 2),
            AxConstant("Pair", TFun(Ta, Tb, Tab)),
            Theorem("prod_Pair_inject", Thm([], imp(eq(pair(a, b), pair(a2, b2)), conj(eq(a, a2), eq(b, b2))))),
            Theorem("prod_induct", Thm([], imp(all(a, all(b, P(pair(a, b)))), P(x)))),
            Attribute("prod_induct", "var_induct")
        ]
        self.assertEqual(prod_ext.data, res)

    def testInductPredicate(self):
        """Inductive predicate 'even' with intro rules and case rule."""
        nat = Type("nat")
        even = Const("even", TFun(nat, boolT))
        zero = Const("zero", nat)
        Suc = Const("Suc", TFun(nat, nat))
        n = Var("n", nat)
        prop_zero = even(zero)
        prop_Suc = Term.mk_implies(even(n), even(Suc(Suc(n))))
        data = [("even_zero", prop_zero), ("even_Suc", prop_Suc)]
        even_ext = induct.add_induct_predicate("even", TFun(nat, boolT), data)
        a1 = Var("_a1", nat)
        P = Var("P", boolT)
        # Intro rules become backward hints; a case-analysis rule is derived.
        res = [
            AxConstant("even", TFun(nat, boolT)),
            Theorem("even_zero", Thm([], even(zero))),
            Attribute("even_zero", "hint_backward"),
            Theorem("even_Suc", Thm.mk_implies(even(n), even(Suc(Suc(n))))),
            Attribute("even_Suc", "hint_backward"),
            Theorem("even_cases", Thm.mk_implies(even(a1), imp(eq(a1,zero), P), all(n, imp(eq(a1,Suc(Suc(n))), even(n), P)), P))
        ]
        self.assertEqual(even_ext.data, res)
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/env python
# Training-session configuration for CCLP on MNIST with 100 labelled samples.
# Variables needed for pre-setting up the session.
session_type = 'train'
# ========== Variables needed for the session itself. ========
# === Variables that are read from the cmd line too. ===
# WARN: Values given in cmd line overwrite these given below.
out_path = "./output/mnist100/"
device = None  # GPU device number
model_to_load = None  # To start from pretrained model/continue training.
plot_save_emb = 0  # Plot embedding if emb_size is 2D. 0: No. 1: save. 2: plot. 3: save & plot.
# === Variables not given from command line ===
# --- Network architecture ---
model = {'feat_extractor_z': 'mnist_cnn', 'emb_z_size': 128, 'emb_z_act': 'elu',
         'batch_norm_decay': 0.99, 'l2_reg_feat': 1e-4, 'l2_reg_classif': 1e-4, 'classifier_dropout': 0.5}
# --- Validation, labelled and unlabelled folds for training ---
dataset = 'mnist'  # mnist, svhn, cifar
val_on_test = True  # If True, validate on test dataset. Else, validate on subset of training data.
num_val_samples = -1  # How many samples to use for validation when training. -1 to use all.
num_lbl_samples = 100  # How many labelled data to learn from. -1 to use all.
num_unlbl_samples = -1  # How many unlabelled data to learn from. -1 to use all.
unlbl_overlap_val = True  # If True and val_on_test=False, unlabelled samples may overlap with validation.
unlbl_overlap_lbl = True  # If True, unlabelled samples can overlap with labelled. If False, they are disjoint.
# --- Batch sampling, normalization, augmentation ---
n_lbl_per_class_per_batch = 10  # How many labelled samples per class in a batch.
n_unlbl_per_batch = 100  # How many unlabelled samples in a batch.
norm_imgs_pre_aug = "zscoreDb"  # None, zscoreDb, zscorePerCase, center0sc1, rescale01, zca, zca_center0sc1.
augm_params = {}
# Augmentation options:
# augm_params = { "reflect":{"apply": True},
#                 "color_inv": {"apply": False, "params": {"p":0.5}},
#                 "rand_crop": {"apply": False, "params": {'transf':[2,2]}},
#                 "blur": {"apply": False, "params": {'p':0.5, 's':1.0, 'sc':1}} }
seed = None
# --- Training loop ---
max_iters = 45000  # Max training iterations
val_during_train = True  # Whether to validate performance every now and then.
val_interval = 500  # Every how many training steps to validate performance.
# Learning rate schedule
lr_sched_type = 'expon_decay'  # 'expon_decay' or 'piecewise'
lr_expon_init = 1e-3  # Only for expon. Initial LR.
lr_expon_decay_factor = 0.333  # Only for expon. How much to decrease.
lr_expon_decay_steps = 5000  # Only for expon. How often to decrease.
lr_piecewise_boundaries = None  # Only for piecewise. When to change LR.
lr_piecewise_values = None  # Only for piecewise. Initial and following values.
# --- Compact Clustering via Label Propagation (CCLP) ---
cc_weight = 1.0  # Weight w in: Ltotal = Lsup + w*Lcclp . Set to 0 to disable CCLP.
cc_steps = 3  # Length of longest chain to optimize. Set to 0 to disable CCLP.
# CCLP is active only when BOTH the weight and the chain length are positive:
# the comments above state that setting either to 0 disables it (the original
# used `or`, which kept the loss on when only one of them was zeroed).
cc_loss_on = (cc_steps > 0) and (cc_weight > 0)  # Set to False to disable.
# Params for creating the graph.
cc_sim_metric = "dot"  # dot or L2, similarity metric for creating the graph.
cc_l2_sigmas_init = 1.0  # Only for L2. float or list of floats per dim.
cc_l2_sigmas_trainable = True  # Only for L2. Whether to learn the sigmas.
cc_l2_sigmas_lr_multipl = 1.0  # Only for L2.
# Secondary configs for CCLP.
cc_sum_over_chains = True  # If False, only the longest chain is optimized.
cc_e_smooth = 0.00001
cc_optim_smooth_mtx = True
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Visualization routines using matplotlib
"""
import copy
import logging
import numpy as np
from astropy import units as u
from matplotlib import pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.colors import Normalize, LogNorm, SymLogNorm
from matplotlib.patches import Ellipse, RegularPolygon, Rectangle, Circle
from numpy import sqrt
from ctapipe.instrument import PixelShape
# Public API of this module.
__all__ = ["CameraDisplay"]
logger = logging.getLogger(__name__)
def polar_to_cart(rho, phi):
    """Convert polar coordinates (rho, phi in radians) to cartesian (x, y).

    (The previous docstring claimed it returned r/theta, which was wrong.)
    """
    x = rho * np.cos(phi)
    y = rho * np.sin(phi)
    return x, y
class CameraDisplay:
    """
    Camera Display using matplotlib.

    Parameters
    ----------
    geometry : `~ctapipe.instrument.CameraGeometry`
        Definition of the Camera/Image
    image: array_like
        array of values corresponding to the pixels in the CameraGeometry.
    ax : `matplotlib.axes.Axes`
        A matplotlib axes object to plot on, or None to create a new one
    title : str (default "Camera")
        Title to put on camera plot
    norm : str or `matplotlib.color.Normalize` instance (default 'lin')
        Normalization for the color scale.
        Supported str arguments are
        - 'lin': linear scale
        - 'log': logarithmic scale (base 10)
    cmap : str or `matplotlib.colors.Colormap` (default 'hot')
        Color map to use (see `matplotlib.cm`)
    allow_pick : bool (default False)
        if True, allow user to click and select a pixel
    autoupdate : bool (default True)
        redraw automatically (otherwise need to call plt.draw())
    autoscale : bool (default True)
        rescale the vmin/vmax values when the image changes.
        This is set to False if `set_limits_*` is called to explicity
        set data limits.

    Notes
    -----
    Speed:
        CameraDisplay is not intended to be very fast (matplotlib
        is not a very speed performant graphics library, it is
        intended for nice output plots). However, most of the
        slowness of CameraDisplay is in the constructor.  Once one is
        displayed, changing the image that is displayed is relatively
        fast and efficient. Therefore it is best to initialize an
        instance, and change the data, rather than generating new
        CameraDisplays.

    Pixel Implementation:
        Pixels are rendered as a
        `matplotlib.collections.PatchCollection` of Polygons (either 6
        or 4 sided).  You can access the PatchCollection directly (to
        e.g. change low-level style parameters) via
        `CameraDisplay.pixels`

    Output:
        Since CameraDisplay uses matplotlib, any display can be
        saved to any output file supported via
        plt.savefig(filename). This includes ``.pdf`` and ``.png``.
    """

    def __init__(
        self,
        geometry,
        image=None,
        ax=None,
        title=None,
        norm="lin",
        cmap=None,
        allow_pick=False,
        autoupdate=True,
        autoscale=True,
        show_frame=True,
    ):
        self.axes = ax if ax is not None else plt.gca()
        self.pixels = None
        self.colorbar = None
        self.autoupdate = autoupdate
        self.autoscale = autoscale
        self._active_pixel = None
        self._active_pixel_label = None
        self._axes_overlays = []

        self.geom = geometry

        if title is None:
            title = f"{geometry.camera_name}"

        # initialize the plot and generate the pixels as a
        # RegularPolyCollection
        patches = []

        if hasattr(self.geom, "mask"):
            self.mask = self.geom.mask
        else:
            self.mask = np.ones_like(self.geom.pix_x.value, dtype=bool)

        pix_x = self.geom.pix_x.value[self.mask]
        pix_y = self.geom.pix_y.value[self.mask]
        pix_width = self.geom.pixel_width.value[self.mask]

        for x, y, w in zip(pix_x, pix_y, pix_width):
            if self.geom.pix_type == PixelShape.HEXAGON:
                # hexagon "width" is flat-to-flat; circumradius = w / sqrt(3)
                r = w / np.sqrt(3)
                patch = RegularPolygon(
                    (x, y),
                    6,
                    radius=r,
                    orientation=self.geom.pix_rotation.to_value(u.rad),
                    fill=True,
                )
            elif self.geom.pix_type == PixelShape.CIRCLE:
                patch = Circle((x, y), radius=w / 2, fill=True)
            elif self.geom.pix_type == PixelShape.SQUARE:
                patch = Rectangle(
                    (x - w / 2, y - w / 2),
                    width=w,
                    height=w,
                    angle=self.geom.pix_rotation.to_value(u.deg),
                    fill=True,
                )
            else:
                # Previously an unknown shape raised NameError on the first
                # pixel or silently reused the previous patch; fail loudly.
                raise ValueError(f"Unsupported pixel shape: {self.geom.pix_type}")

            patches.append(patch)

        self.pixels = PatchCollection(patches, cmap=cmap, linewidth=0)
        self.axes.add_collection(self.pixels)

        # a second, transparent collection used only for highlighting pixels
        self.pixel_highlighting = copy.copy(self.pixels)
        self.pixel_highlighting.set_facecolor("none")
        self.pixel_highlighting.set_linewidth(0)
        self.axes.add_collection(self.pixel_highlighting)

        # Set up some nice plot defaults
        self.axes.set_aspect("equal", "datalim")
        self.axes.set_title(title)
        self.axes.autoscale_view()

        if show_frame:
            self.add_frame_name()

        # set up a patch to display when a pixel is clicked (and
        # pixel_picker is enabled):
        self._active_pixel = copy.copy(patches[0])
        self._active_pixel.set_facecolor("r")
        self._active_pixel.set_alpha(0.5)
        self._active_pixel.set_linewidth(2.0)
        self._active_pixel.set_visible(False)
        self.axes.add_patch(self._active_pixel)

        if hasattr(self._active_pixel, "xy"):
            center = self._active_pixel.xy
        else:
            center = self._active_pixel.center

        self._active_pixel_label = self.axes.text(
            *center, "0", horizontalalignment="center", verticalalignment="center"
        )
        self._active_pixel_label.set_visible(False)

        # enable ability to click on pixel and do something (can be
        # enabled on-the-fly later as well:
        if allow_pick:
            self.enable_pixel_picker()

        if image is not None:
            self.image = image
        else:
            # np.float was removed in NumPy 1.24; the builtin float is the
            # documented equivalent.
            self.image = np.zeros_like(self.geom.pix_id, dtype=float)

        self.norm = norm
        self.auto_set_axes_labels()

    def highlight_pixels(self, pixels, color="g", linewidth=1, alpha=0.75):
        """
        Highlight the given pixels with a colored line around them

        Parameters
        ----------
        pixels : index-like
            The pixels to highlight.
            Can either be a list or array of integers or a
            boolean mask of length number of pixels
        color: a matplotlib conform color
            the color for the pixel highlighting
        linewidth: float
            linewidth of the highlighting in points
        alpha: 0 <= alpha <= 1
            The transparency
        """
        # per-pixel linewidths: 0 everywhere except the requested pixels
        lw_array = np.zeros_like(self.image)
        lw_array[pixels] = linewidth
        self.pixel_highlighting.set_linewidth(lw_array)
        self.pixel_highlighting.set_alpha(alpha)
        self.pixel_highlighting.set_edgecolor(color)
        self._update()

    def enable_pixel_picker(self):
        """ enable ability to click on pixels """
        self.pixels.set_picker(True)  # enable click
        self.pixels.set_pickradius(
            sqrt(u.Quantity(self.geom.pix_area[0]).value) / np.pi
        )
        self.pixels.set_snap(True)  # snap cursor to pixel center
        self.axes.figure.canvas.mpl_connect("pick_event", self._on_pick)

    def set_limits_minmax(self, zmin, zmax):
        """ set the color scale limits from min to max """
        self.pixels.set_clim(zmin, zmax)
        # explicit limits disable automatic rescaling on image updates
        self.autoscale = False
        self._update()

    def set_limits_percent(self, percent=95):
        """ auto-scale the color range to percent of maximum """
        zmin = np.nanmin(self.pixels.get_array())
        zmax = np.nanmax(self.pixels.get_array())
        dz = zmax - zmin
        frac = percent / 100.0
        self.autoscale = False
        self.set_limits_minmax(zmin, zmax - (1.0 - frac) * dz)

    @property
    def norm(self):
        """
        The norm instance of the Display

        Possible values:
        - "lin": linear scale
        - "log": log scale (cannot have negative values)
        - "symlog": symmetric log scale (negative values are ok)
        -  any matplotlib.colors.Normalize instance, e. g. PowerNorm(gamma=-2)
        """
        return self.pixels.norm

    @norm.setter
    def norm(self, norm):
        if norm == "lin":
            self.pixels.norm = Normalize()
        elif norm == "log":
            self.pixels.norm = LogNorm()
            self.pixels.autoscale()  # this is to handle matplotlib bug #5424
        elif norm == "symlog":
            self.pixels.norm = SymLogNorm(linthresh=1.0, base=10)
            self.pixels.autoscale()
        elif isinstance(norm, Normalize):
            self.pixels.norm = norm
        else:
            raise ValueError(
                "Unsupported norm: '{}', options are 'lin',"
                "'log','symlog', or a matplotlib Normalize object".format(norm)
            )

        self.update(force=True)
        self.pixels.autoscale()

    @property
    def cmap(self):
        """
        Color map to use. Either a name or `matplotlib.colors.ColorMap`
        instance, e.g. from `matplotlib.pyplot.cm`
        """
        return self.pixels.get_cmap()

    @cmap.setter
    def cmap(self, cmap):
        self.pixels.set_cmap(cmap)
        self._update()

    @property
    def image(self):
        """The image displayed on the camera (1D array of pixel values)"""
        return self.pixels.get_array()

    @image.setter
    def image(self, image):
        """
        Change the image displayed on the Camera.

        Parameters
        ----------
        image: array_like
            array of values corresponding to the pixels in the CameraGeometry.
        """
        image = np.asanyarray(image)
        if image.shape != self.geom.pix_x.shape:
            raise ValueError(
                (
                    "Image has a different shape {} than the " "given CameraGeometry {}"
                ).format(image.shape, self.geom.pix_x.shape)
            )

        # invalid (NaN/inf) entries are masked so they render as missing
        self.pixels.set_array(np.ma.masked_invalid(image[self.mask]))
        self.pixels.changed()
        if self.autoscale:
            self.pixels.autoscale()
        self._update()

    def _update(self, force=False):
        """ signal a redraw if autoupdate is turned on """
        if self.autoupdate:
            self.update(force)

    def update(self, force=False):
        """ redraw the display now """
        self.axes.figure.canvas.draw()
        if self.colorbar is not None:
            if force is True:
                self.colorbar.update_bruteforce(self.pixels)
            else:
                self.colorbar.update_normal(self.pixels)
            self.colorbar.draw_all()

    def add_colorbar(self, **kwargs):
        """
        add a colorbar to the camera plot
        kwargs are passed to `figure.colorbar(self.pixels, **kwargs)`
        See matplotlib documentation for the supported kwargs:
        http://matplotlib.org/api/figure_api.html#matplotlib.figure.Figure.colorbar
        """
        if self.colorbar is not None:
            raise ValueError(
                "There is already a colorbar attached to this CameraDisplay"
            )
        else:
            if "ax" not in kwargs:
                kwargs["ax"] = self.axes
            self.colorbar = self.axes.figure.colorbar(self.pixels, **kwargs)
        self.update()

    def add_ellipse(self, centroid, length, width, angle, asymmetry=0.0, **kwargs):
        """
        plot an ellipse on top of the camera

        Parameters
        ----------
        centroid: (float, float)
            position of centroid
        length: float
            major axis
        width: float
            minor axis
        angle: float
            rotation angle wrt x-axis about the centroid, anticlockwise, in radians
        asymmetry: float
            3rd-order moment for directionality if known
        kwargs:
            any MatPlotLib style arguments to pass to the Ellipse patch
        """
        ellipse = Ellipse(
            xy=centroid,
            width=length,
            height=width,
            angle=np.degrees(angle),
            fill=False,
            **kwargs,
        )
        self.axes.add_patch(ellipse)
        self.update()
        return ellipse

    def overlay_moments(
        self, hillas_parameters, with_label=True, keep_old=False, **kwargs
    ):
        """helper to overlay ellipse from a `HillasParametersContainer` structure

        Parameters
        ----------
        hillas_parameters: `HillasParametersContainer`
            structuring containing Hillas-style parameterization
        with_label: bool
            If True, show coordinates of centroid and width and length
        keep_old: bool
            If True, to not remove old overlays
        kwargs: key=value
            any style keywords to pass to matplotlib (e.g. color='red'
            or linewidth=6)
        """
        if not keep_old:
            self.clear_overlays()

        # strip off any units
        cen_x = u.Quantity(hillas_parameters.x).value
        cen_y = u.Quantity(hillas_parameters.y).value
        length = u.Quantity(hillas_parameters.length).value
        width = u.Quantity(hillas_parameters.width).value

        el = self.add_ellipse(
            centroid=(cen_x, cen_y),
            length=length * 2,
            width=width * 2,
            angle=hillas_parameters.psi.rad,
            **kwargs,
        )

        self._axes_overlays.append(el)

        if with_label:
            text = self.axes.text(
                cen_x,
                cen_y,
                "({:.02f},{:.02f})\n[w={:.02f},l={:.02f}]".format(
                    hillas_parameters.x,
                    hillas_parameters.y,
                    hillas_parameters.width,
                    hillas_parameters.length,
                ),
                color=el.get_edgecolor(),
            )

            self._axes_overlays.append(text)

    def clear_overlays(self):
        """ Remove added overlays from the axes """
        while self._axes_overlays:
            overlay = self._axes_overlays.pop()
            overlay.remove()

    def _on_pick(self, event):
        """ handler for when a pixel is clicked """
        pix_id = event.ind[-1]
        xx, yy, aa = (
            u.Quantity(self.geom.pix_x[pix_id]).value,
            u.Quantity(self.geom.pix_y[pix_id]).value,
            u.Quantity(np.array(self.geom.pix_area)[pix_id]),
        )
        # pix_type is a PixelShape enum (see __init__), so compare against the
        # enum member; the original called str.startswith("hex") on it, which
        # fails for enums.
        if self.geom.pix_type == PixelShape.HEXAGON:
            self._active_pixel.xy = (xx, yy)
        else:
            rr = sqrt(aa)
            self._active_pixel.xy = (xx - rr / 2.0, yy - rr / 2.0)
        self._active_pixel.set_visible(True)
        self._active_pixel_label.set_x(xx)
        self._active_pixel_label.set_y(yy)
        self._active_pixel_label.set_text(f"{pix_id:003d}")
        self._active_pixel_label.set_visible(True)
        self._update()
        self.on_pixel_clicked(pix_id)  # call user-function

    def on_pixel_clicked(self, pix_id):
        """virtual function to overide in sub-classes to do something special
        when a pixel is clicked
        """
        print(f"Clicked pixel_id {pix_id}")

    def show(self):
        self.axes.figure.show()

    def auto_set_axes_labels(self):
        """ set the axes labels based on the Frame attribute"""
        axes_labels = ("X", "Y")
        if self.geom.frame is not None:
            axes_labels = list(
                self.geom.frame.get_representation_component_names().keys()
            )

        self.axes.set_xlabel(f"{axes_labels[0]} ({self.geom.pix_x.unit})")
        self.axes.set_ylabel(f"{axes_labels[1]} ({self.geom.pix_y.unit})")

    def add_frame_name(self, color="grey"):
        """ label the frame type of the display (e.g. CameraFrame) """
        frame_name = (
            self.geom.frame.__class__.__name__
            if self.geom.frame is not None
            else "Unknown Frame"
        )
        self.axes.text(  # position text relative to Axes
            1.0,
            0.0,
            frame_name,
            ha="right",
            va="bottom",
            transform=self.axes.transAxes,
            color=color,
            fontsize="smaller",
        )
|
<reponame>viitormiiguel/AnalysisFinancial<filename>BuildLex/countWords.py
import sys
import codecs
import nltk
from nltk.corpus import stopwords
from nltk import pos_tag, word_tokenize
import csv
import datetime
from collections import Counter
import re
import math
from textblob import TextBlob as tb
# Timestamp used to locate today's scraped TradingView data folder.
now = datetime.datetime.now()
today = now.strftime("%Y-%m-%d")
# Root folder of the scraped TradingView dataset (machine-specific path).
dTrading = 'C:/Users/vitor/Documents/GetDataset/TradingView/'
# Portuguese stopword list from NLTK (requires the 'stopwords' corpus).
# NOTE(review): apparently unused — RemoveStopWords() rebuilds its own set.
default_stopwords = set(nltk.corpus.stopwords.words('portuguese'))
def RemoveStopWords(instancia):
    """Lowercase the text and drop all Portuguese stopwords (NLTK list)."""
    texto = instancia.lower()
    vocabulario_parada = set(nltk.corpus.stopwords.words('portuguese'))
    restantes = (palavra for palavra in texto.split()
                 if palavra not in vocabulario_parada)
    return " ".join(restantes)
def preProcess(txt):
    """Normalize a Portuguese text snippet for lexicon building.

    Lowercases the text, strips URLs, currency/percent symbols, digits,
    punctuation, a stray emoji separator, and finally stopwords.
    Returns the cleaned text.
    """
    # Conversao para minusculos
    frase = txt.lower()
    # Remover urls
    frase = re.sub(r"http\S+", "", frase)
    # Remoção $ e %
    frase = re.sub('[R$%]', '', frase)
    # Remoção de numeros e hifens.  The original class was '[-10-9]'
    # (literal '-', a redundant '1', and 0-9); '-' is stripped again by the
    # punctuation pass below, so the final output is unchanged.
    frase = re.sub('[-0-9]', '', frase)
    # Remoçao de pontuação
    frase = re.sub(r'[-./?!,":;()\']', '', frase)
    # Remoção do separador ➖ usado nos posts
    frase = re.sub('[➖]', '', frase)
    # Remoção de stopwords
    texto = RemoveStopWords(frase)
    return texto
def tf(word, blob):
    """Term frequency: occurrences of *word* divided by total words in *blob*."""
    total_palavras = len(blob.words)
    return blob.words.count(word) / total_palavras
def n_containing(word, bloblist):
    """Document frequency: how many blobs in *bloblist* contain *word*."""
    contagem = 0
    for documento in bloblist:
        if word in documento.words:
            contagem += 1
    return contagem
def idf(word, bloblist):
    """Smoothed inverse document frequency: log(N / (1 + document frequency))."""
    freq_documental = n_containing(word, bloblist)
    return math.log(len(bloblist) / (1 + freq_documental))
def tfidf(word, blob, bloblist):
    """TF-IDF score of *word* in *blob* relative to the corpus *bloblist*."""
    return idf(word, bloblist) * tf(word, blob)
def algo(b, t, sigla):
    """Append the top-30 TF-IDF words of every blob in *b* to a per-ticker file.

    Parameters
    ----------
    b : list of TextBlob
        Documents to score (usually a single concatenated document).
    t : str
        Polarity tag ('p'/'n'/'nt') written before each word.
    sigla : str
        Ticker symbol used in the output filename.
    """
    out_path = dTrading + today + '/words-' + sigla + '.txt'
    # Context manager guarantees the file is closed even if scoring raises
    # (the original opened and closed the handle manually).
    with open(out_path, 'a+', encoding="utf8") as f1:
        for i, blob in enumerate(b):
            scores = {word: tfidf(word, blob, b) for word in blob.words}
            sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
            for word, score in sorted_words[:30]:
                f1.write(t + ' - ' + word + '\n')
def divideDataset(fonte, ativo, sgl):
    """Split a labelled message CSV by polarity and emit per-class word lists.

    Reads '<fonte><today><ativo>.csv' (';'-separated rows of
    [text, polarity]), concatenates the preprocessed texts per polarity
    class, and writes TF-IDF word rankings for the neutral ('n') and
    positive ('p') classes via algo().  The negative class is computed but
    its output stays disabled, as in the original.
    """
    doc_pos, doc_neu, doc_neg = "", "", ""
    with open(fonte + today + ativo + '.csv') as dados:
        reader = csv.reader(dados, delimiter=';')
        # next(reader)
        rows = [t for t in reader]
    for row in rows:
        try:
            rotulo = row[1]
        except IndexError:
            # Malformed row without a polarity column: skip it
            # (the original swallowed the error with a dummy assignment).
            continue
        texto = preProcess(row[0])
        if 'Positivo' in rotulo:
            doc_pos += "\n" + texto
        if 'Neutro' in rotulo:
            doc_neu += "\n" + texto
        if 'Negativo' in rotulo:
            doc_neg += "\n" + texto
    # Build each TextBlob once after the loop instead of rebuilding it on
    # every matching row (the original did O(n^2) work re-parsing the
    # growing document each iteration; the final blobs are identical).
    bl1 = [tb(doc_pos)] if doc_pos else []
    bl2 = [tb(doc_neu)] if doc_neu else []
    bl3 = [tb(doc_neg)] if doc_neg else []  # unused while 'nt' stays disabled
    algo(bl2, 'n', sgl)
    algo(bl1, 'p', sgl)
    # algo(bl3, 'nt', sgl)
# Generate per-ticker word files for each scraped polarity CSV
# (Brazilian tickers: Bradesco, Ambev, BRF, Cielo, Gol, Itausa, Natura, Petrobras).
divideDataset(dTrading, '/polaritySentiLexPre_bbdc4', 'bbdc4')
divideDataset(dTrading, '/polaritySentiLexPre_abev3', 'abev3')
divideDataset(dTrading, '/polaritySentiLexPre_brfs3', 'brfs3')
divideDataset(dTrading, '/polaritySentiLexPre_ciel3', 'ciel3')
divideDataset(dTrading, '/polaritySentiLexPre_goll4', 'goll4')
divideDataset(dTrading, '/polaritySentiLexPre_itsa4', 'itsa4')
divideDataset(dTrading, '/polaritySentiLexPre_natu3', 'natu3')
divideDataset(dTrading, '/polaritySentiLexPre_petr4', 'petr4')
<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
import functools
import re
# Python 2/3 compatibility shim: on Python 3 the builtin `unicode` is gone,
# so alias it to `str`.
try:
    unicode('a')
except NameError:
    # Only NameError is expected here; the original bare `except:` would
    # also have swallowed unrelated errors (including KeyboardInterrupt).
    unicode = str
from ..logger import get_module_logging
logging = get_module_logging(__name__)
# Registry of NavigatorDomain subclasses/instances known to the module.
navigator_domains = []

# FIXME: hacks!
def isDataset(ds):
    """Duck-type check: treat anything carrying both a ``dstype`` and a
    ``datatype`` attribute as a dataset."""
    return hasattr(ds, 'dstype') and hasattr(ds, 'datatype')

# Qt.UserRole constant (hard-coded to avoid importing Qt here).
QtUserRole = 32
def docname(ds):
    """Return the name under which *ds* is registered in its parent document,
    or None if it is not present."""
    for candidate_name, candidate in ds.document.data.items():
        if candidate == ds:
            return candidate_name
    return None
def node(func):
    """Decorator for functions which should get currentIndex node if no arg is passed

    The wrapped method may receive the node as the keyword argument ``node``
    or as its first positional argument.  If neither is supplied, the
    currently selected model index is used.  Dataset objects and string
    paths are resolved to their tree entry before the wrapped call.
    """
    @functools.wraps(func)
    def node_wrapper(self, *a, **k):
        n = False
        keyword = True
        # Get node from named parameter
        if 'node' in k:
            n = k['node']
        # Or from the first unnamed argument
        elif len(a) >= 1:
            n = a[0]
            keyword = False
        # If node was not specified, get from currentIndex
        if n in (None, False):
            n = self.model().data(self.currentIndex(), role=QtUserRole)
            logging.debug('@node not specified: got selected', n)
        elif isDataset(n):
            # A dataset object was passed: resolve it to its document name.
            n = docname(n)
            logging.debug('@node was a dataset: found path', n)
        # If node was expressed as/converted to string, get its corresponding
        # tree entry
        if isinstance(n, str) or isinstance(n, unicode):
            logging.debug('traversing node', n)
            n = str(n)
            n = self.model().tree.traverse(n)
        # Put the resolved node back into the same slot it came from so the
        # wrapped function's signature is honoured.
        if keyword:
            k['node'] = n
        else:
            a = list(a)
            a[0] = n
            a = tuple(a)
        logging.debug('@node returning', n, type(n), isinstance(n, unicode))
        return func(self, *a, **k)
    return node_wrapper
def nodes(func):
    """Decorator for functions which should get a list of currentIndex nodes if no arg is passed

    The node list may arrive as the keyword argument ``nodes``, as the first
    positional argument, or — if absent or empty — it is collected from the
    widget's current selection.
    """
    @functools.wraps(func)
    def node_wrapper(self, *a, **k):
        n = []
        keyword = True
        # Get node from named parameter
        if 'nodes' in k:
            n = k['nodes']
        # Or from the first unnamed argument
        elif len(a) >= 1:
            n = a[0]
            keyword = False
        # If node was not specified, get from currentIndex
        if not n:
            n = []
            for idx in self.selectedIndexes():
                n0 = self.model().data(idx, role=QtUserRole)
                n.append(n0)
        # Hand the collected list back in the same slot it came from.
        if keyword:
            k['nodes'] = n
        else:
            a = list(a)
            a[0] = n
            a = tuple(a)
        logging.debug('@nodes with', n, type(n), isinstance(n, unicode))
        return func(self, *a, **k)
    return node_wrapper
class NavigatorDomain(object):
    """Base class for navigator domains: pluggable groups of menu actions
    operating on tree nodes of a Navigator widget.

    Subclasses override the ``add_*_menu`` hooks and ``check_node(s)`` to
    declare which nodes they apply to; the corresponding ``build_*_menu``
    wrappers gate those hooks on the checks.
    """

    def __init__(self, navigator):
        # The Navigator widget this domain is attached to.
        self.navigator = navigator
        self.create_shortcuts()

    def create_shortcuts(self):
        """Hook for subclasses to register keyboard shortcuts."""
        pass

    @nodes
    def get_datasets_from_selection(self, nodes=False):
        """Return (path, node) pairs for selected nodes present in the document data."""
        header = [(node.path, node) for node in nodes]
        # NOTE(review): on Python 3 this is a lazy filter object, not a list —
        # confirm callers only iterate it once.
        header = filter(lambda el: el[0] in self.doc.data, header)
        return header

    def iternodes(self, nodes, func, *args, **kwargs):
        """Apply *func* to every node in *nodes* with the given extra arguments."""
        for node in nodes:
            func(node, *args, **kwargs)

    @property
    def model(self):
        """Hack to allow nodes() decorator"""
        return self.navigator.model

    def currentIndex(self, *a, **k):
        # Delegate to the navigator (needed by the node()/nodes() decorators).
        return self.navigator.currentIndex(*a, **k)

    def selectedIndexes(self, *a, **k):
        return self.navigator.selectedIndexesPublic(*a, **k)

    @property
    def mainwindow(self):
        return self.navigator.mainwindow

    @property
    def doc(self):
        # The document currently loaded in the navigator.
        return self.navigator.doc

    def xnames(self, *a, **k):
        return self.navigator.xnames(*a, **k)

    def dsnode(self, *a, **k):
        return self.navigator.dsnode(*a, **k)

    def plot(self, *a, **k):
        return self.navigator.plot(*a, **k)

    def is_loaded(self, node):
        """True if the node has a non-empty dataset attached."""
        return (node.ds is not False) and (len(node.ds) > 0)

    def is_plotted(self, node):
        """True if the node is loaded and currently shown in at least one plot."""
        if not self.is_loaded(node):
            return False
        return len(self.model().is_plotted(node.path)) > 0

    def double_clicked(self, node):
        """Hook: return True if the double-click event was handled."""
        return False

    def check_node(self, node):
        """Check if node pertain to this domain"""
        return True

    def match_node_path(self, node, rule):
        """Match *node*'s path against a newline-separated list of regex rules."""
        if (not node) or (not node.path):
            return False
        regex = re.compile(rule.replace('\n', '|'))
        return regex.search(node.path)

    def check_nodes(self, nodes):
        """Check if multiple nodes selection pertain to this domain"""
        return True

    # Each menu kind below follows the same pattern: ``add_*_menu`` is the
    # subclass hook, ``build_*_menu`` gates it on check_node()/check_nodes().
    def add_base_menu(self, menu, node=False):
        return True

    def build_base_menu(self, menu, node=False):
        if not self.check_node(node):
            return False
        return self.add_base_menu(menu, node)

    def add_file_menu(self, menu, node):
        return True

    def build_file_menu(self, menu, node):
        if not self.check_node(node):
            return False
        return self.add_file_menu(menu, node)

    def add_group_menu(self, menu, node):
        return True

    def build_group_menu(self, menu, node):
        if not self.check_node(node):
            return False
        return self.add_group_menu(menu, node)

    def add_sample_menu(self, menu, node):
        return True

    def build_sample_menu(self, menu, node):
        if not self.check_node(node):
            return False
        return self.add_sample_menu(menu, node)

    def add_dataset_menu(self, menu, node):
        return True

    def build_dataset_menu(self, menu, node):
        if not self.check_node(node):
            return False
        return self.add_dataset_menu(menu, node)

    def add_derived_dataset_menu(self, menu, node):
        return True

    def build_derived_dataset_menu(self, menu, node):
        if not self.check_node(node):
            return False
        return self.add_derived_dataset_menu(menu, node)

    def add_multiary_menu(self, menu, nodes):
        return True

    def build_multiary_menu(self, menu, nodes):
        if not self.check_nodes(nodes):
            return False
        return self.add_multiary_menu(menu, nodes)

    def add_nodoc_menu(self, menu, proxy):
        return False

    def build_nodoc_menu(self, menu, proxy):
        """Build an out-of-anlysis menu"""
        return self.add_nodoc_menu(menu, proxy)
|
import requests
import json
import UnityClasses
from .UnityClasses import *
requests.packages.urllib3.disable_warnings()
class Unity:
""" Class representing an EMC Unity Array """
def __init__(self, ip_addr, username, password):
    """Open a REST session to the array and cache its basic system info."""
    self.ip_addr = ip_addr
    self.username = username
    self.password = password
    self.is_auth = False
    self.api_url = f'https://{ip_addr}/api'
    self.headers = {
        'Accept': 'application/json',
        'Content-type': 'application/json',
        'X-EMC-REST-CLIENT': 'true',
    }
    self.session = requests.Session()
    # First request also primes authentication state via process_response().
    content = self.unity_request('/instances/basicSystemInfo/0').json()['content']
    self.name = content['name']
    self.model = content['model']
    self.software = content['softwareVersion']
def process_response(self, response):
""" Process the HTTPS response and set headers or raise exceptions """
# TODO: work with Exceptions for easier troubleshooting
response.raise_for_status()
if 'EMC-CSRF-TOKEN' not in self.headers:
self.headers['EMC-CSRF-TOKEN'] = response.headers.get('emc-csrf-token')
if self.headers['EMC-CSRF-TOKEN']:
self.is_auth = True
return
def get_from_type(self, url_path, object_type, payload = None):
    """
    Performs a request of all fields for a given object_type unless
    specific fields have been requested as part of the payload

    Returns a list of *object_type* instances for collection responses,
    a single instance for instance responses, or None otherwise.
    """
    def process(content):
        """Warn about additions to the REST API"""
        # Build the object from the reply; if the REST API returned fields
        # our class does not declare, warn and retry with only known fields.
        try:
            obj = object_type(**content)
        except TypeError:
            good, bad = {}, {}
            for key, value in content.items():
                if key in object_type._fields:
                    good[key] = value
                else:
                    bad[key] = value
            from warnings import warn
            warn('Unity REST API call returned unexpected fields: %r' % bad,
                 RuntimeWarning, stacklevel=3)
            obj = object_type(**good)
        return obj
    if not payload:
        payload = dict()
    if 'fields' not in payload:
        # Default: explicitly request every field the class declares.
        payload['fields'] = ",".join(object_type._fields)
    response = self.unity_request(url_path, 'GET', payload = payload).json()
    if 'entries' in response:
        # Collection response: one entry per instance.
        returned_items = []
        for item in response['entries']:
            returned_items.append(process(item['content']))
        return returned_items
    elif 'content' in response:
        # Single-instance response.
        return process(response['content'])
    else:
        return None
def unity_request(self, url_path, method = 'GET', payload = None):
""" Perform a request to the Unity array """
if not payload:
payload = dict()
url = self.api_url + url_path
if method == 'GET':
request_function = self.session.get
elif method == 'POST':
request_function = self.session.post
elif method == 'DELETE':
request_function = self.session.delete
else:
return None
if method != 'POST':
if self.is_auth:
response = request_function(url, verify = False,
headers = self.headers,
params = payload)
else:
response = request_function(url, verify = False,
auth = (self.username, self.password),
headers = self.headers,
params = payload)
else: # For POST requests, we pass data, not payload
payload = json.dumps(payload)
if self.is_auth:
response = request_function(url, verify = False,
headers = self.headers,
data = payload)
else:
response = request_function(url, verify = False,
auth = (self.username, self.password),
headers = self.headers,
data = payload)
self.process_response(response)
return response
def get(self, url_path, payload = None):
""" Wrapper for performing a GET unity request """
return self.unity_request(url_path, method='GET', payload = payload)
def post(self, url_path, payload = None):
""" Wrapper for performing a POST unity request """
return self.unity_request(url_path, method='POST', payload = payload)
def delete(self, url_path, payload = None):
""" Wrapper for performing a DELETE unity request """
return self.unity_request(url_path, method='DELETE', payload = payload)
def get_object(self, unity_type, item_filter = None, item_id=None, item_name=None):
""" Get an object (singular or a collection) """
payload = dict()
# Take the unity_type string passed in and determine the actual object
unity_object = getattr(UnityClasses, "Unity%s" % unity_type)
if item_filter:
payload['filter'] = item_filter
if not item_id and not item_name: # Request is for all objects
response = self.get_from_type('/types/%s/instances' % unity_type, unity_object, payload = payload)
return response
if item_id: # Request is for a specific ID
response = self.get_from_type('/instances/%s/%s' % (unity_type, item_id), unity_object, payload = payload)
elif item_name: # Request is for a specific name
if 'filter' in payload:
payload['filter'] = payload['filter'] + ' && name eq "%s"' % item_name
else:
payload['filter'] = 'name eq "%s"' % item_name
response = self.get_from_type('/types/%s/instances' % unity_type, unity_object, payload = payload)
return response
# Network communications
# -----------------------------------------
def cifsServer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('cifsServer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def dnsServer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('dnsServer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def fileDNSServer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('fileDNSServer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def fileInterface(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('fileInterface',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def fileKerberosServer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('fileKerberosServer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def fileLDAPServer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('fileLDAPServer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def fileNDMPServer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('fileNDMPServer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def fileNISServer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('fileNISServer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def ftpServer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('ftpServer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def ipInterface(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('ipInterface',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def ipPort(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('ipPort',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def iscsiNode(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('iscsiNode',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def iscsiPortal(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('iscsiPortal',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def iscsiSettings(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('iscsiSettings',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def linkAggregation(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('linkAggregation',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def mgmtInterface(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('mgmtInterface',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def mgmtInterfaceSettings(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('mgmtInterfaceSettings',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def nasServer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('nasServer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def nfsServer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('nfsServer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def preferredInterfaceSettings(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('preferredInterfaceSettings',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def route(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('route',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def smtpServer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('smtpServer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def urServer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('urServer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def virusChecker(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('virusChecker',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def vmwareNasPEServer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('vmwareNasPEServer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
# Events and Alerts
# -----------------------------------------
def alert(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('alert',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def alertConfig(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('alertConfig',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def alertConfigSNMPTarget(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('alertConfigSNMPTarget',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def event(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('event',item_filter=item_filter,
item_id=item_id, item_name=item_name)
# Jobs
# -----------------------------------------
def job(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('job',item_filter=item_filter,
item_id=item_id, item_name=item_name)
# Remote Systems
# -----------------------------------------
def cifsShare(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('cifsShare',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def datastore(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('datastore',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def host(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('host',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def hostContainer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('hostContainer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def hostIPPort(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('hostIPPort',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def hostInitiator(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('hostInitiator',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def hostInitiatorPath(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('hostInitiatorPath',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def hostLUN(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('hostLUN',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def hostVVolDatastore(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('hostVVolDatastore',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def nfsShare(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('nfsShare',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def remoteSystem(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('remoteSYstem',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def rpChapSettings(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('rpChapSettings',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def vm(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('vm',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def vmDisk(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('vmDisk',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def vmwarePE(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('vmwarePE',item_filter=item_filter,
item_id=item_id, item_name=item_name)
# Storage Management
# -----------------------------------------
def aclUser(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('aclUser',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def capabilityProfile(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('capabilityProfile',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def dhsmServer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('dhsmServer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def diskGroup(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('diskGroup',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def fastCache(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('fastCache',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def fastVP(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('fastVP',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def filesystem(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('filesystem',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def lun(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('lun',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def pool(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('pool',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def poolConsumer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('poolConsumer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def poolConsumerAllocation(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('poolConsumerAllocation',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def poolUnit(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('poolUnit',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def quotaConfig(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('quotaConfig',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def raidGroup(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('raidGroup',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def storageResource(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('storageResource',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def storageResourceCapabilityProfile(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('storageResourceCapabilityProfile',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def storageTier(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('storageTier',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def treeQuota(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('treeQuota',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def userQuota(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('userQuota',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def virtualVolume(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('virtualVolume',item_filter=item_filter,
item_id=item_id, item_name=item_name)
# Environment Management
# -----------------------------------------
def battery(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('battery',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def dae(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('dae',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def disk(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('disk',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def dpe(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('dpe',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def encryption(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('encryption',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def ethernetPort(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('ethernetPort',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def fan(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('fan',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def fcPort(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('fcPort',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def ioModule(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('ioModule',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def lcc(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('lcc',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def memoryModule(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('memoryModule',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def powerSupply(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('powerSupply',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def sasPort(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('sasPort',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def ssc(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('ssc',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def ssd(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('ssd',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def storageProcessor(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('storageProcessor',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def uncommittedPort(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('uncommittedPort',item_filter=item_filter,
item_id=item_id, item_name=item_name)
# Managing the System
# -----------------------------------------
def basicSystemInfo(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('basicSystemInfo',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def candidateSoftwareVersion(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('candidateSoftwareVersion',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def feature(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('feature',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def installedSoftwareVersion(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('installedSoftwareVersion',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def license(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('license',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def ntpServer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('ntpServer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def remoteSyslog(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('remoteSyslog',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def serviceContract(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('serviceContract',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def softwareUpgradeSession(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('softwareUpgradeSession',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def system(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('system',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def systemInformation(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('systemInformation',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def systemLimit(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('systemLimit',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def systemTime(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('systemTime',item_filter=item_filter,
item_id=item_id, item_name=item_name)
# Monitoring capacity and performance
# -----------------------------------------
def metric(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('metric',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def metricCollection(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('metricCollection',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def metricQueryResult(self, item_filter = None, item_id=None, item_name=None):
""" metricQueryResult is an odd request, as it REQUIRES a specific filter
to be passed to it. For the user, we're taking that as either a part
of the filter, or we're creating the filter for them """
if item_id:
if "queryId" not in item_filter:
if item_filter:
item_filter = "queryId EQ %s && %s" % (item_id, item_filter)
else:
item_filter = "queryId EQ %s" % item_id
if "queryId" not in item_filter:
# TODO, we really should throw an exception here
return None
return self.get_object('metricQueryResult',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def metricRealTimeQuery(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('metricRealTimeQuery',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def metricService(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('metricService',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def metricValue(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('metricValue',item_filter=item_filter,
item_id=item_id, item_name=item_name)
# Protecting Data
# -----------------------------------------
def ldapServer(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('ldapServer',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def remoteInterface(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('remoteInterface',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def replicationInterface(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('replicationInterface',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def replicationSession(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('replicationSession',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def snap(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('snap',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def snapSchedule(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('snapSchedule',item_filter=item_filter,
item_id=item_id, item_name=item_name)
# Quality of Service
# -----------------------------------------
def ioLimitPolicy(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('ioLimitPolicy',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def ioLimitRule(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('ioLimitRule',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def ioLimitSetting(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('ioLimitSetting',item_filter=item_filter,
item_id=item_id, item_name=item_name)
# Servicing the System
# -----------------------------------------
def configCaptureResult(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('configCaptureResult',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def dataCollectionResult(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('dataCollectionResult',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def esrsParam(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('esrsParam',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def esrsPolicymanager(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('esrsPolicymanager',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def serviceAction(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('serviceAction',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def serviceInfo(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('serviceInfo',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def supportAsset(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('supportAsset',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def supportService(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('supportService',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def technicalAdvisory(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('technicalAdvisory',item_filter=item_filter,
item_id=item_id, item_name=item_name)
# Users and Security
# -----------------------------------------
def crl(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('crl',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def loginSessionInfo(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('loginSessionInfo',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def role(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('role',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def roleMapping(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('roleMapping',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def securitySettings(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('securitySettings',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def user(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('user',item_filter=item_filter,
item_id=item_id, item_name=item_name)
def x509Certificate(self, item_filter = None, item_id=None, item_name=None):
return self.get_object('x509Certificate',item_filter=item_filter,
item_id=item_id, item_name=item_name)
# Helper Functions
# -----------------------------------------
def delete_storageResource(self, lun_id):
response = self.delete('/instances/storageResource/%s' % lun_id)
return response
def delete_lun(self, lun_id):
""" Deletes a LUN based on the lun_id """
response = self.delete_storageResource(lun_id)
return response
def create_lun(self, lun_name, pool_id, size, lun_description=None):
""" Creates a new block LUN in pool_id, returns a lun object """
payload = {'name':lun_name,
'lunParameters':{'pool':{'id':pool_id},
'size':size}}
response = self.post('/types/storageResource/action/createLun',payload)
new_id = response.json()['content']['storageResource']['id']
return self.lun(item_id=new_id)
def create_lun_from_obj(self, lun_object):
""" Creates a new block LUN based on a lun_object being passed """
payload = {'name': lun_object.name,
'lunParameters':{'pool':{'id':lun_object.pool},
'size': lun_object.sizeTotal}}
response = self.post('/types/storageResource/action/createLun',payload)
new_id = response.json()['content']['storageResource']['id']
return self.lun(item_id=new_id)
def create_vmware_lun_from_obj(self, lun_object):
""" Creates a new block LUN based on a lun_object being passed """
payload = {'name': lun_object.name,
'lunParameters':{'pool':{'id':lun_object.pool},
'size': lun_object.sizeTotal}}
print payload
response = self.post('/types/storageResource/action/createVmwareLun',payload)
new_id = response.json()['content']['storageResource']['id']
return self.lun(item_id=new_id)
def create_vmware_lun(self, lun_name, pool_id, size, lun_description=None):
""" Creates a new block LUN in pool_id, returns a lun object """
payload = {'name':lun_name,
'lunParameters':{'pool':{'id':pool_id},
'size':size}}
response = self.post('/types/storageResource/action/createVmwareLun',payload)
new_id = response.json()['content']['storageResource']['id']
return self.lun(item_id=new_id)
def __repr__(self):
return "<Unity Array: %s>" % self.ip_addr
|
<gh_stars>10-100
# Copyright 2019 The OpenSDS Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import errno
import os
from cryptography.fernet import Fernet
import logging
from logging.handlers import RotatingFileHandler
# flask server configuration
HOST = "127.0.0.1"
PORT = "5000"
config_file = 'orchestration.conf'
# logging configuration
LOGGING_FILE = "/var/log/opensds/orchestration.log"
LOGGING_FORMAT = "[%(asctime)s] [%(levelname)s] [%(filename)s] " \
"[%(funcName)s():%(lineno)s] [PID:%(process)d TID:%(thread)d] %(message)s"
LOGGING_LEVEL = "INFO"
logger = None
conf = None
def init_logging():
    """Initialise the module-level root logger with a rotating file handler.

    FIXES: the old comment claimed "each will be of 1MB" while maxBytes=10000
    is ~10 KB (comment corrected, behavior kept); and the level is now taken
    from the LOGGING_LEVEL constant, which was previously dead code (it is
    "INFO", so the effective level is unchanged).
    """
    global logger
    # Rotate across 5 backup files of ~10 KB each (maxBytes=10000).
    server_log_file = RotatingFileHandler(
        LOGGING_FILE,
        maxBytes=10000,
        backupCount=5
    )
    logger = logging.getLogger()
    # Level hierarchy: CRITICAL > ERROR > WARNING > INFO > DEBUG > NOTSET.
    # Change LOGGING_LEVEL (e.g. to "DEBUG") to enable debug logging.
    logger.setLevel(getattr(logging, LOGGING_LEVEL, logging.INFO))
    formatter = logging.Formatter(LOGGING_FORMAT)
    server_log_file.setFormatter(formatter)
    logger.addHandler(server_log_file)
def init_config(file):
    """Load the [orchestration] host/port from *file* into HOST/PORT.

    Raises IOError (ENOENT) when the file cannot be read, and propagates
    configparser errors for a malformed file.

    FIX: removed the no-op ``except Exception as ex: raise ex`` wrapper --
    it added nothing and ``raise ex`` restarts the traceback at the handler,
    hiding the original failure point.
    """
    global conf
    global HOST
    global PORT
    conf = configparser.ConfigParser()
    dataset = conf.read(file)
    if len(dataset) == 0:
        logger.error(
            "Failed to open orchestration config file: [%s]" % file)
        raise IOError(
            errno.ENOENT, os.strerror(errno.ENOENT), file)
    HOST = conf['orchestration']['host']
    PORT = conf['orchestration']['port']
def get_workflow_config(file):
    """Read workflow connection settings from *file*.

    Returns a tuple ``(tech, server, user, passwd)`` from the [workflow]
    section.  When ``encrypted_password`` is not 'false', the stored
    ``password`` value is decrypted with the Fernet key in ``phrase``.

    BUG FIX: the encrypted branch previously decrypted the empty placeholder
    string instead of the password stored in the config file (``passwd`` was
    never read from the file before ``passwd.encode()``), and returned raw
    bytes; it now reads the stored ciphertext and returns a str like the
    plaintext branch does.
    """
    global conf
    try:
        conf = configparser.ConfigParser()
        dataset = conf.read(file)
        if len(dataset) == 0:
            logger.error(
                "Failed to open orchestration config file: [%s]" % file)
            raise IOError(
                errno.ENOENT, os.strerror(errno.ENOENT), file)
        server = conf.get('workflow', 'host')
        user = conf.get('workflow', 'username')
        tech = conf.get('workflow', 'tech')
        encrypted = conf.get('workflow', 'encrypted_password')
        if encrypted == 'false':
            passwd = conf.get('workflow', 'password')
        else:
            phrase = conf.get('workflow', 'phrase')
            ciphered_suite = Fernet(phrase.encode())
            stored = conf.get('workflow', 'password')
            passwd = ciphered_suite.decrypt(stored.encode()).decode()
    except Exception as ex:
        print(ex)
        raise  # bare raise keeps the original traceback
    return tech, server, user, passwd
def get_config(file, section, key):
    """Return the value of *key* in *section* of config *file*.

    Raises IOError (ENOENT) when the file cannot be read, and propagates
    configparser errors for a missing section/key.

    FIX: ``raise ex`` replaced with a bare ``raise`` so the original
    traceback is preserved for the caller.
    """
    global conf
    try:
        conf = configparser.ConfigParser()
        dataset = conf.read(file)
        if len(dataset) == 0:
            logger.error(
                "Failed to open orchestration config file: [%s]" % file)
            raise IOError(
                errno.ENOENT, os.strerror(errno.ENOENT), file)
        value = conf.get(section, key)
    except Exception as ex:
        print(ex)
        raise
    return value
# Configure logging at import time so later config-read errors can be logged.
# NOTE(review): this opens LOGGING_FILE as a side effect of importing the
# module -- confirm /var/log/opensds exists in deployment.
init_logging()
# database configuration
DATABASE = {
    'sqlalchemy.url': 'sqlite:///osdsorch.sqlite'
}
|
<filename>skibidi/backend/models.py
from typing import Protocol
from django.db import models
from django.db.models.deletion import CASCADE
from django.contrib.auth.models import User
class Kind(models.Model):
    """A genre/category label that can be attached to anime entries."""
    kind_id = models.AutoField(primary_key=True)
    # Genre names are globally unique.
    kind_name = models.CharField(max_length=255, null=False, unique=True)
    def __str__(self):
        return f"{self.kind_name}"
class Anime(models.Model):
    """One season of a series; (name, season) identifies it uniquely."""
    class Meta:
        unique_together = (('name', 'season'),)
    anime_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255, null=False)
    plot = models.CharField(max_length=1000, null=True, blank=True)
    season = models.PositiveSmallIntegerField(null=False)
    # presumably the latest episode number available -- confirm with the
    # code that updates it
    last_episode = models.PositiveSmallIntegerField(null=False)
    # presumably the first episode's number (some seasons continue numbering
    # from the previous season) -- TODO confirm
    start_number_episode = models.PositiveSmallIntegerField(null=False)
    global_rating = models.PositiveSmallIntegerField(null=True, blank=True)
    path = models.URLField(max_length=255, null=False)
    last_update = models.DateField(max_length=255, null=True, blank=True)
    # NOTE(review): "autodownlodable" is a typo of "autodownloadable", kept
    # as-is for database/migration compatibility.
    autodownlodable = models.BooleanField(null=True, blank=True)
    finished = models.BooleanField(null=True, blank=True)
    img_source = models.CharField(max_length=255, null=False)
    def __str__(self):
        return f"{self.name} {self.season}"
class Episode(models.Model):
    """One episode belonging to an Anime season."""

    episode_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255, null=False)
    # NOTE(review): stored as an integer rather than a boolean — confirm
    # whether this is a view count or a 0/1 flag.
    seen = models.PositiveSmallIntegerField(null=False)
    e_anime = models.ForeignKey(Anime, related_name='e_anime', on_delete=CASCADE, null=False)
    path = models.URLField(null=False)

    def __str__(self):
        return f"{self.e_anime} {self.name}"
class Watching(models.Model):
    """Tracks a user's progress inside one episode of an anime."""

    class Meta:
        # One progress row per (anime, user, episode) triple.
        unique_together = (('w_anime', 'w_user', 'w_episode'), )

    watching_id = models.AutoField(primary_key=True)
    w_user = models.ForeignKey(User, related_name='w_user', on_delete=CASCADE, null=False)
    w_anime = models.ForeignKey(Anime, related_name='w_anime', on_delete=CASCADE, null=False)
    w_episode = models.ForeignKey(Episode, related_name='w_episode', on_delete=CASCADE, null=False)
    # Playback position in seconds.
    seconds = models.PositiveSmallIntegerField(null=False, default=0)  # TODO: convert to hh-mm-ss

    def __str__(self):
        return f"{self.w_episode}"
class KindAnime(models.Model):
    """Many-to-many join table between Anime and Kind."""

    class Meta:
        unique_together = (('ka_anime', 'ka_kind'),)

    kind_anime_id = models.AutoField(primary_key=True)
    ka_anime = models.ForeignKey(Anime, related_name='ka_anime', on_delete=CASCADE, null=False)
    ka_kind = models.ForeignKey(Kind, related_name='ka_kind', on_delete=CASCADE, null=False)

    def __str__(self):
        return f"{self.ka_anime} {self.ka_kind}"
class PersonalKind(models.Model):
    """Many-to-many join table between User and Kind (a user's preferred genres)."""

    class Meta:
        unique_together = (('p_user', 'p_kind'),)

    personal_kind_id = models.AutoField(primary_key=True)
    p_user = models.ForeignKey(User, related_name='p_user', on_delete=CASCADE, null=False)
    p_kind = models.ForeignKey(Kind, related_name='p_kind', on_delete=CASCADE, null=False)

    def __str__(self):
        return f"{self.p_kind}"
import torch
from .util import enable_running_stats, disable_running_stats
import contextlib
from torch.distributed import ReduceOp
class GSAM(torch.optim.Optimizer):
    """Surrogate-gap guided Sharpness-Aware Minimization optimizer wrapper.

    Wraps ``base_optimizer``: each :meth:`step` perturbs the weights along
    the gradient (radius ``rho_t``), recomputes gradients at the perturbed
    point, subtracts the component of the old gradient orthogonal to the new
    one (scaled by ``alpha``), restores the weights, and lets the base
    optimizer apply the final update.

    Parameters
    ----------
    params : iterable
        Parameters to optimize (forwarded to ``torch.optim.Optimizer``).
    base_optimizer : torch.optim.Optimizer
        Optimizer that applies the final update.
    model : torch.nn.Module
        Model being trained (used for ``no_sync()`` and BN-stat toggling).
    gsam_alpha : float
        Weight of the gradient-decomposition correction.
    rho_scheduler : object
        Must expose ``step()`` returning the current perturbation radius.
    adaptive : bool
        If True, scale the perturbation elementwise by ``w**2`` (ASAM-style,
        see :meth:`perturb_weights`).
    perturb_eps : float
        Small constant guarding divisions by (near-)zero norms.
    grad_reduce : str
        ``'mean'`` or ``'sum'`` — cross-worker gradient reduction.
    """

    def __init__(self, params, base_optimizer, model, gsam_alpha, rho_scheduler, adaptive=False, perturb_eps=1e-12, grad_reduce='mean', **kwargs):
        defaults = dict(adaptive=adaptive, **kwargs)
        super(GSAM, self).__init__(params, defaults)
        self.model = model
        self.base_optimizer = base_optimizer
        # Share param_groups with the base optimizer so both see the same groups.
        self.param_groups = self.base_optimizer.param_groups
        self.adaptive = adaptive
        self.rho_scheduler = rho_scheduler
        self.perturb_eps = perturb_eps
        self.alpha = gsam_alpha

        # initialize self.rho_t
        self.update_rho_t()

        # set up reduction for gradient across workers
        if grad_reduce.lower() == 'mean':
            if hasattr(ReduceOp, 'AVG'):
                self.grad_reduce = ReduceOp.AVG
                self.manual_average = False
            else:  # PyTorch <= 1.11.0 does not have AVG, need to manually average across processes
                self.grad_reduce = ReduceOp.SUM
                self.manual_average = True
        elif grad_reduce.lower() == 'sum':
            self.grad_reduce = ReduceOp.SUM
            self.manual_average = False
        else:
            raise ValueError('"grad_reduce" should be one of ["mean", "sum"].')

    @torch.no_grad()
    def update_rho_t(self):
        """Advance the rho scheduler and cache the new perturbation radius."""
        self.rho_t = self.rho_scheduler.step()
        return self.rho_t

    @torch.no_grad()
    def perturb_weights(self, rho=0.0):
        """Move each parameter along its gradient by (up to) radius ``rho``.

        Saves the pre-perturbation gradient as ``state[p]['old_g']`` and the
        applied offset as ``state[p]['e_w']`` so it can be undone later.
        """
        grad_norm = self._grad_norm(weight_adaptive=self.adaptive)
        for group in self.param_groups:
            scale = rho / (grad_norm + self.perturb_eps)
            for p in group["params"]:
                if p.grad is None: continue
                # Keep the gradient at w for the later decomposition step.
                self.state[p]["old_g"] = p.grad.data.clone()
                e_w = p.grad * scale.to(p)
                if self.adaptive:
                    # ASAM-style: scale the offset elementwise by w^2.
                    e_w *= torch.pow(p, 2)
                p.add_(e_w)  # climb to the local maximum "w + e(w)"
                self.state[p]['e_w'] = e_w

    @torch.no_grad()
    def unperturb(self):
        """Undo :meth:`perturb_weights` by subtracting each stored offset."""
        for group in self.param_groups:
            for p in group['params']:
                if 'e_w' in self.state[p].keys():
                    p.data.sub_(self.state[p]['e_w'])

    @torch.no_grad()
    def gradient_decompose(self, alpha=0.0):
        """Remove (scaled by ``alpha``) the old-gradient component orthogonal
        to the perturbed gradient, leaving the GSAM update direction in
        ``p.grad``."""
        # calculate inner product
        inner_prod = 0.0
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None: continue
                inner_prod += torch.sum(
                    self.state[p]['old_g'] * p.grad.data
                )

        # get norm
        new_grad_norm = self._grad_norm()
        old_grad_norm = self._grad_norm(by='old_g')

        # get cosine
        cosine = inner_prod / (new_grad_norm * old_grad_norm + self.perturb_eps)

        # gradient decomposition
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None: continue
                vertical = self.state[p]['old_g'] - cosine * old_grad_norm * p.grad.data / (new_grad_norm + self.perturb_eps)
                p.grad.data.add_(vertical, alpha=-alpha)

    @torch.no_grad()
    def _sync_grad(self):
        """All-reduce gradients across workers (no-op outside distributed runs)."""
        if torch.distributed.is_initialized():  # synchronize final gardients
            for group in self.param_groups:
                for p in group['params']:
                    if p.grad is None: continue
                    if self.manual_average:
                        # SUM then divide, for PyTorch builds without ReduceOp.AVG.
                        torch.distributed.all_reduce(p.grad, op=self.grad_reduce)
                        world_size = torch.distributed.get_world_size()
                        p.grad.div_(float(world_size))
                    else:
                        torch.distributed.all_reduce(p.grad, op=self.grad_reduce)
        return

    @torch.no_grad()
    def _grad_norm(self, by=None, weight_adaptive=False):
        """Global L2 norm of ``p.grad`` (``by=None``) or of ``state[p][by]``.

        NOTE(review): gradient_decompose calls this with the default
        weight_adaptive=False even when self.adaptive is True — confirm
        against the reference GSAM implementation.
        """
        if not by:
            norm = torch.norm(
                torch.stack([
                    ((torch.abs(p.data) if weight_adaptive else 1.0) * p.grad).norm(p=2)
                    for group in self.param_groups for p in group["params"]
                    if p.grad is not None
                ]),
                p=2
            )
        else:
            norm = torch.norm(
                torch.stack([
                    ((torch.abs(p.data) if weight_adaptive else 1.0) * self.state[p][by]).norm(p=2)
                    for group in self.param_groups for p in group["params"]
                    if p.grad is not None
                ]),
                p=2
            )
        return norm

    def load_state_dict(self, state_dict):
        """Restore optimizer state and re-link the base optimizer's groups."""
        super().load_state_dict(state_dict)
        self.base_optimizer.param_groups = self.param_groups

    def maybe_no_sync(self):
        """Return ``model.no_sync()`` under DDP, else a no-op context manager."""
        if torch.distributed.is_initialized():
            return self.model.no_sync()
        else:
            return contextlib.ExitStack()

    @torch.no_grad()
    def set_closure(self, loss_fn, inputs, targets, **kwargs):
        """Bind a zero-arg closure that zeroes grads, runs forward, and backprops.

        The closure is stored as ``self.forward_backward_func`` and returns
        ``(outputs, detached_loss)``.
        """
        # create self.forward_backward_func, which is a function such that
        # self.forward_backward_func() automatically performs forward and backward passes.
        # This function does not take any arguments, and the inputs and targets data
        # should be pre-set in the definition of partial-function
        def get_grad():
            self.base_optimizer.zero_grad()
            with torch.enable_grad():
                outputs = self.model(inputs)
                loss = loss_fn(outputs, targets, **kwargs)
            loss_value = loss.data.clone().detach()
            loss.backward()
            return outputs, loss_value

        self.forward_backward_func = get_grad

    @torch.no_grad()
    def step(self, closure=None):
        """Run one full GSAM step; returns ``(outputs, loss_value)`` from the
        first (unperturbed) forward pass."""
        if closure:
            get_grad = closure
        else:
            get_grad = self.forward_backward_func

        with self.maybe_no_sync():
            # get gradient
            outputs, loss_value = get_grad()

            # perturb weights
            self.perturb_weights(rho=self.rho_t)

            # disable running stats for second pass
            disable_running_stats(self.model)

            # get gradient at perturbed weights
            get_grad()

            # decompose and get new update direction
            self.gradient_decompose(self.alpha)

            # unperturb
            self.unperturb()

        # synchronize gradients across workers
        self._sync_grad()

        # update with new directions
        self.base_optimizer.step()

        # enable running stats
        enable_running_stats(self.model)

        return outputs, loss_value
|
"""
BAE (BAE: BERT-Based Adversarial Examples)
============================================
"""
from textattack.constraints.grammaticality import PartOfSpeech
from textattack.constraints.pre_transformation import (
RepeatModification,
StopwordModification,
)
from textattack.constraints.semantics.sentence_encoders import UniversalSentenceEncoder
from textattack.goal_functions import UntargetedClassification
from textattack.search_methods import GreedyWordSwapWIR
from textattack.transformations import WordSwapMaskedLM
from .attack_recipe import AttackRecipe
class BAEGarg2019(AttackRecipe):
    """Siddhant Garg and Goutham Ramakrishnan, 2019.

    BAE: BERT-based Adversarial Examples for Text Classification.

    https://arxiv.org/pdf/2004.01970

    This is "attack mode" 1 from the paper, BAE-R, word replacement.

    We present 4 attack modes for BAE based on the
        R and I operations, where for each token t in S:
        • BAE-R: Replace token t (See Algorithm 1)
        • BAE-I: Insert a token to the left or right of t
        • BAE-R/I: Either replace token t or insert a
        token to the left or right of t
        • BAE-R+I: First replace token t, then insert a
        token to the left or right of t
    """

    @staticmethod
    def build(model):
        """Assemble the BAE-R attack: MLM word-swap transformation, POS/USE
        constraints, untargeted-classification goal, and delete-based WIR
        greedy search."""
        # "In this paper, we present a simple yet novel technique: BAE (BERT-based
        # Adversarial Examples), which uses a language model (LM) for token
        # replacement to best fit the overall context. We perturb an input sentence
        # by either replacing a token or inserting a new token in the sentence, by
        # means of masking a part of the input and using a LM to fill in the mask."
        #
        # We only consider the top K=50 synonyms from the MLM predictions.
        #
        # [from email correspondance with the author]
        # "When choosing the top-K candidates from the BERT masked LM, we filter out
        # the sub-words and only retain the whole words (by checking if they are
        # present in the GloVE vocabulary)"
        #
        transformation = WordSwapMaskedLM(
            method="bae", max_candidates=50, min_confidence=0.0
        )
        #
        # Don't modify the same word twice or stopwords.
        #
        constraints = [RepeatModification(), StopwordModification()]

        # For the R operations we add an additional check for
        # grammatical correctness of the generated adversarial example by filtering
        # out predicted tokens that do not form the same part of speech (POS) as the
        # original token t_i in the sentence.
        constraints.append(PartOfSpeech(allow_verb_noun_swap=True))

        # "To ensure semantic similarity on introducing perturbations in the input
        # text, we filter the set of top-K masked tokens (K is a pre-defined
        # constant) predicted by BERT-MLM using a Universal Sentence Encoder (USE)
        # (Cer et al., 2018)-based sentence similarity scorer."
        #
        # "[We] set a threshold of 0.8 for the cosine similarity between USE-based
        # embeddings of the adversarial and input text."
        #
        # [from email correspondence with the author]
        # "For a fair comparison of the benefits of using a BERT-MLM in our paper,
        # we retained the majority of TextFooler's specifications. Thus we:
        # 1. Use the USE for comparison within a window of size 15 around the word
        # being replaced/inserted.
        # 2. Set the similarity score threshold to 0.1 for inputs shorter than the
        # window size (this translates roughly to almost always accepting the new text).
        # 3. Perform the USE similarity thresholding of 0.8 with respect to the text
        # just before the replacement/insertion and not the original text (For
        # example: at the 3rd R/I operation, we compute the USE score on a window
        # of size 15 of the text obtained after the first 2 R/I operations and not
        # the original text).
        # ...
        # To address point (3) from above, compare the USE with the original text
        # at each iteration instead of the current one (While doing this change
        # for the R-operation is trivial, doing it for the I-operation with the
        # window based USE comparison might be more involved)."
        #
        # Finally, since the BAE code is based on the TextFooler code, we need to
        # adjust the threshold to account for the missing / pi in the cosine
        # similarity comparison. So the final threshold is 1 - (1 - 0.8) / pi
        # = 1 - (0.2 / pi) = 0.936338023.
        use_constraint = UniversalSentenceEncoder(
            threshold=0.936338023,
            metric="cosine",
            compare_against_original=True,
            window_size=15,
            skip_text_shorter_than_window=True,
        )
        constraints.append(use_constraint)
        #
        # Goal is untargeted classification.
        #
        goal_function = UntargetedClassification(model)
        #
        # "We estimate the token importance Ii of each token
        # t_i ∈ S = [t1, . . . , tn], by deleting ti from S and computing the
        # decrease in probability of predicting the correct label y, similar
        # to (Jin et al., 2019).
        #
        # • "If there are multiple tokens can cause C to misclassify S when they
        # replace the mask, we choose the token which makes Sadv most similar to
        # the original S based on the USE score."
        # • "If no token causes misclassification, we choose the perturbation that
        # decreases the prediction probability P(C(Sadv)=y) the most."
        #
        search_method = GreedyWordSwapWIR(wir_method="delete")

        return BAEGarg2019(goal_function, constraints, transformation, search_method)
|
<gh_stars>10-100
"""Helps to collect information about the host of an experiment."""
import os
import platform
import re
import subprocess
from xml.etree import ElementTree
import warnings
from typing import List
import cpuinfo
from sacred.utils import optional_kwargs_decorator
from sacred.settings import SETTINGS
# Public API of this module.
__all__ = ("host_info_gatherers", "get_host_info", "host_info_getter")

# Legacy global dict of functions that are used
# to collect the host information.  Populated by the deprecated
# `host_info_getter` decorator below.
host_info_gatherers = {}
class IgnoreHostInfo(Exception):
    """Used by host_info_getters to signal that this cannot be gathered.

    Raising it from a gatherer simply omits that key from the host info.
    """
class HostInfoGetter:
    """Pair a zero-argument gathering function with its host-info key."""

    def __init__(self, getter_function, name):
        self.getter_function = getter_function
        self.name = name

    def __call__(self):
        # Calling the wrapper is the same as asking it for its info.
        return self.get_info()

    def get_info(self):
        """Invoke the wrapped gatherer and return its result."""
        return self.getter_function()
def host_info_gatherer(name):
    """Decorator factory: register *name* as the key for the decorated gatherer."""

    def decorate(gather_fn):
        return HostInfoGetter(gather_fn, name)

    return decorate
def check_additional_host_info(additional_host_info: List[HostInfoGetter]):
    """Raise KeyError if any getter's name collides with a default gatherer."""
    reserved = [default.name for default in _host_info_gatherers_list]
    for candidate in additional_host_info:
        if candidate.name not in reserved:
            continue
        raise KeyError(
            (
                "Key {} used in `additional_host_info` already exists as a "
                "default gatherer function. Do not use the following keys: "
                "{}"
            ).format(candidate.name, reserved)
        )
def get_host_info(additional_host_info: List[HostInfoGetter] = None):
    """Collect some information about the machine this experiment runs on.

    Returns
    -------
    dict
        A dictionary with information about the CPU, the OS and the
        Python version of this machine.
    """
    # Build a fresh list so the caller's argument is never mutated; default
    # gatherers go after the caller-supplied ones.
    extra = (additional_host_info or []) + _host_info_gatherers_list

    gatherers = host_info_gatherers.copy()
    for getter in extra:
        gatherers[getter.name] = getter

    collected = {}
    for key, gather in gatherers.items():
        try:
            collected[key] = gather()
        except IgnoreHostInfo:
            # The gatherer declared this info unavailable; skip the key.
            pass
    return collected
@optional_kwargs_decorator
def host_info_getter(func, name=None):
    """
    The decorated function is added to the process of collecting the host_info.

    Deprecated: registers *func* in the global
    ``sacred.host_info.host_info_gatherers`` dictionary, which
    :py:func:`~sacred.host_info.get_host_info` consults when collecting
    host information.

    Parameters
    ----------
    func : callable
        A function that can be called without arguments and returns some
        json-serializable information.
    name : str, optional
        The name of the corresponding entry in host_info.
        Defaults to the name of the function.

    Returns
    -------
    The function itself.
    """
    warnings.warn(
        "The host_info_getter is deprecated. "
        "Please use the `additional_host_info` argument"
        " in the Experiment constructor.",
        DeprecationWarning,
    )
    key = name or func.__name__
    host_info_gatherers[key] = func
    return func
# #################### Default Host Information ###############################
@host_info_gatherer(name="hostname")
def _hostname():
    """Return the network name of this machine."""
    return platform.node()
@host_info_gatherer(name="os")
def _os():
    """Return [OS family, detailed platform string]."""
    return [platform.system(), platform.platform()]
@host_info_gatherer(name="python_version")
def _python_version():
    """Return the running interpreter's version, e.g. '3.10.4'."""
    return platform.python_version()
@host_info_gatherer(name="cpu")
def _cpu():
    """Return a human-readable CPU model string.

    Windows goes straight to py-cpuinfo; macOS/Linux try their fast native
    sources first and fall back to py-cpuinfo on any error.

    NOTE(review): on systems other than Windows/Darwin/Linux this falls
    through and returns None — confirm callers tolerate that.
    """
    if platform.system() == "Windows":
        return _get_cpu_by_pycpuinfo()
    try:
        if platform.system() == "Darwin":
            return _get_cpu_by_sysctl()
        elif platform.system() == "Linux":
            return _get_cpu_by_proc_cpuinfo()
    except Exception:
        # Use pycpuinfo only if other ways fail, since it takes about 1 sec
        return _get_cpu_by_pycpuinfo()
@host_info_gatherer(name="gpus")
def _gpus():
    """Return NVIDIA GPU info parsed from `nvidia-smi -q -x`.

    Raises IgnoreHostInfo (so the key is skipped) when GPU collection is
    disabled in SETTINGS or nvidia-smi is unavailable/fails.
    """
    if not SETTINGS.HOST_INFO.INCLUDE_GPU_INFO:
        return

    try:
        xml = subprocess.check_output(["nvidia-smi", "-q", "-x"]).decode()
    except (FileNotFoundError, OSError, subprocess.CalledProcessError):
        raise IgnoreHostInfo()

    gpu_info = {"gpus": []}
    for child in ElementTree.fromstring(xml):
        if child.tag == "driver_version":
            gpu_info["driver_version"] = child.text
        if child.tag != "gpu":
            continue
        gpu = {
            "model": child.find("product_name").text,
            # Memory size: first token of e.g. "16160 MiB".
            "total_memory": int(
                child.find("fb_memory_usage").find("total").text.split()[0]
            ),
            "persistence_mode": (child.find("persistence_mode").text == "Enabled"),
        }
        gpu_info["gpus"].append(gpu)

    return gpu_info
@host_info_gatherer(name="ENV")
def _environment():
    """Return the configured subset of environment variables that are set."""
    keys_to_capture = SETTINGS.HOST_INFO.CAPTURED_ENV
    return {k: os.environ[k] for k in keys_to_capture if k in os.environ}
# Default gatherers, applied in this order when collecting host info.
_host_info_gatherers_list = [_hostname, _os, _python_version, _cpu, _gpus, _environment]
# ################### Get CPU Information ###############################
def _get_cpu_by_sysctl():
    """Return the CPU brand string on macOS via `sysctl`."""
    # sysctl lives in /usr/sbin, which may be missing from PATH.  Append it
    # only if absent — the original unconditionally appended, growing PATH
    # on every call.
    if "/usr/sbin" not in os.environ["PATH"].split(":"):
        os.environ["PATH"] += ":/usr/sbin"
    command = ["sysctl", "-n", "machdep.cpu.brand_string"]
    return subprocess.check_output(command).decode().strip()
def _get_cpu_by_proc_cpuinfo():
    """Return the CPU model name parsed from /proc/cpuinfo (Linux)."""
    cpuinfo_text = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode()
    pattern = re.compile(r"^\s*model name\s*:")
    for cpuinfo_line in cpuinfo_text.split("\n"):
        if not pattern.match(cpuinfo_line):
            continue
        # Strip the "model name :" prefix and surrounding whitespace.
        return pattern.sub("", cpuinfo_line, 1).strip()
def _get_cpu_by_pycpuinfo():
    """Fallback CPU lookup via py-cpuinfo (slow — about one second)."""
    info = cpuinfo.get_cpu_info()
    # py-cpuinfo >= 7 renamed the "brand" key to "brand_raw"; check both so
    # the gatherer works with either version.
    return info.get("brand") or info.get("brand_raw", "Unknown")
|
import torch
from torch.nn import Sigmoid
from transformers.modeling_bert import BertModelWithHeads
import pytorch_lightning as pl
from pytorch_lightning.metrics import Metric
import numpy as np
from torch.nn.modules.loss import BCELoss, BCEWithLogitsLoss
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
# class SimpleClassifier(BertModelWithHeads):
# def __init__(self, config):
# super().__init__(config)
# self.sig = Sigmoid()
# def forward(self, input_ids, attention_mask):
# out = super().forward(input_ids=input_ids,
# attention_mask=attention_mask,
# return_dict=True)['logits']
# return self.sig(out)
class EarlyStoppingWithColdStart(EarlyStopping):
    """EarlyStopping that ignores validation results for the first N epochs.

    Useful when early epochs are noisy: the usual early-stopping check is
    skipped until `cold_start_epochs` have elapsed.
    """

    def __init__(self, monitor: str, min_delta: float, patience: int, verbose: bool, mode: str, strict: bool, cold_start_epochs: int = 0):
        super().__init__(monitor=monitor, min_delta=min_delta, patience=patience, verbose=verbose, mode=mode, strict=strict)
        # Number of initial epochs during which early stopping is disabled.
        self.cold_start_epoch_number = cold_start_epochs

    def on_validation_end(self, trainer, pl_module):
        # Skip checks during Lightning's sanity-check pass and the cold start.
        if trainer.running_sanity_check:
            return
        elif pl_module.current_epoch < self.cold_start_epoch_number:
            return
        else:
            self._run_early_stopping_check(trainer, pl_module)
class adapterPLWrapper(pl.LightningModule):
    """LightningModule wrapper around an adapter-based multi-label classifier.

    Trains with BCEWithLogitsLoss (uniform pos_weight of 1 per class) and,
    during validation, logs micro/macro precision/recall/F1 plus the
    example-based statistics computed by MyMetrics.

    Parameters
    ----------
    adapterClassifier :
        Model taking (input_ids, attention_mask) and returning a dict with
        a 'logits' entry.
    id2label : dict
        Mapping from class index to class-name string.
    lr : float
        Adam learning rate.
    """

    def __init__(self, adapterClassifier, id2label, lr) -> None:
        super().__init__()
        self.classifier = adapterClassifier
        self.id2label = id2label
        self.lr = lr
        # pos_weight of 1 for every class == plain unweighted BCE on logits.
        self.criterion = BCEWithLogitsLoss(pos_weight=torch.full((len(id2label),), 1.))
        self.sig = Sigmoid()
        self.declare_metrics(self.id2label)

    def declare_metrics(self, id2label):
        """Instantiate the micro/macro multilabel metrics and MyMetrics."""
        self.micro_precision = pl.metrics.classification.precision_recall.Precision(
            num_classes=len(self.id2label), average='micro', multilabel=True)
        self.micro_recall = pl.metrics.classification.precision_recall.Recall(
            num_classes=len(self.id2label), average='micro', multilabel=True)
        self.micro_f1 = pl.metrics.classification.F1(
            num_classes=len(self.id2label), average='micro', multilabel=True)
        self.macro_precision = pl.metrics.classification.precision_recall.Precision(
            num_classes=len(self.id2label), average='macro', multilabel=True)
        self.macro_recall = pl.metrics.classification.precision_recall.Recall(
            num_classes=len(self.id2label), average='macro', multilabel=True)
        self.macro_f1 = pl.metrics.classification.F1(
            num_classes=len(self.id2label), average='macro', multilabel=True)
        self.my_metrics = MyMetrics(id2label=id2label)

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
        return optimizer

    def forward(self, input_ids, attention_mask):
        """Return raw (pre-sigmoid) logits from the wrapped classifier."""
        out = self.classifier(input_ids=input_ids,
                              attention_mask=attention_mask,
                              return_dict=True)
        return out['logits']

    def training_step(self, batch, batch_step):
        batched_sentences, batched_attn, batched_labels = batch
        model_output = self(batched_sentences, batched_attn)
        loss = self.criterion(model_output, batched_labels)
        self.log('losses/train_loss', loss.detach(), on_epoch=True, on_step=False)
        return loss

    def validation_step(self, batch, batch_step):
        batched_sentences, batched_attn, batched_labels = batch
        model_output = self(batched_sentences, batched_attn)
        val_loss = self.criterion(model_output, batched_labels)
        self.log('losses/val_loss', val_loss.detach(), on_epoch=True, on_step=False)
        # Metrics operate on probabilities, so apply the sigmoid here.
        sigmoided_output = self.sig(model_output.detach())
        self.update_metrics(pred=sigmoided_output, labels=batched_labels.detach())
        return val_loss

    def validation_epoch_end(self, out):
        self.compute_metrics()

    def compute_metrics(self):
        """Log every accumulated metric at the end of a validation epoch."""
        self.log('micro/micro_f1', self.micro_f1.compute())
        self.log('micro/micro_p', self.micro_precision.compute())
        self.log('micro/micro_r', self.micro_recall.compute())
        self.log('macro/macro_f1', self.macro_f1.compute())
        self.log('macro/macro_p', self.macro_precision.compute())
        self.log('macro/macro_r', self.macro_recall.compute())

        avg_pred_number, void_predictions, _, _, _, ma_p, ma_r, ma_f1, predicted_class_number = self.my_metrics.compute()

        self.log('example_macro/macro_f1', ma_f1)
        self.log('example_macro/macro_p', ma_p)
        self.log('example_macro/macro_r', ma_r)
        self.log('other_metrics/avg_pred_number', avg_pred_number)
        self.log('other_metrics/void_predictions', void_predictions)
        self.log('other_metrics/predicted_class_number', predicted_class_number)

    def update_metrics(self, pred, labels):
        """Binarize predictions and feed them to every metric object."""
        pred = self.get_discrete_pred(pred)
        labels = labels.int()
        self.micro_f1.update(preds=pred, target=labels)
        self.micro_precision.update(preds=pred, target=labels)
        self.micro_recall.update(preds=pred, target=labels)
        self.macro_f1.update(preds=pred, target=labels)
        self.macro_precision.update(preds=pred, target=labels)
        self.macro_recall.update(preds=pred, target=labels)
        self.my_metrics.update(preds=pred, target=labels)

    def get_discrete_pred(self, pred, threshold=0.5):
        """Threshold sigmoid outputs to 0/1, always keeping the top class.

        Fix: build the 0/1 tensors with ones_like/zeros_like on pred's own
        device instead of hard-coding .cuda(), so CPU runs work too.
        """
        mask = pred > threshold
        discrete_pred = torch.where(mask, torch.ones_like(pred), torch.zeros_like(pred))
        # Guarantee at least one positive class per example (the argmax).
        max_values_and_indices = torch.max(pred, dim=1)
        for dp, i in zip(discrete_pred, max_values_and_indices.indices):
            dp[i] = 1
        return discrete_pred
class MyMetrics(Metric):
    """Example-based multilabel metrics computed on class-name strings.

    Accumulates per-example predicted/true class-name lists across batches;
    compute() returns micro/macro precision/recall/F1 plus prediction-count
    statistics and then resets the accumulated lists.

    NOTE(review): state is kept in plain Python lists rather than via
    add_state(), so values are not synchronized across processes in
    distributed runs — confirm single-process usage.
    """

    def __init__(self, id2label, dist_sync_on_step=False):
        super().__init__(dist_sync_on_step=dist_sync_on_step)
        self.id2label = id2label
        self.label2id = {v: k for k, v in id2label.items()}
        # Per-example lists of predicted / true class-name lists.
        self.pred_classes = []
        self.true_classes = []

    def update(self, preds: torch.Tensor, target: torch.Tensor):
        """Convert one batch to class-name lists and append to the state."""
        p, t = self.logits_and_one_hot_labels_to_string(logits=preds, one_hot_labels=target)
        self.pred_classes.extend(p)
        self.true_classes.extend(t)

    def compute(self):
        """Return the 9-tuple of accumulated metrics and reset state.

        NOTE(review): set.union(*predicted_class) raises if no batches were
        accumulated — assumes update() ran at least once before compute().
        """
        assert len(self.pred_classes) == len(self.true_classes), "Error in id2label traduction"
        avg_pred_number, void_predictions, p, r, f1, ma_p, ma_r, ma_f1 = self.compute_metrics(pred_classes=self.pred_classes,
                                                                                              true_classes=self.true_classes)
        # Count how many distinct classes were ever predicted.
        predicted_class = [set(sub) for sub in self.pred_classes]
        predicted_class_number = len(set.union(*predicted_class))
        # Reset accumulated state for the next epoch.
        self.pred_classes = []
        self.true_classes = []
        return avg_pred_number, void_predictions, p, r, f1, ma_p, ma_r, ma_f1, predicted_class_number

    def logits_and_one_hot_labels_to_string(self, logits, one_hot_labels, no_void=False, threshold=0.5):
        """Map thresholded scores and one-hot labels to lists of class names.

        With no_void=True every example keeps at least its argmax class.
        """
        pred_classes, true_classes = [], []
        for example_logits, example_labels in zip(logits, one_hot_labels):
            mask = example_logits > threshold
            if no_void:
                argmax = np.argmax(example_logits)
                pc = self.id2label[argmax]
                p_classes = [self.id2label[i] for i, m in enumerate(mask) if m]
                if pc not in p_classes:
                    p_classes.append(pc)
                pred_classes.append(p_classes)
            else:
                true_indexes = mask.nonzero(as_tuple=True)[0]
                pred_classes.append([self.id2label[m.item()] for m in true_indexes])
            # Labels are one-hot, so 0.5 cleanly separates 0 from 1.
            mask = example_labels > .5
            true_indexes = mask.nonzero(as_tuple=True)[0]
            true_classes.append([self.id2label[l.item()] for l in true_indexes])
        assert len(pred_classes) == len(true_classes), "Error in id2label traduction"
        return pred_classes, true_classes

    def compute_metrics(self, pred_classes, true_classes):
        """Compute micro- and example-averaged macro precision/recall/F1."""
        correct_counter = 0
        prediction_counter = 0
        true_labels_counter = 0
        precision_sum = 0
        recall_sum = 0
        f1_sum = 0
        void_prediction_counter = 0
        for example_pred, example_true in zip(pred_classes, true_classes):
            assert len(example_true) > 0, 'Error in true label traduction'
            prediction_counter += len(example_pred)
            true_labels_counter += len(example_true)
            if not example_pred:
                # Example with no predicted class at all.
                void_prediction_counter += 1
            else:
                correct_predictions = len(set(example_pred).intersection(set(example_true)))
                correct_counter += correct_predictions
                p = correct_predictions / len(example_pred)
                r = correct_predictions / len(example_true)
                f1 = self.compute_f1(p, r)
                precision_sum += p
                recall_sum += r
                f1_sum += f1
        if prediction_counter:
            micro_p = correct_counter / prediction_counter
        else:
            micro_p = 0
        micro_r = correct_counter / true_labels_counter
        micro_f1 = self.compute_f1(micro_p, micro_r)
        examples_in_dataset = len(true_classes)
        macro_p = precision_sum / examples_in_dataset
        macro_r = recall_sum / examples_in_dataset
        macro_f1 = f1_sum / examples_in_dataset
        avg_pred_number = prediction_counter / examples_in_dataset
        return avg_pred_number, void_prediction_counter, micro_p, micro_r, micro_f1, macro_p, macro_r, macro_f1

    def compute_f1(self, p, r):
        """Harmonic mean of p and r; 0 when both are 0."""
        return (2 * p * r) / (p + r) if (p + r) else 0
|
#!/usr/bin/env python3
import json
import pathlib
HERE = pathlib.Path(__file__).parent
DATAFILE = HERE / "../port/wasm/_common/eiafx/eiafx-data.json"
GOFILE = HERE / "effect_numbers.go"
NAME = {
0: "api.ArtifactSpec_LUNAR_TOTEM",
3: "api.ArtifactSpec_NEODYMIUM_MEDALLION",
4: "api.ArtifactSpec_BEAK_OF_MIDAS",
5: "api.ArtifactSpec_LIGHT_OF_EGGENDIL",
6: "api.ArtifactSpec_DEMETERS_NECKLACE",
7: "api.ArtifactSpec_VIAL_MARTIAN_DUST",
8: "api.ArtifactSpec_ORNATE_GUSSET",
9: "api.ArtifactSpec_THE_CHALICE",
10: "api.ArtifactSpec_BOOK_OF_BASAN",
11: "api.ArtifactSpec_PHOENIX_FEATHER",
12: "api.ArtifactSpec_TUNGSTEN_ANKH",
21: "api.ArtifactSpec_AURELIAN_BROOCH",
22: "api.ArtifactSpec_CARVED_RAINSTICK",
23: "api.ArtifactSpec_PUZZLE_CUBE",
24: "api.ArtifactSpec_QUANTUM_METRONOME",
25: "api.ArtifactSpec_SHIP_IN_A_BOTTLE",
26: "api.ArtifactSpec_TACHYON_DEFLECTOR",
27: "api.ArtifactSpec_INTERSTELLAR_COMPASS",
28: "api.ArtifactSpec_DILITHIUM_MONOCLE",
29: "api.ArtifactSpec_TITANIUM_ACTUATOR",
30: "api.ArtifactSpec_MERCURYS_LENS",
1: "api.ArtifactSpec_TACHYON_STONE",
31: "api.ArtifactSpec_DILITHIUM_STONE",
32: "api.ArtifactSpec_SHELL_STONE",
33: "api.ArtifactSpec_LUNAR_STONE",
34: "api.ArtifactSpec_SOUL_STONE",
39: "api.ArtifactSpec_PROPHECY_STONE",
36: "api.ArtifactSpec_QUANTUM_STONE",
37: "api.ArtifactSpec_TERRA_STONE",
38: "api.ArtifactSpec_LIFE_STONE",
40: "api.ArtifactSpec_CLARITY_STONE",
13: "api.ArtifactSpec_EXTRATERRESTRIAL_ALUMINUM",
14: "api.ArtifactSpec_ANCIENT_TUNGSTEN",
15: "api.ArtifactSpec_SPACE_ROCKS",
16: "api.ArtifactSpec_ALIEN_WOOD",
17: "api.ArtifactSpec_GOLD_METEORITE",
18: "api.ArtifactSpec_TAU_CETI_GEODE",
19: "api.ArtifactSpec_CENTAURIAN_STEEL",
20: "api.ArtifactSpec_ERIDANI_FEATHER",
35: "api.ArtifactSpec_DRONE_PARTS",
41: "api.ArtifactSpec_CELESTIAL_BRONZE",
42: "api.ArtifactSpec_LALANDE_HIDE",
43: "api.ArtifactSpec_SOLAR_TITANIUM",
2: "api.ArtifactSpec_TACHYON_STONE_FRAGMENT",
44: "api.ArtifactSpec_DILITHIUM_STONE_FRAGMENT",
45: "api.ArtifactSpec_SHELL_STONE_FRAGMENT",
46: "api.ArtifactSpec_LUNAR_STONE_FRAGMENT",
47: "api.ArtifactSpec_SOUL_STONE_FRAGMENT",
48: "api.ArtifactSpec_PROPHECY_STONE_FRAGMENT",
49: "api.ArtifactSpec_QUANTUM_STONE_FRAGMENT",
50: "api.ArtifactSpec_TERRA_STONE_FRAGMENT",
51: "api.ArtifactSpec_LIFE_STONE_FRAGMENT",
52: "api.ArtifactSpec_CLARITY_STONE_FRAGMENT",
10000: "api.ArtifactSpec_UNKNOWN",
}
LEVEL = {
0: "api.ArtifactSpec_INFERIOR",
1: "api.ArtifactSpec_LESSER",
2: "api.ArtifactSpec_NORMAL",
3: "api.ArtifactSpec_GREATER",
4: "api.ArtifactSpec_SUPERIOR",
}
RARITY = {
0: "api.ArtifactSpec_COMMON",
1: "api.ArtifactSpec_RARE",
2: "api.ArtifactSpec_EPIC",
3: "api.ArtifactSpec_LEGENDARY",
}
"""
var _effectDeltas = map[item]float64{
{
Name: 0,
Level: 0,
Rarity: 0,
}: 1.0,
}
"""
def main():
    """Generate effect_numbers.go: a Go map from (name, level, rarity) to effect delta."""
    with GOFILE.open("w") as fout:
        fout.write(
            """\
// Code generated by "python3 effect_numbers.py"; DO NOT EDIT.
package artifacts
import "github.com/fanaticscripter/EggContractor/api"
var _effectDeltas = map[item]float64{
"""
        )
        with DATAFILE.open() as fin:
            data = json.load(fin)
        for family in data["artifact_families"]:
            for tier in family["tiers"]:
                # Tiers without effect data are skipped entirely.
                if tier["effects"] is None:
                    continue
                name = NAME[tier["afx_id"]]
                level = LEVEL[tier["afx_level"]]
                for rarity_effect in tier["effects"]:
                    rarity = RARITY[rarity_effect["afx_rarity"]]
                    delta = effect_delta(rarity_effect["effect_size"])
                    fout.write(
                        f"""\
	{{
		Name:   {name},
		Level:  {level},
		Rarity: {rarity},
	}}: {delta},
"""
                    )
        fout.write(
            """\
}
"""
        )
def effect_delta(effect_size: str) -> float:
    """Convert an effect-size string to a numeric delta.

    "Guaranteed" -> 0; "<k>x" -> k - 1 (increase over 1x); "<k>%" -> k/100;
    anything else is parsed as a plain float.
    """
    if effect_size == "Guaranteed":
        return 0
    body, suffix = effect_size[:-1], effect_size[-1]
    if suffix == "x":
        return float(body) - 1
    if suffix == "%":
        return float(body) * 0.01
    return float(effect_size)
# Entry point: regenerate effect_numbers.go from the eiafx data file.
if __name__ == "__main__":
    main()
|
<filename>venv/lib/python3.6/site-packages/ansible_collections/inspur/sm/plugins/modules/edit_ad.py
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Copyright (C) 2020 Inspur Inc. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: edit_ad
version_added: "0.1.0"
author:
- WangBaoshan (@ISIB-group)
short_description: Set active directory information.
description:
- Set active directory information on Inspur server.
options:
enable:
description:
- Active Directory Authentication Status.
choices: ['enable', 'disable']
type: str
name:
description:
- Secret Username.
type: str
code:
description:
- Secret Password.
type: str
timeout:
description:
- The Time Out configuration(15-300).
type: int
domain:
description:
- User Domain Name.
type: str
addr1:
description:
- Domain Controller Server Address1.
type: str
addr2:
description:
- Domain Controller Server Address2.
type: str
addr3:
description:
- Domain Controller Server Address3.
type: str
extends_documentation_fragment:
- inspur.sm.ism
'''
EXAMPLES = '''
- name: Ad test
hosts: ism
connection: local
gather_facts: no
vars:
ism:
host: "{{ ansible_ssh_host }}"
username: "{{ username }}"
password: "{{ password }}"
tasks:
- name: "Set active directory information"
inspur.sm.edit_ad:
enable: "disable"
provider: "{{ ism }}"
- name: "Set active directory information"
inspur.sm.edit_ad:
enable: "enable"
name: "inspur"
code: "123456"
timeout: 120
domain: "inspur.com"
addr1: "172.16.31.10"
addr2: "172.16.31.10"
addr3: "172.16.17.32"
provider: "{{ ism }}"
'''
RETURN = '''
message:
description: Messages returned after module execution.
returned: always
type: str
state:
description: Status after module execution.
returned: always
type: str
changed:
description: Check to see if a change was made on the device.
returned: always
type: bool
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.inspur.sm.plugins.module_utils.ism import (ism_argument_spec, get_connection)
class AD(object):
    """Ansible worker that pushes active-directory settings to an Inspur BMC."""

    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.init_module()
        self.results = dict()

    def init_module(self):
        """Init module object"""
        self.module = AnsibleModule(
            argument_spec=self.spec, supports_check_mode=False)

    def run_command(self):
        """Invoke the `setad` subcommand and record whether the device changed."""
        self.module.params['subcommand'] = 'setad'
        self.results = get_connection(self.module)
        # Use .get() so a response without a 'State' field does not raise
        # KeyError; such a response is simply treated as "not changed".
        if self.results.get('State') == 'Success':
            self.results['changed'] = True

    def show_result(self):
        """Show result"""
        self.module.exit_json(**self.results)

    def work(self):
        """Worker"""
        self.run_command()
        self.show_result()
def main():
    """Module entry point: declare parameters and run the AD worker."""
    # All options are optional; 'enable' is a restricted choice and
    # 'timeout' is the only integer option.
    argument_spec = {
        'enable': {'type': 'str', 'required': False,
                   'choices': ['enable', 'disable']},
        'name': {'type': 'str', 'required': False},
        'code': {'type': 'str', 'required': False},
        'timeout': {'type': 'int', 'required': False},
        'domain': {'type': 'str', 'required': False},
        'addr1': {'type': 'str', 'required': False},
        'addr2': {'type': 'str', 'required': False},
        'addr3': {'type': 'str', 'required': False},
    }
    # Merge the shared ism connection options (host/username/password...).
    argument_spec.update(ism_argument_spec)
    AD(argument_spec).work()
if __name__ == '__main__':
main()
|
<filename>utils/arc.py
from .lib.dequedict import DequeDict
class ARC:
class ARC_Entry:
def __init__(self, oblock):
self.oblock = oblock
def __repr__(self):
return "({})".format(self.oblock)
def __init__(self, cache_size, **kwargs):
self.cache_size = cache_size
self.p = 0
self.T1 = DequeDict()
self.T2 = DequeDict()
self.B1 = DequeDict()
self.B2 = DequeDict()
self.time = 0
def __contains__(self, oblock):
return oblock in self.T1 or oblock in self.T2
def cacheFull(self):
return len(self.T1) + len(self.T2) == self.cache_size
def addToCache(self, oblock):
x = self.ARC_Entry(oblock)
self.T1[oblock] = x
def moveToList(self, entry, arc_list):
arc_list[entry.oblock] = entry
def hit(self, oblock, arc_list):
x = arc_list[oblock]
del arc_list[oblock]
self.moveToList(x, self.T2)
def evictFromList(self, arc_list):
assert (len(arc_list) > 0)
return arc_list.popFirst()
def evict(self):
len_L1 = len(self.T1) + len(self.B1)
len_L2 = len(self.T2) + len(self.B2)
if len_L1 >= self.cache_size:
if len(self.T1) < self.cache_size:
hist_evict = self.evictFromList(self.B1)
evicted = self.replace()
else:
evicted = self.evictFromList(self.T1)
elif len_L1 < self.cache_size and len_L1 + len_L2 >= self.cache_size:
if len_L1 + len_L2 == 2 * self.cache_size:
self.evictFromList(self.B2)
evicted = self.replace()
return evicted.oblock
def replace(self, x_in_B2=False):
if len(self.T1) > 0 and ((x_in_B2 and len(self.T1) == self.p)
or len(self.T1) > self.p):
evicted = self.evictFromList(self.T1)
self.moveToList(evicted, self.B1)
else:
evicted = self.evictFromList(self.T2)
self.moveToList(evicted, self.B2)
return evicted
def missInHistory(self, oblock, history):
x = history[oblock]
x_in_B2 = oblock in self.B2
del history[oblock]
evicted = self.replace(x_in_B2)
self.moveToList(x, self.T2)
return evicted.oblock
def miss(self, oblock):
evicted = None
if oblock in self.B1:
self.p = min(self.p + max(1,
len(self.B2) // len(self.B1)),
self.cache_size)
evicted = self.missInHistory(oblock, self.B1)
elif oblock in self.B2:
self.p = max(self.p - max(1, len(self.B1) // len(self.B2)), 0)
evicted = self.missInHistory(oblock, self.B2)
else:
if self.cacheFull():
evicted = self.evict()
self.addToCache(oblock)
return evicted
def request(self, oblock):
miss = True
evicted = None
self.time += 1
if oblock in self:
miss = False
if oblock in self.T1:
self.hit(oblock, self.T1)
else:
self.hit(oblock, self.T2)
else:
evicted = self.miss(oblock)
return miss, evicted
|
import os
from parsimonious.grammar import Grammar
from parsimonious.nodes import NodeVisitor
class _NodeSetVisitor(NodeVisitor):
    """Folds a parsed node-set expression into a plain set of ints."""

    def visit_sentence(self, _, children):
        # A sentence is either the mask_form or the list_form result.
        return children[0]

    def visit_list_form(self, _, children):
        # children: [first_entry, [(comma, entry), ...]]
        result = children[0]
        for _comma, entry in children[1]:
            result = result.union(entry)
        return result

    def visit_list_entry(self, _, children):
        entry = children[0]
        if isinstance(entry, int):
            return {entry}
        if isinstance(entry, range):
            return set(entry)
        raise Exception("don't know how to handle")

    def visit_mask_form(self, _, children):
        # Mask words are written most-significant first, so the last
        # word in the text supplies bits 0..31, the one before 32..63, ...
        words = [entry for _comma, entry in children[1]]
        words.insert(0, children[0])
        result = set()
        bit = 0
        for word in reversed(words):
            for _unused in range(32):
                if word & 1:
                    result.add(bit)
                word >>= 1
                bit += 1
        return result

    def visit_mask_entry(self, value, _):
        # Eight hex digits -> one 32-bit word.
        return int(value.text, 16)

    def visit_range(self, _, children):
        low, _minus, high = children
        # Ranges are inclusive on both ends.
        return range(low, high + 1)

    def visit_number(self, number, _):
        return int(number.text)

    def generic_visit(self, node, children):
        # Fall back to the visited children (or the node itself for leaves).
        return children or node
class NodeSetParser:
    """Parses node-set strings — list form ("0,2-4") or 32-bit hex mask
    form ("ffffffff,00000001") — into sets of ints."""

    grammar = Grammar(
        """
        sentence = (mask_form / list_form)
        list_form = list_entry (comma list_entry)*
        list_entry = (range / number)
        mask_form = mask_entry (comma mask_entry)*
        mask_entry = ~"[0-9a-f]{8}"
        range = number minus number
        number = ~"(0|[1-9][0-9]*)"
        comma = ","
        minus = "-"
        """)

    @classmethod
    def parse(cls, string_representation):
        """Parse *string_representation* and return a set of node ids."""
        stripped = string_representation.strip()
        tree = cls.grammar.parse(stripped)
        return _NodeSetVisitor().visit(tree)
class NodeSet:
    """A set of node ids with Linux cpulist-style rendering."""

    def __init__(self, initial=None):
        """Accept nothing/empty, a set of ints, or a parseable string."""
        if not initial:
            self.nodes = set()
        elif isinstance(initial, set):
            self.nodes = initial
        elif isinstance(initial, str):
            # Delegates list/mask syntax to the grammar-based parser.
            self.nodes = NodeSetParser.parse(initial)
        else:
            raise Exception("unable to initialize NodeSet")

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_list_form()}"

    def __iter__(self):
        return iter(self.nodes)

    def __len__(self):
        return len(self.nodes)

    def possible(self):
        # Subclasses must report the full universe of node ids.
        raise Exception("must properly override")

    def negation(self):
        """All possible nodes not in this set, as the same subclass."""
        complement = set()
        for cpu in self.possible():
            if cpu not in self.nodes:
                complement.add(cpu)
        return self.__class__(complement)

    def union(self, other):
        """Set union, preserving the subclass of *self*."""
        return self.__class__(self.nodes | other.nodes)

    def intersection(self, other):
        """Set intersection, preserving the subclass of *self*."""
        return self.__class__(self.nodes & other.nodes)

    def to_list_form(self):
        """Render as a Linux-style list, e.g. "0-2,5,7-8"."""
        ordered = sorted(self.nodes)
        segments = []
        i = 0
        while i < len(ordered):
            # Extend j to the end of the consecutive run starting at i.
            j = i
            while j + 1 < len(ordered) and ordered[j + 1] == ordered[j] + 1:
                j += 1
            if i == j:
                segments.append(str(ordered[i]))
            else:
                segments.append("{}-{}".format(ordered[i], ordered[j]))
            i = j + 1
        return ",".join(segments)
class CPUNodeSet(NodeSet):
    """NodeSet of logical CPU ids, validated against sysfs."""

    @staticmethod
    def __cpu_path(node, path=""):
        return f"/sys/devices/system/cpu/cpu{node}{path}"

    def possible(self):
        from vfio_isolate.system import possible_cpus
        return possible_cpus()

    def is_valid(self):
        """True when every CPU in this set exists under sysfs."""
        return all(os.path.exists(self.__cpu_path(cpu)) for cpu in self)
class NUMANodeSet(NodeSet):
    """NodeSet of NUMA node ids, validated against sysfs."""

    @staticmethod
    def __node_path(node, path=""):
        return f"/sys/devices/system/node/node{node}{path}"

    def is_valid(self):
        """True when every NUMA node in this set exists under sysfs."""
        return all(os.path.exists(self.__node_path(node)) for node in self)

    def possible(self):
        from vfio_isolate.system import possible_nodes
        return possible_nodes()

    def get_cpu_nodeset(self):
        """Union of the CPU sets (sysfs cpulist) of all nodes in this set."""
        cpus = CPUNodeSet()
        for node in self:
            with open(self.__node_path(node, "/cpulist")) as f:
                cpus = cpus.union(CPUNodeSet(f.read()))
        return cpus
if __name__ == "__main__":
print(NUMANodeSet("0").negation().get_cpu_nodeset().negation())
|
<reponame>HollyXie/wae
# Copyright 2017 <NAME> Society
# Distributed under the BSD-3 Software license,
# (See accompanying file ./LICENSE.txt or copy at
# https://opensource.org/licenses/BSD-3-Clause)
"""Tensorflow ops used by GAN.
"""
import tensorflow as tf
import numpy as np
import logging
def lrelu(x, leak=0.3):
    """Leaky ReLU: identity for positive x, slope *leak* for negative x."""
    scaled = leak * x
    return tf.maximum(x, scaled)
def batch_norm(opts, _input, is_train, reuse, scope, scale=True):
    """Batch normalization based on tf.contrib.layers.

    Epsilon and the moving-average decay come from the experiment options
    dict (opts).  updates_collections=None forces the moving-average
    updates to run in place with the forward pass.
    """
    return tf.contrib.layers.batch_norm(
        _input, center=True, scale=scale,
        epsilon=opts['batch_norm_eps'], decay=opts['batch_norm_decay'],
        is_training=is_train, reuse=reuse, updates_collections=None,
        scope=scope, fused=False)
def upsample_nn(input_, new_size, scope=None, reuse=None):
    """Nearest-neighbor up-sampling of a NHWC batch to *new_size*."""
    with tf.variable_scope(scope or "upsample_nn", reuse=reuse):
        return tf.image.resize_nearest_neighbor(input_, new_size)
def downsample(input_, d_h=2, d_w=2, conv_filters_dim=None, scope=None, reuse=None):
    """Max-pool down-sampling with window/stride (d_h, d_w).

    NOTE(review): the original docstring said "NN up-sampling", but this
    is a max_pool downsample.  conv_filters_dim is accepted but unused
    here — presumably kept for signature symmetry with conv2d; confirm
    before removing it.
    """
    with tf.variable_scope(scope or "downsample", reuse=reuse):
        result = tf.nn.max_pool(input_, ksize=[1, d_h, d_w, 1], strides=[1, d_h, d_w, 1], padding='SAME')
        return result
def linear(opts, input_, output_dim, scope=None, init='normal', reuse=None):
    """Fully connected linear layer.

    Args:
        input_: [num_points, ...] tensor, where every point can have an
            arbitrary shape. In case points are more than 1 dimensional,
            we will stretch them out in [numpoints, prod(dims)].
        output_dim: number of features for the output. I.e., the second
            dimensionality of the matrix W.
        init: 'normal' uses a random-normal initializer for W; any other
            value uses an identity initializer (which only makes sense
            when in_shape == output_dim).

    Returns:
        [num_points, output_dim] tensor: input_ @ W + b.
    """
    stddev = opts['init_std']
    bias_start = opts['init_bias']
    shape = input_.get_shape().as_list()
    # NOTE(review): this only guards against rank 0; shape[1] below still
    # assumes rank >= 2 — confirm callers always pass batched input.
    assert len(shape) > 0
    in_shape = shape[1]
    if len(shape) > 2:
        # This means points contained in input_ have more than one
        # dimensions. In this case we first stretch them in one
        # dimensional vectors
        input_ = tf.reshape(input_, [-1, np.prod(shape[1:])])
        in_shape = np.prod(shape[1:])
    with tf.variable_scope(scope or "lin", reuse=reuse):
        if init == 'normal':
            matrix = tf.get_variable(
                "W", [in_shape, output_dim], tf.float32,
                tf.random_normal_initializer(stddev=stddev))
        else:
            # Identity initialization; valid only for square W.
            matrix = tf.get_variable(
                "W", [in_shape, output_dim], tf.float32,
                tf.constant_initializer(np.identity(in_shape)))
        bias = tf.get_variable(
            "b", [output_dim],
            initializer=tf.constant_initializer(bias_start))
        return tf.matmul(input_, matrix) + bias
def conv2d(opts, input_, output_dim, d_h=2, d_w=2, scope=None,
           conv_filters_dim=None, padding='SAME', l2_norm=False):
    """Convolutional layer.

    Args:
        input_: should be a 4d tensor with [num_points, dim1, dim2, dim3].
        output_dim: number of output feature maps.
        d_h, d_w: convolution strides.
        conv_filters_dim: square kernel size; defaults to
            opts['conv_filters_dim'].
        l2_norm: if True, L2-normalize the filter tensor along axis 2.
    """
    stddev = opts['init_std']
    bias_start = opts['init_bias']
    shape = input_.get_shape().as_list()
    if conv_filters_dim is None:
        conv_filters_dim = opts['conv_filters_dim']
    k_h = conv_filters_dim
    k_w = k_h
    assert len(shape) == 4, 'Conv2d works only with 4d tensors.'
    with tf.variable_scope(scope or 'conv2d'):
        w = tf.get_variable(
            'filter', [k_h, k_w, shape[-1], output_dim],
            initializer=tf.truncated_normal_initializer(stddev=stddev))
        if l2_norm:
            # Normalizes over axis 2 of the [k_h, k_w, in, out] filter.
            w = tf.nn.l2_normalize(w, 2)
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding=padding)
        biases = tf.get_variable(
            'b', [output_dim],
            initializer=tf.constant_initializer(bias_start))
        conv = tf.nn.bias_add(conv, biases)
        return conv
def deconv2d(opts, input_, output_shape, d_h=2, d_w=2, scope=None, conv_filters_dim=None, padding='SAME'):
    """Transposed convolution (fractional stride convolution) layer.

    Args:
        input_: 4d tensor [num_points, dim1, dim2, dim3].
        output_shape: full 4d shape of the produced tensor.
        d_h, d_w: strides of the transposed convolution.
        conv_filters_dim: square kernel size; defaults to
            opts['conv_filters_dim'].
    """
    stddev = opts['init_std']
    shape = input_.get_shape().as_list()
    if conv_filters_dim is None:
        conv_filters_dim = opts['conv_filters_dim']
    k_h = conv_filters_dim
    k_w = k_h
    assert len(shape) == 4, 'Conv2d_transpose works only with 4d tensors.'
    # Fixed typo in the assertion message ('outut_shape ... 4dimensional').
    assert len(output_shape) == 4, 'output_shape should be 4-dimensional'
    with tf.variable_scope(scope or "deconv2d"):
        # Filter shape for conv2d_transpose is [h, w, out_channels, in_channels].
        w = tf.get_variable(
            'filter', [k_h, k_w, output_shape[-1], shape[-1]],
            initializer=tf.random_normal_initializer(stddev=stddev))
        deconv = tf.nn.conv2d_transpose(
            input_, w, output_shape=output_shape,
            strides=[1, d_h, d_w, 1], padding=padding)
        biases = tf.get_variable(
            'b', [output_shape[-1]],
            initializer=tf.constant_initializer(0.0))
        deconv = tf.nn.bias_add(deconv, biases)
        return deconv
def log_sum_exp(logits):
    """Numerically stable log-sum-exp of *logits* over axis 1.

    Uses the max-shift trick: max_i x_i + log(sum_i exp(x_i - max_i x_i)),
    which avoids overflow in exp().

    NOTE(review): the original returned max + sum(exp(...)) WITHOUT the
    logarithm, which is not log-sum-exp; tf.log restores the intended
    math.  Also note l_max keeps its [batch, 1] shape while the reduced
    sum is [batch] — confirm callers expect the resulting broadcast
    (tf.reduce_logsumexp would be the canonical replacement).
    """
    l_max = tf.reduce_max(logits, axis=1, keep_dims=True)
    return tf.add(l_max,
                  tf.log(tf.reduce_sum(
                      tf.exp(tf.subtract(
                          logits,
                          tf.tile(l_max, tf.stack([1, logits.get_shape()[1]])))),
                      axis=1)))
|
<reponame>mjbrewer/testIndex
# Copyright 2015 Rackspace All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import docker
from docker import errors
import mock
from oslo_config import cfg
import six
from magnum.common import exception
from magnum.conductor.handlers import docker_conductor
from magnum import objects
from magnum.objects import fields
from magnum.tests import base
from mock import patch
CONF = cfg.CONF
class TestDockerConductor(base.BaseTestCase):
    def setUp(self):
        """Create a fresh docker conductor Handler for every test."""
        super(TestDockerConductor, self).setUp()
        self.conductor = docker_conductor.Handler()
    @mock.patch.object(docker_conductor, 'docker_client')
    def test_docker_for_bay(self, mock_docker_client):
        """_docker_for_bay builds a DockerHTTPClient from the bay address."""
        mock_docker = mock.MagicMock()
        mock_docker_client.DockerHTTPClient.return_value = mock_docker
        mock_bay = mock.MagicMock()
        mock_bay.api_address = '1.1.1.1'
        actual_docker = self.conductor._docker_for_bay(mock_bay)
        self.assertEqual(mock_docker, actual_docker)
        # Client must target tcp://<api_address>:2376 with the configured
        # remote API version and timeout.
        args = ('tcp://1.1.1.1:2376', CONF.docker.docker_remote_api_version,
                CONF.docker.default_timeout)
        mock_docker_client.DockerHTTPClient.assert_called_once_with(*args)
    @mock.patch.object(docker_conductor, 'docker_client')
    @mock.patch.object(docker_conductor.objects.Bay, 'get_by_uuid')
    def test_get_docker_client(self, mock_bay_get_by_uuid,
                               mock_docker_client):
        """Passing a Container object resolves its bay and builds a client."""
        mock_docker = mock.MagicMock()
        mock_docker_client.DockerHTTPClient.return_value = mock_docker
        mock_bay = mock.MagicMock()
        mock_bay.api_address = '1.1.1.1'
        mock_bay_get_by_uuid.return_value = mock_bay
        mock_container = mock.MagicMock()
        mock_container.bay_uuid = '9fb6c41e-a7e4-48b8-97c4-702b26034b8e'
        actual_docker = self.conductor.get_docker_client(
            mock.sentinel.context,
            mock_container)
        self.assertEqual(mock_docker, actual_docker)
        args = ('tcp://1.1.1.1:2376', CONF.docker.docker_remote_api_version,
                CONF.docker.default_timeout)
        mock_bay_get_by_uuid.assert_called_once_with(mock.sentinel.context,
                                                     mock_container.bay_uuid)
        mock_docker_client.DockerHTTPClient.assert_called_once_with(*args)
    @mock.patch.object(docker_conductor, 'docker_client')
    @mock.patch.object(docker_conductor.objects.Bay, 'get_by_uuid')
    @mock.patch.object(docker_conductor.objects.Container, 'get_by_uuid')
    def test_get_docker_client_container_uuid(self,
                                              mock_container_get_by_uuid,
                                              mock_bay_get_by_uuid,
                                              mock_docker_client):
        """Passing a container UUID loads the Container first, then its bay."""
        mock_docker = mock.MagicMock()
        mock_docker_client.DockerHTTPClient.return_value = mock_docker
        mock_bay = mock.MagicMock()
        mock_bay.api_address = '1.1.1.1'
        mock_bay_get_by_uuid.return_value = mock_bay
        mock_container = mock.MagicMock()
        mock_container.uuid = '8e48ffb1-754d-4f21-bdd0-1a39bf796389'
        mock_container.bay_uuid = '9fb6c41e-a7e4-48b8-97c4-702b26034b8e'
        mock_container_get_by_uuid.return_value = mock_container
        actual_docker = self.conductor.get_docker_client(mock.sentinel.context,
                                                         mock_container.uuid)
        self.assertEqual(mock_docker, actual_docker)
        args = ('tcp://1.1.1.1:2376', CONF.docker.docker_remote_api_version,
                CONF.docker.default_timeout)
        mock_container_get_by_uuid.assert_called_once_with(
            mock.sentinel.context,
            mock_container.uuid)
        mock_bay_get_by_uuid.assert_called_once_with(mock.sentinel.context,
                                                     mock_container.bay_uuid)
        mock_docker_client.DockerHTTPClient.assert_called_once_with(*args)
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_create(self, mock_get_docker_client):
        """Create pulls the image by repo/tag and creates a STOPPED container."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container = mock.MagicMock()
        mock_container.name = 'some-name'
        mock_container.uuid = 'some-uuid'
        mock_container.image = 'test_image:some_tag'
        mock_container.command = None
        container = self.conductor.container_create(
            None, mock_container)
        utf8_image = self.conductor._encode_utf8(mock_container.image)
        mock_docker.pull.assert_called_once_with('test_image',
                                                 tag='some_tag')
        mock_docker.inspect_image.assert_called_once_with(utf8_image)
        mock_docker.create_container.assert_called_once_with(
            mock_container.image,
            name='some-name',
            hostname='some-uuid',
            command=None)
        self.assertEqual(fields.ContainerStatus.STOPPED, container.status)
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_create_with_command(self, mock_get_docker_client):
        """Same as test_container_create but with an explicit command."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container = mock.MagicMock()
        mock_container.name = 'some-name'
        mock_container.uuid = 'some-uuid'
        mock_container.image = 'test_image:some_tag'
        mock_container.command = 'env'
        container = self.conductor.container_create(
            None, mock_container)
        utf8_image = self.conductor._encode_utf8(mock_container.image)
        mock_docker.pull.assert_called_once_with('test_image',
                                                 tag='some_tag')
        mock_docker.inspect_image.assert_called_once_with(utf8_image)
        mock_docker.create_container.assert_called_once_with(
            mock_container.image,
            name='some-name',
            hostname='some-uuid',
            command='env')
        self.assertEqual(fields.ContainerStatus.STOPPED, container.status)
    def test_encode_utf8_unicode(self):
        """_encode_utf8 round-trips a unicode image name unchanged."""
        image = 'some_image:some_tag'
        unicode_image = six.u(image)
        utf8_image = self.conductor._encode_utf8(unicode_image)
        self.assertEqual(image, utf8_image)
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_create_with_failure(self, mock_get_docker_client):
        """A docker APIError on pull raises and marks the container ERROR."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container = mock.MagicMock()
        mock_container.image = 'test_image:some_tag'
        with patch.object(errors.APIError, '__str__',
                          return_value='hit error') as mock_init:
            mock_docker.pull = mock.Mock(
                side_effect=errors.APIError('Error', '', ''))
            self.assertRaises(exception.ContainerException,
                              self.conductor.container_create,
                              None, mock_container)
            mock_docker.pull.assert_called_once_with(
                'test_image',
                tag='some_tag')
            self.assertFalse(mock_docker.create_container.called)
            mock_init.assert_called_once_with()
            self.assertEqual(fields.ContainerStatus.ERROR,
                             mock_container.status)
    def test_find_container_by_name_not_found(self):
        """A 404 from docker yields an empty dict instead of raising."""
        mock_docker = mock.MagicMock()
        fake_response = mock.MagicMock()
        fake_response.content = 'not_found'
        fake_response.status_code = 404
        mock_docker.list_instances.side_effect = errors.APIError(
            'not_found', fake_response)
        ret = self.conductor._find_container_by_name(mock_docker, '1')
        self.assertEqual({}, ret)
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_delete(self, mock_get_docker_client,
                              mock_find_container):
        """Delete resolves the docker id by name and removes the container."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        self.conductor.container_delete(None, mock_container_uuid)
        mock_docker.remove_container.assert_called_once_with(
            mock_docker_id)
        mock_find_container.assert_called_once_with(mock_docker,
                                                    mock_container_uuid)
    @patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_delete_with_container_not_exist(
            self,
            mock_get_docker_client,
            mock_find_container):
        """Deleting an unknown container is a silent no-op."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = {}
        mock_find_container.return_value = mock_docker_id
        res = self.conductor.container_delete(None, mock_container_uuid)
        self.assertIsNone(res)
        self.assertFalse(mock_docker.remove_container.called)
        mock_find_container.assert_called_once_with(mock_docker,
                                                    mock_container_uuid)
    @patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_delete_with_failure(
            self,
            mock_get_docker_client,
            mock_find_container):
        """A docker APIError during remove raises ContainerException."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        with patch.object(errors.APIError, '__str__',
                          return_value='hit error') as mock_init:
            mock_docker.remove_container = mock.Mock(
                side_effect=errors.APIError('Error', '', ''))
            self.assertRaises(exception.ContainerException,
                              self.conductor.container_delete,
                              None, mock_container_uuid)
            mock_docker.remove_container.assert_called_once_with(
                mock_docker_id)
            mock_find_container.assert_called_once_with(mock_docker,
                                                        mock_container_uuid)
            mock_init.assert_called_once_with()
    @mock.patch.object(objects.Container, 'get_by_uuid')
    @patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_action(self, mock_get_docker_client,
                              mock_find_container, mock_get_by_uuid):
        """_container_action saves the given status on the container."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container = mock.MagicMock()
        mock_get_by_uuid.return_value = mock_container
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        self.conductor._container_action(None, mock_container_uuid,
                                         'fake-status', 'fake-func')
        self.assertEqual('fake-status', mock_container.status)
    @mock.patch.object(objects.Container, 'get_by_uuid')
    @patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_reboot(self, mock_get_docker_client,
                              mock_find_container, mock_get_by_uuid):
        """Reboot restarts the docker container and sets status RUNNING."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container = mock.MagicMock()
        mock_get_by_uuid.return_value = mock_container
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        self.conductor.container_reboot(None, mock_container_uuid)
        mock_docker.restart.assert_called_once_with(mock_docker_id)
        mock_find_container.assert_called_once_with(mock_docker,
                                                    mock_container_uuid)
        self.assertEqual(fields.ContainerStatus.RUNNING, mock_container.status)
    @patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_reboot_with_failure(self,
                                           mock_get_docker_client,
                                           mock_find_container):
        """A docker APIError during restart raises ContainerException."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        with patch.object(errors.APIError, '__str__',
                          return_value='hit error') as mock_init:
            mock_docker.restart = mock.Mock(
                side_effect=errors.APIError('Error', '', ''))
            self.assertRaises(exception.ContainerException,
                              self.conductor.container_reboot,
                              None, mock_container_uuid)
            mock_docker.restart.assert_called_once_with(mock_docker_id)
            mock_find_container.assert_called_once_with(mock_docker,
                                                        mock_container_uuid)
            mock_init.assert_called_once_with()
    @mock.patch.object(objects.Container, 'get_by_uuid')
    @patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_start(self, mock_get_docker_client,
                             mock_find_container, mock_get_by_uuid):
        """Start calls docker.start and sets status RUNNING."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container = mock.MagicMock()
        mock_get_by_uuid.return_value = mock_container
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        self.conductor.container_start(None, mock_container_uuid)
        mock_docker.start.assert_called_once_with(mock_docker_id)
        mock_find_container.assert_called_once_with(mock_docker,
                                                    mock_container_uuid)
        self.assertEqual(fields.ContainerStatus.RUNNING, mock_container.status)
    @patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_start_with_failure(self,
                                          mock_get_docker_client,
                                          mock_find_container):
        """A docker APIError during start raises ContainerException."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        with patch.object(errors.APIError, '__str__',
                          return_value='hit error') as mock_init:
            mock_docker.start = mock.Mock(
                side_effect=errors.APIError('Error', '', ''))
            self.assertRaises(exception.ContainerException,
                              self.conductor.container_start,
                              None, mock_container_uuid)
            mock_docker.start.assert_called_once_with(mock_docker_id)
            mock_find_container.assert_called_once_with(mock_docker,
                                                        mock_container_uuid)
            mock_init.assert_called_once_with()
    @mock.patch.object(objects.Container, 'get_by_uuid')
    @patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_stop(self, mock_get_docker_client,
                            mock_find_container, mock_get_by_uuid):
        """Stop calls docker.stop and sets status STOPPED."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container = mock.MagicMock()
        mock_get_by_uuid.return_value = mock_container
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        self.conductor.container_stop(None, mock_container_uuid)
        mock_docker.stop.assert_called_once_with(mock_docker_id)
        mock_find_container.assert_called_once_with(mock_docker,
                                                    mock_container_uuid)
        self.assertEqual(fields.ContainerStatus.STOPPED, mock_container.status)
    @patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_stop_with_failure(self, mock_get_docker_client,
                                         mock_find_container):
        """A docker APIError during stop raises ContainerException."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        with patch.object(errors.APIError, '__str__',
                          return_value='hit error') as mock_init:
            mock_docker.stop = mock.Mock(
                side_effect=errors.APIError('Error', '', ''))
            self.assertRaises(exception.ContainerException,
                              self.conductor.container_stop,
                              None, mock_container_uuid)
            mock_docker.stop.assert_called_once_with(mock_docker_id)
            mock_find_container.assert_called_once_with(mock_docker,
                                                        mock_container_uuid)
            mock_init.assert_called_once_with()
    @mock.patch.object(objects.Container, 'get_by_uuid')
    @patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_pause(self, mock_get_docker_client,
                             mock_find_container, mock_get_by_uuid):
        """Pause calls docker.pause and sets status PAUSED."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container = mock.MagicMock()
        mock_get_by_uuid.return_value = mock_container
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        self.conductor.container_pause(None, mock_container_uuid)
        mock_docker.pause.assert_called_once_with(mock_docker_id)
        mock_find_container.assert_called_once_with(mock_docker,
                                                    mock_container_uuid)
        self.assertEqual(fields.ContainerStatus.PAUSED, mock_container.status)
    @patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_pause_with_failure(self, mock_get_docker_client,
                                          mock_find_container):
        """A docker APIError during pause raises ContainerException."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        with patch.object(errors.APIError, '__str__',
                          return_value='hit error') as mock_init:
            mock_docker.pause = mock.Mock(
                side_effect=errors.APIError('Error', '', ''))
            self.assertRaises(exception.ContainerException,
                              self.conductor.container_pause,
                              None, mock_container_uuid)
            mock_docker.pause.assert_called_once_with(mock_docker_id)
            mock_find_container.assert_called_once_with(mock_docker,
                                                        mock_container_uuid)
            mock_init.assert_called_once_with()
    @mock.patch.object(objects.Container, 'get_by_uuid')
    @patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_unpause(self, mock_get_docker_client,
                               mock_find_container, mock_get_by_uuid):
        """Unpause calls docker.unpause and sets status RUNNING."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container = mock.MagicMock()
        mock_get_by_uuid.return_value = mock_container
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        self.conductor.container_unpause(None, mock_container_uuid)
        mock_docker.unpause.assert_called_once_with(mock_docker_id)
        mock_find_container.assert_called_once_with(mock_docker,
                                                    mock_container_uuid)
        self.assertEqual(fields.ContainerStatus.RUNNING, mock_container.status)
    @patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_unpause_with_failure(self,
                                            mock_get_docker_client,
                                            mock_find_container):
        """A docker APIError during unpause raises ContainerException."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        with patch.object(errors.APIError, '__str__',
                          return_value='hit error') as mock_init:
            mock_docker.unpause = mock.Mock(
                side_effect=errors.APIError('Error', '', ''))
            self.assertRaises(exception.ContainerException,
                              self.conductor.container_unpause,
                              None, mock_container_uuid)
            mock_docker.unpause.assert_called_once_with(mock_docker_id)
            mock_find_container.assert_called_once_with(mock_docker,
                                                        mock_container_uuid)
            mock_init.assert_called_once_with()
    @mock.patch.object(objects.Container, 'get_by_uuid')
    @patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_show(self, mock_get_docker_client,
                            mock_find_container, mock_get_by_uuid):
        """Show inspects the docker container resolved by name."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container = mock.MagicMock()
        mock_get_by_uuid.return_value = mock_container
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        self.conductor.container_show(None, mock_container_uuid)
        mock_docker.inspect_container.assert_called_once_with(
            mock_docker_id)
        mock_find_container.assert_called_once_with(mock_docker,
                                                    mock_container_uuid)
    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_show_with_running_state(self, mock_get_docker_client,
                                               mock_find_container,
                                               mock_get_by_uuid):
        """Docker state Running=True maps to ContainerStatus.RUNNING."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container = mock.MagicMock()
        mock_get_by_uuid.return_value = mock_container
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        mock_container_detail = {'State': {'Error': '',
                                           'Running': True,
                                           'Paused': False}}
        mock_docker.inspect_container.return_value = mock_container_detail
        self.conductor.container_show(None, mock_container_uuid)
        self.assertEqual(fields.ContainerStatus.RUNNING, mock_container.status)
    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_show_with_stop_state(self, mock_get_docker_client,
                                            mock_find_container,
                                            mock_get_by_uuid):
        """Docker state Running=False/Paused=False maps to STOPPED."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container = mock.MagicMock()
        mock_get_by_uuid.return_value = mock_container
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        mock_container_detail = {'State': {'Error': '',
                                           'Running': False,
                                           'Paused': False}}
        mock_docker.inspect_container.return_value = mock_container_detail
        self.conductor.container_show(None, mock_container_uuid)
        self.assertEqual(fields.ContainerStatus.STOPPED, mock_container.status)
    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_show_with_pause_state(self, mock_get_docker_client,
                                             mock_find_container,
                                             mock_get_by_uuid):
        """Docker state Paused=True maps to ContainerStatus.PAUSED."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container = mock.MagicMock()
        mock_get_by_uuid.return_value = mock_container
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        mock_container_detail = {'State': {'Error': '',
                                           'Running': False,
                                           'Paused': True}}
        mock_docker.inspect_container.return_value = mock_container_detail
        self.conductor.container_show(None, mock_container_uuid)
        self.assertEqual(fields.ContainerStatus.PAUSED, mock_container.status)
    @mock.patch.object(objects.Container, 'get_by_uuid')
    @mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
    @mock.patch.object(docker_conductor.Handler, 'get_docker_client')
    def test_container_show_with_error_status(self, mock_get_docker_client,
                                              mock_find_container,
                                              mock_get_by_uuid):
        """A truthy docker 'Error' field maps to ContainerStatus.ERROR."""
        mock_docker = mock.MagicMock()
        mock_get_docker_client.return_value = mock_docker
        mock_container = mock.MagicMock()
        mock_get_by_uuid.return_value = mock_container
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
        mock_docker_id = '2703ef2b705d'
        mock_find_container.return_value = mock_docker_id
        mock_container_detail = {'State': {'Error': True,
                                           'Running': False,
                                           'Paused': False}}
        mock_docker.inspect_container.return_value = mock_container_detail
        self.conductor.container_show(None, mock_container_uuid)
        self.assertEqual(fields.ContainerStatus.ERROR, mock_container.status)
@mock.patch.object(objects.Container, 'get_by_uuid')
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_show_with_failure(self, mock_get_docker_client,
mock_find_container,
mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_get_by_uuid.return_value = mock.MagicMock()
mock_container_uuid = 'd545a92d-609a-428f-8edb-1d6b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
with patch.object(errors.APIError, '__str__',
return_value='hit error') as mock_init:
mock_docker.inspect_container = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.assertRaises(exception.ContainerException,
self.conductor.container_show,
None, mock_container_uuid)
mock_docker.inspect_container.assert_called_once_with(
mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
mock_init.assert_called_once_with()
@mock.patch.object(objects.Container, 'get_by_uuid')
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_show_with_not_found(self, mock_get_docker_client,
mock_find_container,
mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_get_by_uuid.return_value = mock_container
mock_container_uuid = 'd545a92d-609a-428f-8edb-1d6b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
with patch.object(errors.APIError, '__str__',
return_value='404 error') as mock_init:
mock_docker.inspect_container = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.conductor.container_show(None, mock_container_uuid)
mock_docker.inspect_container.assert_called_once_with(
mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
mock_init.assert_called_once_with()
self.assertEqual(fields.ContainerStatus.ERROR,
mock_container.status)
@mock.patch.object(objects.Container, 'get_by_uuid')
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_show_with_not_found_from_docker(self,
mock_get_docker_client,
mock_find_container,
mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_get_by_uuid.return_value = mock_container
mock_container_uuid = 'd545a92d-609a-428f-8edb-1d6b02ad20ca1'
mock_docker_id = {}
mock_find_container.return_value = mock_docker_id
self.conductor.container_show(None, mock_container_uuid)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
self.assertEqual(fields.ContainerStatus.ERROR, mock_container.status)
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_exec(self, mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
docker.version = '1.2.2'
mock_find_container.return_value = mock_docker_id
mock_create_res = mock.MagicMock()
mock_docker.exec_create.return_value = mock_create_res
self.conductor.container_exec(None, mock_container_uuid, 'ls')
mock_docker.exec_create.assert_called_once_with(mock_docker_id, 'ls',
True, True, False)
mock_docker.exec_start.assert_called_once_with(mock_create_res,
False, False, False)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_exec_deprecated(self, mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
docker.version = '0.7.0'
mock_find_container.return_value = mock_docker_id
mock_create_res = mock.MagicMock()
mock_docker.exec_create.return_value = mock_create_res
self.conductor.container_exec(None, mock_container_uuid, 'ls')
mock_docker.execute.assert_called_once_with(mock_docker_id, 'ls')
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_exec_with_failure(self,
mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
docker.version = '1.2.2'
mock_find_container.return_value = mock_docker_id
with patch.object(errors.APIError, '__str__',
return_value='hit error') as mock_init:
mock_docker.exec_create = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.assertRaises(exception.ContainerException,
self.conductor.container_exec,
None, mock_container_uuid, 'ls')
mock_docker.exec_create.assert_called_once_with(mock_docker_id,
'ls', True, True,
False)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
mock_init.assert_called_once_with()
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_exec_deprecated_with_failure(self,
mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
docker.version = '0.7.0'
mock_find_container.return_value = mock_docker_id
with patch.object(errors.APIError, '__str__',
return_value='hit error') as mock_init:
mock_docker.execute = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.assertRaises(exception.ContainerException,
self.conductor.container_exec,
None, mock_container_uuid, 'ls')
mock_docker.execute.assert_called_once_with(mock_docker_id, 'ls')
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
mock_init.assert_called_once_with()
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_logs(self, mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
self.conductor.container_logs(None, mock_container_uuid)
mock_docker.get_container_logs.assert_called_once_with(
mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_logs_with_failure(self, mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
with patch.object(errors.APIError, '__str__',
return_value='hit error') as mock_init:
mock_docker.get_container_logs = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.assertRaises(exception.ContainerException,
self.conductor.container_logs,
None, mock_container_uuid)
mock_docker.get_container_logs.assert_called_once_with(
mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
mock_init.assert_called_once_with()
def test_container_common_exception(self):
for action in ('container_exec', 'container_logs', 'container_show',
'container_delete', 'container_create',
'_container_action'):
func = getattr(self.conductor, action)
with patch.object(docker_conductor,
'docker_client') as mock_docker:
mock_docker.side_effect = Exception("So bad")
self.assertRaises(exception.ContainerException,
func, None, None)
|
import json
import logging
from typing import List
import arrow
import cherrypy
import jose
import requests
from cryptography.fernet import InvalidToken
from jose import jwt
from simple_settings import settings
from deli.counter.auth.permission import SYSTEM_PERMISSIONS
from deli.kubernetes.resources.project import Project
from deli.kubernetes.resources.v1alpha1.iam_group.model import IAMSystemGroup
from deli.kubernetes.resources.v1alpha1.iam_policy.model import IAMPolicy
from deli.kubernetes.resources.v1alpha1.iam_role.model import IAMSystemRole, IAMProjectRole
from deli.kubernetes.resources.v1alpha1.iam_service_account.model import SystemServiceAccount, ProjectServiceAccount
from deli.kubernetes.resources.v1alpha1.instance.model import Instance
class Token(object):
    """Authentication token for the API.

    A token is either a Fernet-encrypted "native" token issued by this
    service, or an OpenID Connect JWT from the configured OAuth provider.
    """
    def __init__(self):
        # Identity the token represents (user or service-account email).
        self.email = None
        # Populated for service-account tokens; None for user tokens.
        self.service_account = None
        self.metadata = {} # Extra metadata the token contains, currently only used for service accounts
        # Default lifetime for a freshly built token: 24 hours from now.
        self.expires_at = arrow.now('UTC').shift(days=+1)
        # System-level role names resolved from the "system" IAM policy.
        self.system_roles = []
        # Group claims from the OAuth provider (JWT tokens only).
        self.oauth_groups = []
        self.logger = logging.getLogger("%s.%s" % (self.__module__, self.__class__.__name__))
    def get_oauth_rsa_key(self, unverified_header):
        """Fetch the provider's JWKS and return the RSA key whose kid matches.

        Raises cherrypy.HTTPError(424) when the provider cannot be reached
        and 401 when no key matches the JWT header's kid.
        """
        r = requests.get(settings.OPENID_ISSUER_URL + ".well-known/openid-configuration")
        if r.status_code != 200:
            try:
                r.raise_for_status()
            except requests.exceptions.RequestException as e:
                self.logger.exception("Backend error while discovering OAuth configuration from provider")
                raise cherrypy.HTTPError(424,
                                         "Backend error while discovering OAuth configuration from provider: "
                                         + e.response.text)
        well_known_data = r.json()
        # Second round-trip: the discovery document points at the JWKS URI.
        r = requests.get(well_known_data['jwks_uri'])
        if r.status_code != 200:
            try:
                r.raise_for_status()
            except requests.exceptions.RequestException as e:
                self.logger.exception("Backend error while discovering OAuth keys")
                raise cherrypy.HTTPError(424,
                                         "Backend error while discovering OAuth keys from provider: " + e.response.text)
        rsa_key = {}
        for key in r.json()["keys"]:
            if key["kid"] == unverified_header["kid"]:
                rsa_key = {
                    "kty": key["kty"],
                    "kid": key["kid"],
                    "use": key["use"],
                    "n": key["n"],
                    "e": key["e"]
                }
                break
        if len(rsa_key) == 0:
            # Header has a invalid kid
            raise cherrypy.HTTPError(401, 'Invalid Authorization Token.')
        return rsa_key
    @classmethod
    def unmarshal(cls, token_string, fernet):
        """Build a Token from a raw token string.

        Tries Fernet decryption first (native tokens); on InvalidToken falls
        back to verifying the string as an OpenID Connect JWT.  Any invalid,
        expired or unresolvable token raises cherrypy.HTTPError(401).
        """
        token = cls()
        try:
            token_data_bytes = fernet.decrypt(token_string.encode())
            token_json = json.loads(token_data_bytes.decode())
            token.expires_at = arrow.get(token_json['expires_at']) if token_json['expires_at'] is not None else None
            # A None expires_at means the token never expires.
            if token.expires_at is not None and token.expires_at <= arrow.now('UTC'):
                raise cherrypy.HTTPError(401, 'Invalid Authorization Token.')
            token.metadata = token_json['metadata']
            token.email = token_json['email']
        except InvalidToken:
            # Not a Fernet token: try to validate it as a signed OAuth JWT.
            try:
                token_payload = jwt.decode(token_string,
                                           token.get_oauth_rsa_key(jwt.get_unverified_header(token_string)),
                                           algorithms=['RS256'], audience=settings.OPENID_CLIENT_ID,
                                           issuer=settings.OPENID_ISSUER_URL)
                token.expires_at = arrow.get(token_payload['exp'])
                token.email = token_payload[settings.OPENID_EMAIL_CLAIM]
                token.oauth_groups = token_payload[settings.OPENID_GROUPS_CLAIM]
            except jose.JOSEError:
                # Unable to decode jwt
                raise cherrypy.HTTPError(401, 'Invalid Authorization Token.')
        username, domain, *_ = token.email.split("@")
        if domain.endswith('sandwich.local'):
            # Internal address, e.g. <name>@service-account.<project>.sandwich.local
            type, project_name, *_ = domain.split('.')  # NOTE(review): 'type' shadows the builtin
            system = True if project_name == 'system' else False
            project = None
            if system is False:
                project = Project.get(project_name)
                if project is None:
                    # Email domain contains invalid project
                    raise cherrypy.HTTPError(401, 'Invalid Authorization Token.')
            if type == 'service-account':
                if system:
                    service_account = SystemServiceAccount.get(username)
                else:
                    service_account = ProjectServiceAccount.get(project, username)
                if service_account is None:
                    raise cherrypy.HTTPError(401, 'Invalid Authorization Token.')
                if token.metadata['key'] not in service_account.keys:
                    if 'instance' in token.metadata:
                        if Instance.get(project, token.metadata['instance']) is None:
                            # Token says it's from an instance but we can't find it
                            raise cherrypy.HTTPError(401, 'Invalid Authorization Token.')
                    else:
                        # Token says it's a service account key but it doesn't exist
                        raise cherrypy.HTTPError(401, 'Invalid Authorization Token.')
                else:
                    expire_at = service_account.keys[token.metadata['key']]
                    if expire_at <= arrow.now('UTC'):
                        # Service account key is expired
                        raise cherrypy.HTTPError(401, 'Invalid Authorization Token.')
                token.service_account = service_account
            else:
                # Invalid email type
                raise cherrypy.HTTPError(401, 'Invalid Authorization Token.')
        # Resolve system-wide roles regardless of how the token was decoded.
        system_policy = IAMPolicy.get("system")
        token.system_roles = token.find_roles(system_policy)
        return token
    def marshal(self, fernet):
        """Serialize this token to an encrypted byte string.

        NOTE(review): expires_at is normally an Arrow object, which
        json.dumps cannot serialize -- confirm callers set it to None or a
        serializable value before marshalling.
        """
        token_data = {
            'email': self.email,
            'metadata': self.metadata,
            'expires_at': self.expires_at,
        }
        return fernet.encrypt(json.dumps(token_data).encode())
    @property
    def identity(self):
        """IAM member string: 'serviceAccount:<email>' or 'user:<email>'."""
        if self.service_account is not None:
            check_email = "serviceAccount:" + self.email
        else:
            check_email = "user:" + self.email
        return check_email
    def find_roles(self, policy: IAMPolicy) -> List[str]:
        """Return the role names the token's identity holds under *policy*.

        Matches the identity directly against each binding's member list and,
        for tokens with OAuth groups, also via linked IAM system groups.
        """
        roles = []
        for binding in policy.bindings:
            role_name = binding['role']
            members = binding['members']
            # Check if user is in the members list
            if self.identity in members:
                roles.append(role_name)
            # If the user has oauth groups
            # check for groups in the members listing
            # and see if the user is part of them
            if len(self.oauth_groups) > 0:
                for member in members:
                    if member.endswith("group.system.sandwich.local"):
                        group_name = member.split("@")[0]
                        iam_group: IAMSystemGroup = IAMSystemGroup.get(group_name)
                        if iam_group.oauth_link in self.oauth_groups:
                            roles.append(role_name)
                            continue
        return roles
    def get_projects(self) -> List[Project]:
        """Return every existing project whose IAM policy grants this token a role."""
        projects = []
        policies = IAMPolicy.list()
        for policy in policies:
            if policy.name == "system":
                continue
            if len(self.find_roles(policy)) > 0:
                project = Project.get(policy.name)
                if project is not None:
                    projects.append(project)
        return projects
    def enforce_permission(self, permission, project=None):
        """Raise cherrypy.HTTPError(403) unless the token holds *permission*.

        System roles are checked first for system-level permissions; project
        roles are checked when *project* is given.  Returns None on success.
        """
        if len(self.system_roles) > 0:
            if permission in [p['name'] for p in SYSTEM_PERMISSIONS]:
                for role_name in self.system_roles:
                    role = IAMSystemRole.get(role_name)
                    if role is not None and permission in role.permissions:
                        return
                raise cherrypy.HTTPError(403,
                                         "Insufficient permissions (%s) to perform the requested action." % permission)
        if project is not None:
            project_policy = IAMPolicy.get(project.name)
            if project_policy is None:
                raise cherrypy.HTTPError(500, "Could not find iam policy document for project %s" % project.name)
            project_roles = self.find_roles(project_policy)
            for role_name in project_roles:
                role = IAMProjectRole.get(project, role_name)
                if role is not None and permission in role.permissions:
                    return
            raise cherrypy.HTTPError(403, "Insufficient permissions (%s) to perform the "
                                          "requested action in the project %s." % (permission, project.name))
        raise cherrypy.HTTPError(403, "Insufficient permissions (%s) to perform the requested action." % permission)
|
<reponame>wombat70/behave<filename>tests/unit/tag_expression/test_parser.py<gh_stars>10-100
# -*- coding: UTF-8 -*-
# pylint: disable=bad-whitespace
"""
Unit tests for tag-expression parser.
"""
from __future__ import absolute_import, print_function
from behave.tag_expression.parser import TagExpressionParser, TagExpressionError
from cucumber_tag_expressions.parser import \
Token, Associative, TokenType
import pytest
# -----------------------------------------------------------------------------
# TEST SUITE: Grammar
# -----------------------------------------------------------------------------
# SAME-AS: cucumber/tag-expressions/python/tests/unit/test_parser:TestToken
class TestToken(object):
    """Tests for Token enum members: associativity, type, and predicate flags."""
    @pytest.mark.parametrize("token, expected", [
        (Token.OR, Associative.LEFT),
        (Token.AND, Associative.LEFT),
        (Token.NOT, Associative.RIGHT),
        (Token.OPEN_PARENTHESIS, None),
        (Token.CLOSE_PARENTHESIS, None),
    ])
    def test_assoc(self, token, expected):
        """Binary operators are left-associative; NOT is right-associative."""
        assert token.assoc is expected
    @pytest.mark.parametrize("token, expected", [
        (Token.OR, TokenType.OPERATOR),
        (Token.AND, TokenType.OPERATOR),
        (Token.NOT, TokenType.OPERATOR),
        (Token.OPEN_PARENTHESIS, TokenType.OPERAND),
        (Token.CLOSE_PARENTHESIS, TokenType.OPERAND),
    ])
    def test_token_type(self, token, expected):
        """Operators vs. operands classification (parens count as operands)."""
        assert token.token_type == expected
    @pytest.mark.parametrize("token, expected", [
        (Token.OR, True),
        (Token.AND, True),
        (Token.NOT, True),
        (Token.OPEN_PARENTHESIS, False),
        (Token.CLOSE_PARENTHESIS, False),
    ])
    def test_is_operation(self, token, expected):
        """Only OR/AND/NOT count as operations."""
        assert token.is_operation == expected
    @pytest.mark.parametrize("token, expected", [
        (Token.OR, True),
        (Token.AND, True),
        (Token.NOT, False),
        (Token.OPEN_PARENTHESIS, False),
        (Token.CLOSE_PARENTHESIS, False),
    ])
    def test_is_binary(self, token, expected):
        """OR/AND are binary; NOT and parens are not."""
        assert token.is_binary == expected
    @pytest.mark.parametrize("token, expected", [
        (Token.OR, False),
        (Token.AND, False),
        (Token.NOT, True),
        (Token.OPEN_PARENTHESIS, False),
        (Token.CLOSE_PARENTHESIS, False),
    ])
    def test_is_unary(self, token, expected):
        """NOT is the only unary operator."""
        assert token.is_unary == expected
    # or_  = ("or",  0, Associative.left)
    # and_ = ("and", 1, Associative.left)
    # not_ = ("not", 2, Associative.right)
    @pytest.mark.parametrize("token1, token2, expected", [
        (Token.OR, Token.OR, True),
        (Token.OR, Token.AND, True),
        (Token.OR, Token.NOT, True),
        (Token.AND, Token.AND, True),
        (Token.AND, Token.OR, False),
        (Token.AND, Token.NOT, True),
        (Token.NOT, Token.NOT, False),
        (Token.NOT, Token.OR, False),
        (Token.NOT, Token.AND, False),
    ])
    def test_has_lower_precedence_than(self, token1, token2, expected):
        """Precedence ordering: or < and < not (left-assoc ties count as lower)."""
        assert token1.has_lower_precedence_than(token2) == expected
    @pytest.mark.parametrize("token1, token2, expected", [
        (Token.OR, Token.OPEN_PARENTHESIS, False),
        (Token.OR, Token.CLOSE_PARENTHESIS, False),
        (Token.AND, Token.OPEN_PARENTHESIS, False),
        (Token.AND, Token.CLOSE_PARENTHESIS, False),
        (Token.NOT, Token.OPEN_PARENTHESIS, False),
        (Token.NOT, Token.CLOSE_PARENTHESIS, False),
    ])
    def test_has_lower_precedence_than__with_parens(self, token1, token2, expected):
        """Parentheses never out-rank an operator."""
        assert token1.has_lower_precedence_than(token2) == expected
# -----------------------------------------------------------------------------
# TEST SUITE: Parser
# -----------------------------------------------------------------------------
class TagExpressionParserTestBase(object):
    """Shared assertion helpers for TagExpressionParser test suites."""

    @staticmethod
    def assert_parse_expression_equals_expression_string(text, expected):
        """Parse *text* and compare its str() rendering against *expected*."""
        expression = TagExpressionParser().parse(text)
        assert expected == str(expression)

    @staticmethod
    def assert_parse_expression_equals_expression_repr(text, expected):
        """Parse *text* and compare its repr() rendering against *expected*."""
        expression = TagExpressionParser().parse(text)
        assert expected == repr(expression)

    @staticmethod
    def assert_parse_with_error_contains_message(text, error_message):
        """Parsing *text* must raise TagExpressionError containing *error_message*."""
        with pytest.raises(TagExpressionError) as exc_info:
            TagExpressionParser().parse(text)
        exc_text = exc_info.exconly()
        print(exc_text)
        assert error_message in exc_text
# SAME-AS: cucumber/tag-expressions/python/tests/unit/test_parser:TestTagExpressionParser
# HINT: Refactoring into TagExpressionParserTestBase is only done here.
class TestTagExpressionParser(TagExpressionParserTestBase):
    """End-to-end tests for TagExpressionParser.parse(): good input,
    precedence, and the various syntax-error diagnostics."""
    # -- TESTS FOR: TagExpressionParser.parse()
    correct_test_data = [
        ("a and b", "( a and b )"),
        ("a or (b)", "( a or b )"),
        ("not a", "not ( a )"),
        ("( a and b ) or ( c and d )", "( ( a and b ) or ( c and d ) )"),
        ("not a or b and not c or not d or e and f",
         "( ( ( not ( a ) or ( b and not ( c ) ) ) or not ( d ) ) or ( e and f ) )"),
    ]
    @pytest.mark.parametrize("text, expected", correct_test_data)
    def test_parse(self, text, expected):
        """Well-formed expressions render back in canonical parenthesized form."""
        self.assert_parse_expression_equals_expression_string(text, expected)
    @pytest.mark.parametrize("text, expected", [
        ("(a)", "a"),
        ("b", "b"),
        ("(((((c)))))", "c"),
    ])
    def test_parse__with_one_literal(self, text, expected):
        """Redundant parentheses around a single literal are dropped."""
        self.assert_parse_expression_equals_expression_string(text, expected)
    @pytest.mark.parametrize("text", ["", " ",])
    def test_parse__empty_is_always_true(self, text):
        """An empty/blank expression parses to the always-true expression."""
        self.assert_parse_expression_equals_expression_repr(text, "True_()")
    @pytest.mark.parametrize("text, expected", [
        ("a and b or c", "( ( a and b ) or c )"),
        ("a or b and c", "( a or ( b and c ) )"),
        ("a and b and c", "( ( a and b ) and c )"),
        ("a or b or c", "( ( a or b ) or c )"),
        ("a and not b", "( a and not ( b ) )"),
        ("a or not b", "( a or not ( b ) )"),
        ("not a and b", "( not ( a ) and b )"),
        ("not a or b", "( not ( a ) or b )"),
        ("not (a and b) or c", "( not ( ( a and b ) ) or c )"),
    ])
    def test_parse__ensure_precedence(self, text, expected):
        """Ensures that the operation precedence is parsed correctly."""
        self.assert_parse_expression_equals_expression_string(text, expected)
    @pytest.mark.parametrize("text, expected", [
        ("not not a", "not ( not ( a ) )"),
        ("not not a and b", "( not ( not ( a ) ) and b )"),
    ])
    def test_parse__with_not_not(self, text, expected):
        """Double negation is preserved, not collapsed."""
        self.assert_parse_expression_equals_expression_string(text, expected)
    # -- BAD CASES:
    @pytest.mark.parametrize("text, error_message", [
        ("( a and b ))", "Missing '(': Too few open-parens"),
        ("( ( a and b )", "Unclosed '(': Too many open-parens"),
    ])
    def test_parse__fails_with_unbalanced_parens(self, text, error_message):
        self.assert_parse_with_error_contains_message(text, error_message)
    @pytest.mark.parametrize("text, error_message", [
        ("a not ( and )", "Syntax error. Expected operator after a"),
    ])
    def test_parse__fails_with_missing_operation_args(self, text, error_message):
        self.assert_parse_with_error_contains_message(text, error_message)
    @pytest.mark.parametrize("text, error_message", [
        ("or or", "Syntax error. Expected operand after BEGIN"),
    ])
    def test_parse__fails_with_only_operations(self, text, error_message):
        self.assert_parse_with_error_contains_message(text, error_message)
    @pytest.mark.parametrize("text, error_message", [
        ("a b", "Syntax error. Expected operator after a"),
    ])
    def test_parse__fails_for_args_without_operation(self, text, error_message):
        self.assert_parse_with_error_contains_message(text, error_message)
    @pytest.mark.parametrize("text, error_message", [
        ("(())", "Syntax error. Expected operand after ("),
        ("(() ())", "Syntax error. Expected operand after ("),
    ])
    def test_parse__fails_for_empty_parens_groups(self, text, error_message):
        self.assert_parse_with_error_contains_message(text, error_message)
    @pytest.mark.parametrize("text, expected", [
        ("a b or", "Syntax error. Expected operator after a"),
        ("a and (b not)", "Syntax error. Expected operator after b"),
        ("a and (b c) or", "Syntax error. Expected operator after b"),
    ])
    def test_parse__fails_with_rpn_notation(self, text, expected):
        # -- NOTE: RPN parsebility due to Shunting-yard algorithm (stack-based).
        self.assert_parse_with_error_contains_message(text, expected)
    # -- BAD CASES: Too few operands
    @pytest.mark.parametrize("text, error_message", [
        ("a and ", "and: Too few operands"),
        (" and b", "Syntax error. Expected operand after BEGIN"),
    ])
    def test_parse__fails_and_operation_with_too_few_args(self, text, error_message):
        self.assert_parse_with_error_contains_message(text, error_message)
    @pytest.mark.parametrize("text, error_message", [
        ("a or ", "or: Too few operands"),
        (" or b", "Syntax error. Expected operand after BEGIN"),
        ("a and b or ", "or: Too few operands"),
    ])
    def test_parse__fails_or_operation_with_too_few_args(self, text, error_message):
        self.assert_parse_with_error_contains_message(text, error_message)
    @pytest.mark.parametrize("text, error_message", [
        ("not ", "not: Too few operands"),
        ("not ()", "Syntax error. Expected operand after ("),
        ("not () and b", "Syntax error. Expected operand after ("),
        ("not () or b", "Syntax error. Expected operand after ("),
    ])
    def test_parse__fails_not_operation_with_too_few_args(self, text, error_message):
        self.assert_parse_with_error_contains_message(text, error_message)
    # -- OTHER TESTS:
    @pytest.mark.parametrize("text, expected", [
        ("or", Token.OR),
        ("and", Token.AND),
        ("not", Token.NOT),
        ("(", Token.OPEN_PARENTHESIS),
        (")", Token.CLOSE_PARENTHESIS),
        ("UNKNOWN", None), # CASE: Literal
    ])
    def test_select_token(self, text, expected):
        """Keyword strings map to their Token; anything else is a literal (None)."""
        token = TagExpressionParser.select_token(text)
        assert token is expected
# -----------------------------------------------------------------------------
# TEST SUITE: Parser Extension
# -----------------------------------------------------------------------------
class TestTagExpressionParserExt(TagExpressionParserTestBase):
    """Tests for the parser extension: wildcard tags become Matcher nodes."""
    @pytest.mark.parametrize("text, expected", [
        ("a.*", "Matcher('a.*')"),
        ("a.* or *.b ", "Or(Matcher('a.*'), Matcher('*.b'))"),
        ("not a.* or b", "Or(Not(Matcher('a.*')), Literal('b'))"),
        ("a or *.b", "Or(Literal('a'), Matcher('*.b'))"),
    ])
    def test_parse__with_matcher(self, text, expected):
        """Ensures that the Matcher expressions are parsed correctly."""
        self.assert_parse_expression_equals_expression_repr(text, expected)
|
from gridData import Grid
import numpy as np
import sys
sys.path.append("/home/abdullah/Code/Python/SFED/")
from gridcollector import GridCollector
from SFED_routines import sfed_gf_3drism, integrate_sfed
from pathlib import Path
# Resolve the HNC reference-data directory relative to this test file.
base_path = Path(__file__).parent
# NOTE(review): file_path is an unused alias of data_path -- kept in case
# something else imports it; confirm before removing.
data_path = file_path = (base_path / "../data/DATA/HNC/").resolve()
# One GridCollector per solute, loading its precomputed 3D-RISM grids.
gc3methbut1e = GridCollector("3methbut1e", str(data_path) + "/3methbut1e/3")
gc3methbut1ol = GridCollector("3methbut1ol", str(data_path) + "/3methbut1ol/3")
gc24dimepen = GridCollector("24dimepen", str(data_path) + "/24dimepen/3")
gcethene = GridCollector("ethene", str(data_path) + "/ethene/3")
gcethylbenzene = GridCollector("ethylbenzene", str(data_path) + "/ethylbenzene/3")
gcn_decane = GridCollector("n_decane", str(data_path) + "/n_decane/3")
gcn_hexane = GridCollector("n_hexane", str(data_path) + "/n_hexane/3")
gcphenol = GridCollector("phenol", str(data_path) + "/phenol/3")
gcnhexylbenzene = GridCollector("nhexylbenzene", str(data_path )+ "/nhexylbenzene/3")
# Decimal places used by assert_almost_equal in the tests below.
prec = 3
def test_hnc_3methbut1e():
    """GF-approximation SFED for 3-methyl-1-butene matches the HNC reference."""
    g = gc3methbut1e.grids
    density = sfed_gf_3drism(g["HO"].grid, g["CO"].grid, g["HH1"].grid, g["CH1"].grid)
    np.testing.assert_almost_equal(integrate_sfed(density, gc3methbut1e.delta),
                                   8.0294615000000036, prec)
def test_hnc_3methbut1ol():
    """GF-approximation SFED for 3-methyl-1-butanol matches the HNC reference."""
    g = gc3methbut1ol.grids
    density = sfed_gf_3drism(g["HO"].grid, g["CO"].grid, g["HH1"].grid, g["CH1"].grid)
    np.testing.assert_almost_equal(integrate_sfed(density, gc3methbut1ol.delta),
                                   1.4888779999999997, prec)
def test_hnc_24dimepen():
    """GF-approximation SFED for 2,4-dimethylpentane matches the HNC reference."""
    g = gc24dimepen.grids
    density = sfed_gf_3drism(g["HO"].grid, g["CO"].grid, g["HH1"].grid, g["CH1"].grid)
    np.testing.assert_almost_equal(integrate_sfed(density, gc24dimepen.delta),
                                   11.340047900000002, prec)
def test_hnc_ethene():
    """GF-approximation SFED for ethene matches the HNC reference."""
    g = gcethene.grids
    density = sfed_gf_3drism(g["HO"].grid, g["CO"].grid, g["HH1"].grid, g["CH1"].grid)
    np.testing.assert_almost_equal(integrate_sfed(density, gcethene.delta),
                                   4.5047478999999999, prec)
def test_hnc_ethylbenzene():
    """GF-approximation SFED for ethylbenzene matches the HNC reference."""
    g = gcethylbenzene.grids
    density = sfed_gf_3drism(g["HO"].grid, g["CO"].grid, g["HH1"].grid, g["CH1"].grid)
    np.testing.assert_almost_equal(integrate_sfed(density, gcethylbenzene.delta),
                                   4.6103786999999983, prec)
def test_hnc_n_decane():
    """GF-approximation SFED for n-decane matches the HNC reference."""
    g = gcn_decane.grids
    density = sfed_gf_3drism(g["HO"].grid, g["CO"].grid, g["HH1"].grid, g["CH1"].grid)
    np.testing.assert_almost_equal(integrate_sfed(density, gcn_decane.delta),
                                   15.928665999999993, prec)
def test_hnc_n_hexane():
    """GF-approximation SFED for n-hexane matches the HNC reference."""
    g = gcn_hexane.grids
    density = sfed_gf_3drism(g["HO"].grid, g["CO"].grid, g["HH1"].grid, g["CH1"].grid)
    np.testing.assert_almost_equal(integrate_sfed(density, gcn_hexane.delta),
                                   10.272555799999999, prec)
def test_hnc_phenol():
    """GF-approximation SFED for phenol matches the HNC reference."""
    g = gcphenol.grids
    density = sfed_gf_3drism(g["HO"].grid, g["CO"].grid, g["HH1"].grid, g["CH1"].grid)
    np.testing.assert_almost_equal(integrate_sfed(density, gcphenol.delta),
                                   -3.90118480000001, prec)
def test_hnc_nhexylbenzene():
    """GF-approximation SFED for n-hexylbenzene matches the HNC reference."""
    g = gcnhexylbenzene.grids
    density = sfed_gf_3drism(g["HO"].grid, g["CO"].grid, g["HH1"].grid, g["CH1"].grid)
    np.testing.assert_almost_equal(integrate_sfed(density, gcnhexylbenzene.delta),
                                   10.6374177, prec)
<reponame>calio/taski<gh_stars>0
import random
import math
from . import util
import npyscreen
# Keep a handle to the builtin sorted(); this module defines its own sorted().
orig_sorted = sorted
# Shared state handed to the npyscreen UI callback (see tui_cmp/show_ui).
bag = {}
random.seed()
# Elo K-factor: maximum rating adjustment per match.
K = 16
def expected(ra, rb):
    """Expected score of a player rated *ra* against one rated *rb* (Elo)."""
    return 1.0 / (1 + 10.0 ** (float(rb - ra) / 400))
# Elo update rule: Ra' = Ra + K * (Sa - Ea), where Sa is the actual score
# (win: 1, draw: 0.5, lose: 0) and Ea the expected score against Rb.
def win(ra, rb):
    """Return the new rating for *ra* after beating a player rated *rb*."""
    return ra + K * (1 - expected(ra, rb))
def draw(ra, rb):
    """Return the new rating for *ra* after drawing with a player rated *rb*."""
    return ra + K * (0.5 - expected(ra, rb))
def lose(ra, rb):
    """Return the new rating for *ra* after losing to a player rated *rb*."""
    return ra + K * (0 - expected(ra, rb))
def natual_cmp(a, b):
    """Default comparator for {"item": ...} buckets.

    Returns -1 when a's item is larger (a wins), 1 when b's item is larger,
    0 on a tie -- i.e. the bigger item wins the match.
    """
    ia, ib = a["item"], b["item"]
    if ia == ib:
        return 0
    return 1 if ia < ib else -1
def terminal_cmp(a, b):
    """Interactively ask the user which of two items wins.

    Returns -1 if item 1 wins, 1 if item 2 wins, 0 for a draw.  Re-prompts
    until a valid answer (0/1/2) is given.

    Fixes three defects in the original: it ran eval() on raw user input
    (arbitrary code execution), it fell through on out-of-range answers
    instead of re-prompting, and it scored a draw (0) as a win for item 2.
    """
    question = util.red(
        'Which one wins? (0: draw, 1: 1 wins, 2: 2 wins)') + '\n1. %s\n2. %s\n[0/1/2]:'
    while True:
        item1 = util.green(str(a))
        item2 = util.cyan(str(b))
        try:
            # Parse the reply with int(); never eval() untrusted input.
            r = int(input(question % (item1, item2)))
        except ValueError as e:
            print(e)
            continue
        if r not in [0, 1, 2]:
            print("Invalid answer: %s\n" % r)
            continue
        if r == 0:
            return 0
        # Smaller return value means player 1 wins (see match()).
        return -1 if r == 1 else 1
def show_ui(*args):
    """npyscreen form asking the user to pick a winner between bag["a"] and bag["b"].

    Returns -1 if the first item wins, 1 if the second wins, 0 for a draw.
    """
    global bag
    item_a = bag["a"]["item"]
    item_b = bag["b"]["item"]
    form = npyscreen.Form(name="Welcome to ELO ranking",)
    form.add(npyscreen.TitleText, name="Which one wins?")
    picker = form.add(npyscreen.TitleSelectOne, max_height=4, value=[1, ],
                      name="Pick One", values=[item_a, item_b, "Draw"],
                      scroll_exit=False)
    form.edit()
    choice = picker.get_selected_objects()[0]
    if choice == "Draw":
        return 0
    return -1 if choice == item_a else 1
def tui_cmp(a, b):
    """cmp adapter that runs the npyscreen picker for buckets *a* and *b*."""
    global bag
    # show_ui() reads the contenders from the module-level bag
    bag = {"a": a, "b": b}
    return npyscreen.wrapper_basic(show_ui)
def match(player1, player2, cmp=natual_cmp):
    """Play one Elo match between two buckets and update both scores in place.

    *cmp* returns a negative value when player1 wins, 0 for a draw, and a
    positive value when player2 wins.  Both new ratings are computed from a
    snapshot of the pre-match ratings: the original code updated player1
    first and then fed the already-updated rating into player2's update,
    biasing player2's result.
    """
    r = cmp(player1, player2)
    # snapshot pre-match ratings so neither update sees the other's result
    ra, rb = player1["score"], player2["score"]
    if r == 0:
        player1["score"] = draw(ra, rb)
        player2["score"] = draw(rb, ra)
    elif r < 0:
        player1["score"] = win(ra, rb)
        player2["score"] = lose(rb, ra)
    elif r > 0:
        player1["score"] = lose(ra, rb)
        player2["score"] = win(rb, ra)
def sort(items, cmp=terminal_cmp, rounds=None):
    """Rank *items* (best first) by playing random Elo matches.

    Each item is wrapped in a {"score", "item"} bucket, *rounds* random
    pairings are played (defaults to len(items)), and the items are returned
    in descending score order.  *cmp* decides each match's outcome.
    """
    n = len(items)
    if n < 2:
        return items
    array = [{"score": 0, "item": item} for item in items]
    if rounds is None:
        rounds = n
    for _ in range(rounds):
        player1 = random.choice(array)
        player2 = random.choice(array)
        # re-draw until the two contenders are distinct buckets
        while player1 == player2:
            player2 = random.choice(array)
        match(player1, player2, cmp=cmp)
    array = orig_sorted(array, key=lambda x: x["score"], reverse=True)
    # (removed a leftover debug print(array) that leaked internals to stdout)
    return [bucket["item"] for bucket in array]
def sorted(iterable, cmp=None, key=None, reverse=False, rounds=None):
    """Drop-in sorted() lookalike that ranks via random Elo matches.

    *key* is accepted for signature compatibility but ignored; *cmp* decides
    match outcomes (natual_cmp by default).  *rounds* random pairings are
    played (defaults to len(iterable)).
    """
    if cmp is None:
        cmp = natual_cmp
    n = len(iterable)
    if n < 2:
        return iterable
    if rounds is None:
        rounds = n
    array = [{"score": 0, "item": item} for item in iterable]
    for _ in range(rounds):
        first = random.choice(array)
        second = random.choice(array)
        # re-draw until the two contenders are distinct buckets
        while first == second:
            second = random.choice(array)
        match(first, second, cmp=cmp)
    # rank by accumulated Elo score using the real builtin sorted()
    array = orig_sorted(array, key=lambda bucket: bucket["score"], reverse=reverse)
    return [bucket["item"] for bucket in array]
|
<reponame>ehoogeboom/convolution_exponential_and_sylvester
import torch
import numpy as np
import torch.nn.functional as F
from models.transformations import BaseTransformation
from models.transformations.conv1x1 import Conv1x1
from models.transformations.emerging.masks import get_conv_square_ar_mask
class SquareAutoRegressiveConv2d(BaseTransformation):
    """Invertible 2x2 convolution with a masked (autoregressive) kernel.

    The mask makes the transformation triangular w.r.t. a pixel ordering, so
    the log-determinant is the sum of log|diagonal| terms (delta_ldj) and the
    inverse is computed by back-substitution (the numpy Inverse op).
    """

    def __init__(self, n_channels):
        # n_channels: number of input (== output) channels.
        super(SquareAutoRegressiveConv2d, self).__init__()
        self.n_channels = n_channels
        # out_channels x in_channels x 2 x 2 kernel.
        kernel_size = [n_channels, n_channels, 2, 2]
        # Random init scaled by kernel size; +1 on the (i, i, -1, -1) entries
        # so the diagonal of the last tap starts near the identity.
        weight = torch.randn(kernel_size) / np.sqrt(np.prod(kernel_size))
        weight[torch.arange(n_channels), torch.arange(n_channels), -1, -1] += 1.
        self.weight = torch.nn.Parameter(weight)
        self.bias = torch.nn.Parameter(torch.zeros(n_channels))
        # Binary mask enforcing the autoregressive structure
        # (presumably zeroes the non-causal taps — confirm in masks.py).
        mask_np = get_conv_square_ar_mask(n_channels, n_channels, 2, 2)
        self.register_buffer('mask', torch.from_numpy(mask_np))
        # Numpy-based triangular solver used by reverse(); imported lazily.
        from models.transformations.emerging.inverse_triang_conv import Inverse
        self.inverse_op = Inverse()

    def delta_ldj(self, x):
        """Log-det contribution: sum of log|diag of last kernel tap| * H * W."""
        log_abs_diag = torch.log(torch.abs(self.weight[torch.arange(
            self.n_channels), torch.arange(self.n_channels), -1, -1]))
        delta_ldj = torch.sum(log_abs_diag) * x.size(2) * x.size(3)
        return delta_ldj

    def forward(self, x, ldj, context):
        """Apply the masked conv; returns (z, ldj + delta_ldj)."""
        weight = self.weight * self.mask
        # padding=1 grows the output by one row/col; the slice below keeps
        # the causal part so the map stays invertible.
        z = F.conv2d(x, weight, self.bias, stride=1, padding=1)
        # Slice off last dimensions.
        z = z[:, :, :-1, :-1]
        delta_ldj = self.delta_ldj(x)
        ldj = ldj + delta_ldj
        return z, ldj

    def reverse(self, z, ldj, context):
        """Invert forward() via the numpy back-substitution op (CPU, no grad)."""
        weight = self.weight * self.mask
        delta_ldj = self.delta_ldj(z)
        bias = self.bias.view(1, self.n_channels, 1, 1)
        with torch.no_grad():
            x_np = self.inverse_op(
                z.detach().cpu().numpy(),
                weight.detach().cpu().numpy(),
                bias.detach().cpu().numpy())
            x = torch.from_numpy(x_np).to(z.device, z.dtype)
        ldj = ldj - delta_ldj
        return x, ldj
class Flip2d(BaseTransformation):
    """Volume-preserving flip of both spatial axes (its own inverse).

    Reversing height and width changes no volume, so ldj passes through
    untouched.
    """

    def __init__(self):
        super(Flip2d, self).__init__()

    def forward(self, x, ldj, context, reverse=False):
        height = x.size(2)
        width = x.size(3)
        # reverse the height axis, then the width axis
        x = x[:, :, torch.arange(height - 1, -1, -1)]
        x = x[:, :, :, torch.arange(width - 1, -1, -1)]
        return x, ldj

    def reverse(self, z, ldj, context):
        # A flip is an involution, so the inverse is just the forward pass.
        # The old signature was reverse(self, z, context, ldj) — ldj/context
        # swapped relative to every sibling transformation; it only worked
        # because the internal call was swapped the same way.  Fixed to match
        # the convention used by SquareAutoRegressiveConv2d and Emerging.
        return self(z, ldj, context)
class Emerging(BaseTransformation):
    """Emerging convolution: a 1x1 conv followed by two masked square
    autoregressive convolutions, each followed by a spatial flip."""

    def __init__(self, n_channels):
        super(Emerging, self).__init__()
        self.transformations = torch.nn.ModuleList([
            Conv1x1(n_channels),
            SquareAutoRegressiveConv2d(n_channels),
            Flip2d(),
            SquareAutoRegressiveConv2d(n_channels),
            Flip2d(),
        ])

    def forward(self, x, logdet, context, reverse=False):
        """Run all sub-transformations; in reverse order with reverse=True."""
        if reverse:
            for transform in reversed(self.transformations):
                x, logdet = transform.reverse(x, logdet, context)
        else:
            for transform in self.transformations:
                x, logdet = transform(x, logdet, context)
        return x, logdet

    def reverse(self, x, logdet, context):
        # Route through forward() so its pre-forward hook still fires (needed
        # for spectral normalization, e.g. when sampling with variational
        # dequantization).
        return self(x, logdet, context, reverse=True)
if __name__ == '__main__':
    # Round-trip sanity check: reconstruction error should be ~0.
    sample = torch.randn(1, 8, 4, 4)
    logdet = torch.zeros(8)
    layer = Emerging(8)
    latent, _ = layer(sample, logdet, None)
    reconstruction, _ = layer.reverse(latent, logdet, None)
    print(torch.mean((sample - reconstruction) ** 2))
<reponame>JuiceFV/stankin_pst_project<filename>application/sources/validator/vote.py
"""This module contains the class responsible for voting for a girl.
"""
import application.sources.validator.exception as errors
from matplotlib.widgets import CheckButtons
import matplotlib.pyplot as plt
class Vote:
    """The class itself sets n boxes to a plot with image.
    Each posses two choice's buttons (Like/Dislike).
    You can select the only one for every box.
    """
    def __init__(self, labels, axes_data):
        """Constructor. Initialize everything, some explanations follow below.
        :param labels: list of labels of check buttons ['Like', 'Dislike']
        :param axes_data: list of dictionaries, which in its turn contains data for checkboxes, specifically an owner's
        name and position of its box. Ex: [{'name': name1, 'rect': [1,1,1,1]}{'name': name2, 'rect': [2,2,2,2]}].
        :raises errors.VoteDefinition: on empty labels/axes or a box missing 'name'/'rect'.
        """
        if len(axes_data) == 0:
            raise errors.VoteDefinition("Passed empty axes")
        if len(labels) == 0:
            raise errors.VoteDefinition("Passed empty labels")
        # Raise the exception if in any of passed box missing either name or rect.
        for box_data in axes_data:
            if not ('name' in box_data and 'rect' in box_data):
                raise errors.VoteDefinition("Missing some field of axes_data.")
        # Will fill further
        self._names = []
        # Labels (['Like', 'Dislike'])
        self._labels = labels
        # Decisions of participants {'name1': Decision, 'name2': Decision, ...}
        self._decisions = {}
        for person in axes_data:
            # Set axes of checkbox of a person. self.ax_name1, self.ax_name2, ...
            setattr(self, 'ax_' + person['name'], plt.axes(person['rect']))
            self._names.append(person['name'])
            self._decisions.update({person['name']: ''})
            # set buttons, self.name1_check = CheckButtons(self.ax_name1, self._labels))
            setattr(self, person['name'] + '_check', CheckButtons(getattr(self, 'ax_' + person['name']), self._labels))
            # set on click action for each button.
            # NOTE: name=person['name'] is a default-argument binding — it
            # freezes the current loop value so each lambda keeps its own name.
            getattr(self, person['name'] + '_check').on_clicked(
                lambda label, name=person['name']: self._checkbox_onclick_factory(label, name)
            )
    def set_invisible(self):
        """The function hides all boxes (removes every per-person axes).
        """
        for name in self._names:
            getattr(self, 'ax_' + name).remove()
    def _checkbox_onclick_factory(self, label, name):
        """This method is responsible for reacting to button clicks.
        :param label: Chosen label (Like/Dislike)
        :param name: name of a box "owner"
        """
        reverse_states = [1, 0]
        # retrieving owner's check buttons
        check_box = getattr(self, name + '_check')
        # and obtain their status [True/False, True/False]
        buttons_state = check_box.get_status()
        # if two buttons were picked then set the last chosen as active.
        # (set_active toggles the *other* button off via reverse_states)
        if buttons_state == [True, True]:
            check_box.set_active(reverse_states[self._labels.index(label)])
        # tie user's decision with his name for further usage
        self._decisions[name] = label
        # In the case the user hasn't chosen an option, set his decision as empty.
        if buttons_state == [False, False]:
            self._decisions[name] = ''
    @property
    def get_decisions(self):
        """Return the dictionary of users' decisions.
        :return: If at least one checkbox is empty (w/o chosen option) return None to handle it as error further.
        If all is fine then return dict of decisions.
        """
        for name in self._names:
            if self._decisions[name] == '':
                return None
        return self._decisions
    @property
    def get_vote_axes(self):
        """Returns the vote axes in purpose to convey control in plot to vote.
        :return: vote axes (the first person's axes).
        """
        return getattr(self, 'ax_' + self._names[0])
|
<filename>ver6.py<gh_stars>1-10
#---------------------NEAREST NEIGHBOURS USING KD-TREES----------------------
#Imported modules and files
import math
import matplotlib.pyplot as plt #matplotlib for plotting graphs
import time
from kdtree2 import KDTree
from kdtree2 import binaryheap #The KD-Tree class
#Passing the tree of particular place_type and finding neighbours
def find_places(tree,d,pts):
    """Interactively query *tree* for neighbours of location *d* and plot them.

    :param tree: KDTree of one place type (exposes nearest_neighbor/returncounter)
    :param d: [x, y] query location
    :param pts: [points, label, colour] triple for this place type
    Mode 1 finds the k nearest points; mode 2 finds all points within a radius.
    """
    x,y=[],[]
    x_all,y_all=[],[]
    # collect every point's coordinates for the background scatter
    for i in pts[0] :
        x_all.append(i[0][0])
        y_all.append(i[0][1])
    print("Do you want to find the closest ",pts[1],
          "\n\t1) Enter the no. closest points",
          "\n\t2) In a given radius")
    t=int(input())
    start_time=time.time()
    #KNN (K- Nearest Neighbours)
    #-- when k is taken from user
    if t==1:
        l=int(input("Enter no. of closest points required : "))
        # clamp k to the number of available places
        if len(pts[0])<l:
            print("\nOnly ",len(pts[0])," places exist.")
            l=len(pts[0])
        print("\n\n---- CLOSEST ",l," ",pts[1]," ----")
        #Calling nearest neighbour function for a tree
        best=tree.nearest_neighbor(d,t,l)
        # squared distance of the farthest of the k results (circle radius)
        r=best.returnmax()[2]
        #Printing the results
        for _ in range(l):
            nextbest=best.extractmax()
            #Appending x and y co-ordinates to two separate lists for plotting purposes
            x.append(nextbest[0][0])
            y.append(nextbest[0][1])
            #printing various attributes of closest point
            print("Closest point :",nextbest[0])
            print("Min_Distance :",math.sqrt(nextbest[2]))
            print("Label of point:",nextbest[1])
            print("- - - - - - - - - - - - - - - ")
        print("\nCounter of recursive_search:",tree.returncounter())
        #Marking the neighbours on a graph
        print("\n-------",time.time()-start_time,"-------")
        patch=plt.Circle((d[0],d[1]),radius=math.sqrt(r),color="#98ffff",alpha=0.2)
        ax=plt.gca()
        ax.add_patch(patch)
        plt.scatter(d[0], d[1], label="My Location", color= "black",marker= "^", s=140)
        plt.scatter(x_all, y_all, label= pts[1], color= pts[2],marker= "*", s=30)
        plt.scatter(x,y,s=80,facecolors='none',edgecolors='b')
        plt.legend()
        plt.show()
    #Getting neighbours within a radius
    if t==2:
        print(" ---- CLOSEST ",pts[1]," ----")
        r=float(input("\nEnter search radius :"))
        #Calling radius function
        best=tree.nearest_neighbor(d,t,0,r)
        #Printing the results
        for i in range(len(best)):
            x.append(best[i][0][0])
            y.append(best[i][0][1])
            print("mindistance :",math.sqrt(best[i][2]))
            print("label:",best[i][1])
            print("closest point",best[i][0])
            print("- - - - - - - - - - - - - - - ")
        #Printing no. of times recursive_search() was called
        print("\nCounter of recursive_search:",tree.returncounter())
        #Total no. of points
        print("\nTotal no. of points within a distance of ",r," are ",len(best)," given above in decreasing order of distance")
        #Marking the places on a graph
        patch=plt.Circle((d[0],d[1]),radius=r,color="#98ffff",alpha=0.2)
        ax=plt.gca()
        ax.add_patch(patch)
        plt.scatter(d[0], d[1], label="My Location", color= "black",marker= "^", s=140)
        plt.scatter(x_all, y_all, label= pts[1], color= pts[2],marker= "*", s=30)
        plt.scatter(x,y,s=80,facecolors='none',edgecolors='b')
        plt.axis('scaled')
        plt.legend()
        plt.show()
def plotgraph(*args):
    """Scatter each (points, label, colour) triple and label the axes.

    Each *args* element is a [points, label, colour] list as built in main().
    """
    for points, label, colour in args:
        xs = [entry[0][0] for entry in points]
        ys = [entry[0][1] for entry in points]
        plt.scatter(xs, ys, label=label, color=colour, marker="*", s=80)
    # axis labels and title for the city map
    plt.xlabel('x - axis')
    plt.ylabel('y - axis')
    plt.title('CITY')
#Extracting coordinates from the corresponding files
def _load_points(filename):
    """Read "x,y" lines from *filename* into [[x, y], label] records.

    Each point is labelled with its 0-based line number.  Returns the KDTree
    built over the points together with the raw point list.  Replaces five
    duplicated loader bodies and closes the file (the originals leaked the
    handle).
    """
    points = []
    with open(filename, 'r') as infile:
        for c, line in enumerate(infile):
            points.append([list(map(float, line.rstrip().split(","))), c])
    return KDTree(points), points

#Hotels
def hotels():
    return _load_points('hotels.txt')

#Schools
def schools():
    return _load_points('schools.txt')

#Police
def police():
    return _load_points('police.txt')

#Hospitals
def hospitals():
    return _load_points('hospitals.txt')

#Petrol Bunk
def petrol_bunk():
    return _load_points('petrol_bunk.txt')
def main():
    """Interactive entry point: read a location, plot every place type, then
    run a nearest-neighbour query for one chosen category."""
    print("Enter your location ( x y ): ")
    location = list(map(float, input().split()))
    # [points, label, colour] attribute triples per place type
    hotel_list = [None, "hotels", "green"]
    police_list = [None, "police", "yellow"]
    hospital_list = [None, "hospitals", "red"]
    petrol_list = [None, "petrol_bunk", "cyan"]
    school_list = [None, "schools", "blue"]
    # build one KD-tree per place type and record its points
    police_tree, police_list[0] = police()
    hotel_tree, hotel_list[0] = hotels()
    school_tree, school_list[0] = schools()
    petrol_tree, petrol_list[0] = petrol_bunk()
    hospital_tree, hospital_list[0] = hospitals()
    # city overview plot with the user's location marked
    plotgraph(police_list, hotel_list, school_list, petrol_list, hospital_list)
    plt.scatter(location[0], location[1], label="My_Location", color="black", marker= "^", s=140)
    plt.legend()
    plt.show()
    # clear graph for future use
    plt.clf()
    print("Which closest place do you wanna find?",
          "\n\t1.Police Station",
          "\n\t2.Hotels",
          "\n\t3.Schools",
          "\n\t4.Petrol Bunk",
          "\n\t5.Hospitals")
    choice = int(input())
    # dispatch table replaces the if/elif ladder
    options = {
        1: (police_tree, police_list),
        2: (hotel_tree, hotel_list),
        3: (school_tree, school_list),
        4: (petrol_tree, petrol_list),
        5: (hospital_tree, hospital_list),
    }
    if choice in options:
        tree, places = options[choice]
        find_places(tree, location, places)
    else:
        print("Wrong choice!")

if __name__ == '__main__':
    main()
|
########################################################################
import sys
import math
import numpy
import vtk
from heartFEM.lcleeHeart.vtk_py.createFloatArray import *
from heartFEM.lcleeHeart.vtk_py.getABPointsFromBoundsAndCenter import *
from heartFEM.lcleeHeart.vtk_py.getCellCenters import *
from heartFEM.lcleeHeart.vtk_py.getPDataNormals import *
from heartFEM.lcleeHeart.vtk_py.writePData import *
########################################################################
def addLocalProlateSpheroidalDirections(ugrid_wall,
                                        pdata_end,
                                        pdata_epi,
                                        type_of_support="cell",
                                        points_AB=None,
                                        eCCname="eCC",
                                        eLLname="eLL",
                                        eRRname="eRR",
                                        verbose=True):
    """Attach local prolate-spheroidal direction arrays to *ugrid_wall*.

    ugrid_wall      : vtkUnstructuredGrid of the wall mesh (modified in place)
    pdata_end       : vtkPolyData of the endo surface
    pdata_epi       : vtkPolyData of the epi surface
    type_of_support : "cell" or "point" — where the arrays are stored
    points_AB       : optional vtkPoints with exactly two points defining the
                      long axis; computed from the epi bounds when omitted
    eCCname/eLLname/eRRname : names for the circumferential / longitudinal /
                      radial direction arrays added to the mesh

    Only change vs. the original code: "points_AB == None" is now
    "points_AB is None" — identity test is the correct (and PEP 8) way to
    check for None, and avoids any vtk equality-operator overloads.
    """
    if (verbose): print ('*** addLocalProlateSpheroidalDirections ***')
    if (verbose): print ("Initializing cell locators...")
    # Locators to find the closest endo/epi surface cell for any point.
    cell_locator_end = vtk.vtkCellLocator()
    cell_locator_end.SetDataSet(pdata_end)
    cell_locator_end.Update()
    cell_locator_epi = vtk.vtkCellLocator()
    cell_locator_epi.SetDataSet(pdata_epi)
    cell_locator_epi.Update()
    # Mutable output slots for FindClosestPoint.
    closest_point_end = [0.]*3
    closest_point_epi = [0.]*3
    generic_cell = vtk.vtkGenericCell()
    cellId_end = vtk.mutable(0)
    cellId_epi = vtk.mutable(0)
    subId = vtk.mutable(0)
    dist_end = vtk.mutable(0.)
    dist_epi = vtk.mutable(0.)
    if (points_AB is None):
        points_AB = getABPointsFromBoundsAndCenter(pdata_epi, verbose)
    assert (points_AB.GetNumberOfPoints() == 2), "points_AB must have two points. Aborting."
    point_A = numpy.array([0.]*3)
    point_B = numpy.array([0.]*3)
    points_AB.GetPoint(0, point_A)
    points_AB.GetPoint(1, point_B)
    print("DISPLAY point_A,point_B",point_A,point_B)
    # Long-axis unit vector.  #isapexflip=False
    eL = point_B - point_A
    eL /= numpy.linalg.norm(eL)
    if (type_of_support == "cell"):
        pdata_cell_centers = getCellCenters(ugrid_wall)
    if (verbose): print ("Computing cell normals...")
    # Orient the epi normals outward: average the radial component of the
    # flipped normals over the upper half and re-flip if it points inward.
    pdata_epi_temp = getPDataNormals(pdata_epi, flip=1)
    pdata_epi_centers = getCellCenters(pdata_epi_temp)
    pdata_pointoutwards_sum=0.
    for num_cell in range(pdata_epi_centers.GetPoints().GetNumberOfPoints()):
        cell_center = numpy.array(pdata_epi_centers.GetPoints().GetPoint(num_cell))
        cell_locator_epi.FindClosestPoint(cell_center, closest_point_epi, generic_cell, cellId_epi, subId, dist_epi)
        normal_epi = numpy.reshape(pdata_epi_temp.GetCellData().GetNormals().GetTuple(cellId_epi), (3))
        if cell_center[2]>(point_A[2]/2.):
            pdata_pointoutwards_sum+=numpy.sum(cell_center[:2]*normal_epi[:2])/numpy.linalg.norm(cell_center[:2])/numpy.linalg.norm(normal_epi[:2])
    if(pdata_pointoutwards_sum<0):
        pdata_epi = getPDataNormals(pdata_epi, flip=0)
    else:
        pdata_epi = getPDataNormals(pdata_epi, flip=1)
    # Same orientation check for the endo surface, but pointing inward.
    pdata_end_temp = getPDataNormals(pdata_end, flip=0)
    pdata_end_centers = getCellCenters(pdata_end_temp)
    pdata_pointoutwards_sum=0.
    for num_cell in range(pdata_end_centers.GetPoints().GetNumberOfPoints()):
        cell_center = numpy.array(pdata_end_centers.GetPoints().GetPoint(num_cell))
        cell_locator_end.FindClosestPoint(cell_center, closest_point_end, generic_cell, cellId_end, subId, dist_end)
        normal_end = numpy.reshape(pdata_end_temp.GetCellData().GetNormals().GetTuple(cellId_end), (3))
        if cell_center[2]>(point_A[2]/2.):
            pdata_pointoutwards_sum+=numpy.sum(cell_center[:2]*normal_end[:2])/numpy.linalg.norm(cell_center[:2])/numpy.linalg.norm(normal_end[:2])
    if(pdata_pointoutwards_sum>0):
        pdata_end = getPDataNormals(pdata_end, flip=1)
    else:
        pdata_end = getPDataNormals(pdata_end, flip=0)
    if (verbose): print ("Computing surface bounds...")
    bounds_end = pdata_end.GetBounds()
    bounds_epi = pdata_epi.GetBounds()
    z_min_end = bounds_end[4]
    z_min_epi = bounds_epi[4]
    z_max_end = bounds_end[5]
    z_max_epi = bounds_epi[5]
    L_end = z_max_end-z_min_end
    L_epi = z_max_epi-z_min_epi
    if (verbose): print ("Computing local prolate spheroidal directions...")
    if (type_of_support == "cell"):
        nb_cells = ugrid_wall.GetNumberOfCells()
    elif (type_of_support == "point"):
        nb_cells = ugrid_wall.GetNumberOfPoints()
    # Output arrays: normalized wall-depth/height plus the three directions.
    farray_norm_dist_end = createFloatArray("norm_dist_end", 1, nb_cells)
    farray_norm_dist_epi = createFloatArray("norm_dist_epi", 1, nb_cells)
    farray_norm_z_end = createFloatArray("norm_z_end", 1, nb_cells)
    farray_norm_z_epi = createFloatArray("norm_z_epi", 1, nb_cells)
    farray_eRR = createFloatArray(eRRname, 3, nb_cells)
    farray_eCC = createFloatArray(eCCname, 3, nb_cells)
    farray_eLL = createFloatArray(eLLname, 3, nb_cells)
    for num_cell in range(nb_cells):
        if (type_of_support == "cell"):
            cell_center = numpy.array(pdata_cell_centers.GetPoints().GetPoint(num_cell))
        elif (type_of_support == "point"):
            cell_center = numpy.array(ugrid_wall.GetPoints().GetPoint(num_cell))
        cell_locator_end.FindClosestPoint(cell_center, closest_point_end, generic_cell, cellId_end, subId, dist_end)
        cell_locator_epi.FindClosestPoint(cell_center, closest_point_epi, generic_cell, cellId_epi, subId, dist_epi)
        # Normalized transmural position (0 at one surface, 1 at the other).
        norm_dist_end = dist_end/(dist_end+dist_epi)
        norm_dist_epi = dist_epi/(dist_end+dist_epi)
        farray_norm_dist_end.InsertTuple(num_cell, [norm_dist_end])
        farray_norm_dist_epi.InsertTuple(num_cell, [norm_dist_epi])
        norm_z_end = (closest_point_end[2]-z_min_end)/L_end
        norm_z_epi = (closest_point_epi[2]-z_min_epi)/L_epi
        farray_norm_z_end.InsertTuple(num_cell, [norm_z_end])
        farray_norm_z_epi.InsertTuple(num_cell, [norm_z_epi])
        normal_end = numpy.reshape(pdata_end.GetCellData().GetNormals().GetTuple(cellId_end), (3))
        normal_epi = numpy.reshape(pdata_epi.GetCellData().GetNormals().GetTuple(cellId_epi), (3))
        # eRR: distance-weighted blend of the surface normals; eCC/eLL
        # complete the orthonormal frame with the long axis eL.
        eRR = -1*(1.-norm_dist_end) * normal_end + (1.-norm_dist_epi) * normal_epi
        eRR /= numpy.linalg.norm(eRR)
        eCC = numpy.cross(eL, eRR)
        eCC /= numpy.linalg.norm(eCC)
        eLL = numpy.cross(eRR, eCC)
        farray_eRR.InsertTuple(num_cell, eRR)
        farray_eCC.InsertTuple(num_cell, eCC)
        farray_eLL.InsertTuple(num_cell, eLL)
    if (verbose): print ("Filling mesh...")
    if (type_of_support == "cell"):
        ugrid_wall.GetCellData().AddArray(farray_norm_dist_end)
        ugrid_wall.GetCellData().AddArray(farray_norm_dist_epi)
        ugrid_wall.GetCellData().AddArray(farray_norm_z_end)
        ugrid_wall.GetCellData().AddArray(farray_norm_z_epi)
        ugrid_wall.GetCellData().AddArray(farray_eRR)
        ugrid_wall.GetCellData().AddArray(farray_eCC)
        ugrid_wall.GetCellData().AddArray(farray_eLL)
    elif (type_of_support == "point"):
        ugrid_wall.GetPointData().AddArray(farray_norm_dist_end)
        ugrid_wall.GetPointData().AddArray(farray_norm_dist_epi)
        ugrid_wall.GetPointData().AddArray(farray_norm_z_end)
        ugrid_wall.GetPointData().AddArray(farray_norm_z_epi)
        ugrid_wall.GetPointData().AddArray(farray_eRR)
        ugrid_wall.GetPointData().AddArray(farray_eCC)
        ugrid_wall.GetPointData().AddArray(farray_eLL)
if (__name__ == "__main__"):
    # Command-line usage: <script> <basename>
    assert (len(sys.argv) in [2]), "Number of arguments must be 1. Aborting."
    basename = sys.argv[1]
    # NOTE(review): readUGrid, readSTL and writeUGrid are not imported above —
    # as written this entry point would raise NameError; confirm the intended
    # vtk_py helper imports.
    ugrid_wall = readUGrid(basename + "-Mesh.vtk")
    pdata_end = readSTL(basename + "-End.stl")
    pdata_epi = readSTL(basename + "-Epi.stl")
    addLocalProlateSpheroidalDirections(ugrid_wall, pdata_end, pdata_epi)
    writeUGrid(ugrid_wall, basename + "-Mesh.vtk")
|
from datetime import datetime
DEFAULT_CONTEXT = "default_attributes"
class AllVoiceTestUtils(object):
    """Helpers that build mock Alexa / Google-Home (API.AI) webhook events and
    pick apart the resulting responses in voice-app tests."""

    def get_mock_alexa_event(self, intent=None, session_id="SessionId.uuid", user_id="user_id", attributes=None, parameters=None):
        """Build a minimal Alexa request envelope.

        When *intent* is given, the request becomes an IntentRequest with
        *parameters* converted to Alexa slots; *attributes* are attached as
        session attributes.
        """
        mock_event = {
            "session": {
                "sessionId": session_id,
                "application": {
                    "applicationId": "amzn1.ask.skill.1234"
                },
                "user": {
                    "userId": user_id
                },
                "new": False
            },
            "request": {
                "requestId": "EdwRequestId.24744310-0cfc-432e-a5fd-d5f42813b8b7",
                "locale": "en-US",
                "timestamp": "2016-12-16T16:27:31Z",
            },
            "version": "1.0"
        }
        if intent:
            mock_event["request"]["type"] = "IntentRequest"
            mock_event["request"]["intent"] = {
                "name": intent,
                "slots": {}
            }
            if parameters:
                mock_event["request"]["intent"]["slots"] = \
                    self._dict_to_slot(parameters)
        if attributes:
            mock_event["session"]["attributes"] = attributes
        return mock_event

    @staticmethod
    def _dict_to_slot(params):
        """Convert {key: value} into Alexa's slot format."""
        return {
            key: {"name": key, "value": value}
            for key, value in params.items()
        }

    @staticmethod
    def get_mock_google_home_event(
        intent=None,
        session_id="SessionId.uuid",
        user_id="1",
        parameters=None,
        attributes=None,
        contexts=None
    ):
        """
        Build a mock API.AI (Google Home) webhook request.

        :type intent: str or None
        :type session_id: str or None
        :type user_id: str or None
        :type parameters: dict or None
        :type attributes: dict or None
        :type contexts: list or None
        :rtype: dict
        """
        now = datetime.now().isoformat()
        if not contexts:
            contexts = []
        # Fixed "originalRequest" payload mimicking an Assistant text query.
        original_request = {
            "source": "google",
            "data": {
                "conversation": {
                    "conversation_id": "1488240997678",
                    "type": 2,
                    "conversation_token": "[]"
                },
                "user": {
                    "user_id": user_id
                },
                "surface": {
                    "capabilities": [
                        {"name": "actions.capability.AUDIO_INPUT"},
                        {"name": "actions.capability.AUDIO_OUTPUT"}
                    ]
                },
                "inputs": [{
                    "raw_inputs": [{
                        "input_type": 2,
                        "annotation_sets": [],
                        "query": "who needs help"
                    }],
                    "intent": "assistant.intent.action.TEXT",
                    "arguments": [{
                        "text_value": "who needs help",
                        "raw_text": "who needs help",
                        "name": "text"
                    }]
                }]
            }
        }
        meta_data = {
            u'intentName': u'Find News',
            u'webhookUsed': u'true',
            u'intentId': u'fc0eede5-fa41-46fe-8812-fa13adf65fef',
            u'webhookForSlotFillingUsed': u'false'
        }
        mock_event = {
            "id": "4ecf1b39-0d2b-4498-9af9-0ca2f500660a",
            "timestamp": now,
            "lang": "en",
            "originalRequest": original_request,
            "result": {
                "source": "agent",
                "resolvedQuery": "what is happening today",
                "actionIncomplete": False,
                "metadata": meta_data,
                "contexts": contexts,
                "fulfillment": {
                    "speech": "",
                    "messages": [
                        {
                            "type": 0,
                            "speech": ""
                        }
                    ]
                },
                "score": 1
            },
            "status": {
                "code": 200,
                "errorType": "success"
            },
            "sessionId": session_id
        }
        if intent:
            mock_event["result"]["action"] = intent
        if parameters:
            mock_event["result"]["parameters"] = parameters
        if attributes:
            # session attributes ride in a long-lived default context
            mock_event["result"]["contexts"].append({
                "name": DEFAULT_CONTEXT,
                "parameters": attributes,
                "lifespan": 99
            })
        return mock_event

    @staticmethod
    def context_to_dict(result):
        """Index an API.AI response's contextOut list by context name.

        NOTE(review): assumes *result* always carries a 'contextOut' list;
        result.get() returns None otherwise and the comprehension would raise
        TypeError — confirm callers guarantee it.
        """
        return {
            context.get('name', ""): {
                'lifespan': context.get('lifespan'),
                'parameters': context.get('parameters')
            }
            for context in result.get('contextOut')
        }

    def get_attributes(self, result):
        """Return the parameters of the default-attributes context."""
        return self.context_to_dict(result).get(DEFAULT_CONTEXT).get("parameters")

    def get_context(self, result, name):
        """Return the parameters of the context called *name*."""
        return self.context_to_dict(result).get(name).get("parameters")
class MockLogger(object):
    """No-op stand-in for a datastore-backed logger: every hook succeeds and
    returns an empty string."""

    @classmethod
    def get_or_insert(cls, *args, **kwargs):
        """Pretend to fetch-or-create a log record."""
        return ""

    @classmethod
    def log_error(cls, *args, **kwargs):
        """Pretend to record an error."""
        return ""
|
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
import re
import json
import logging
logger = logging.getLogger(__name__)
# set of message types to be used when invoking send_notification
# important for client to demultiplex messages
TRANSFER_CONFIRMATION = 'TRANSFER_CONFIRMATION'
SWAP_CONFIRMATION = 'SWAP_CONFIRMATION'
SWAP_CANCELLATION = 'SWAP_CANCELLATION'
SWAP_FINALIZATION = 'SWAP_FINALIZATION'
INCOMING_SWAP = 'INCOMING_SWAP'
MATCHED_SWAP = 'MATCHED_SWAP'
CANCELLED_SWAP = 'CANCELLED_SWAP'
REGISTERED_WALLET = 'REGISTERED_WALLET'
CONFIRMED_DEPOSIT = 'CONFIRMED_DEPOSIT'
REQUESTED_WITHDRAWAL = 'REQUESTED_WITHDRAWAL'
CONFIRMED_WITHDRAWAL = 'CONFIRMED_WITHDRAWAL'
CHECKPOINT_CREATED = 'CHECKPOINT_CREATED'
# subscription stream types and formats
# Both stream ids must look like "<40-hex>/<40-hex>" (see is_valid_stream).
STREAM_TYPES = {
    'wallet': re.compile(r'^[a-f0-9]{40}\/[a-f0-9]{40}$'),
    'tokenpair': re.compile(r'^[a-f0-9]{40}\/[a-f0-9]{40}$'),
}
# allowed group names
group_regex = re.compile('^[a-zA-Z0-9-.]+$')
# module-level channel layer shared by all senders below
channel_layer = get_channel_layer()
def check_structure(struct, conf):
    """Recursively validate that *conf* matches the template *struct*.

    Templates may nest: dicts of sub-templates, single-element lists (every
    element of conf must match), plain types, or (type, regex) tuples whose
    pattern is matched against conf.lower().  Anything else fails.
    """
    if isinstance(struct, dict) and isinstance(conf, dict):
        # every template key must exist in conf and match recursively
        return all(key in conf and check_structure(struct[key], conf[key])
                   for key in struct)
    if isinstance(struct, list) and isinstance(conf, list):
        # conf is a list whose every element matches the single template item
        return all(check_structure(struct[0], element) for element in conf)
    if isinstance(struct, type):
        # leaf: template is simply the expected type
        return isinstance(conf, struct)
    if isinstance(struct, tuple) and isinstance(struct[0], type):
        # leaf: (type, pattern) — type check plus case-insensitive regex match
        return isinstance(conf, struct[0]) and re.compile(struct[1]).match(conf.lower())
    # template is neither a dict, nor list, nor type
    return False
# check if stream is valid
# returns group name of that stream
def is_valid_stream(name):
    """Validate a "type/id" stream *name*.

    Returns the channel-layer group name "type.id" on success, or False when
    the name is malformed or the type is unknown.
    """
    parts = name.split('/', 1)
    if len(parts) < 2:
        return False
    stream_type = parts[0].lower()
    stream_id = parts[1].lower()
    pattern = STREAM_TYPES.get(stream_type)
    if pattern is None:
        return False
    if not pattern.match(stream_id):
        return False
    # group names may not contain '/', so fold it into '.'
    stream_id = stream_id.replace("/", ".")
    if not (group_regex.match(stream_type) and group_regex.match(stream_id)):
        return False
    return '{}.{}'.format(stream_type, stream_id)
# send websocket notification
async def send_notification_async(stream_prefix, stream_id, event_name, data):
    """Broadcast *data* as event *event_name* on the "<prefix>/<id>" group."""
    stream_name = '{}/{}'.format(stream_prefix, stream_id)
    group_name = is_valid_stream(stream_name)
    payload = {
        'type': 'notification',
        'data': {
            'type': stream_name,
            'data': {
                'type': event_name,
                'data': data
            }
        }
    }
    # best-effort: log failures to aid debugging instead of raising
    try:
        await channel_layer.group_send(
            group_name,
            {
                'type': 'ws.forward',
                'message': payload
            }
        )
    except Exception as e:
        logger.warning('failed to send notification, {}'.format(e))
def send_notification(stream_prefix, stream_id, event_name, data):
    """Synchronous wrapper around send_notification_async()."""
    sync_send = async_to_sync(send_notification_async)
    sync_send(stream_prefix, stream_id, event_name, data)
# send websocket response
async def send_response(channel_name, resource, data):
    """Send a response payload for *resource* to a single channel."""
    payload = {
        'type': 'response',
        'data': {
            'type': resource,
            'data': data
        }
    }
    # best-effort: log failures to aid debugging instead of raising
    try:
        await channel_layer.send(
            channel_name,
            {
                'type': 'ws.forward',
                'message': payload
            }
        )
    except Exception as e:
        logger.warning('failed to send response, {}'.format(e))
# send websocket error
async def send_error(channel_name, error, cause=None):
    """Send an error payload (message plus optional cause) to one channel."""
    payload = {
        'type': 'error',
        'data': {
            'message': error,
            'cause': cause
        }
    }
    # best-effort: log failures to aid debugging instead of raising
    try:
        await channel_layer.send(
            channel_name,
            {
                'type': 'ws.forward',
                'message': payload
            }
        )
    except Exception as e:
        logger.warning('failed to send error, {}'.format(e))
|
# Generated by Django 2.2.10 on 2020-06-11 13:33
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.2.10, 2020-06-11).

    Adds GovAgency, Review, FollowUp and Document models, plus "Recycled*"
    proxy models exposing a `recycle_objects` manager — presumably surfacing
    soft-deleted rows (see 0018_make_all_soft_deletable); confirm in the
    model managers.  Generated file: keep hand edits to comments only.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('api', '0018_make_all_soft_deletable'),
    ]

    operations = [
        # bare id-only table
        migrations.CreateModel(
            name='GovAgency',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        # proxy over api.Factory with the recycle_objects manager
        migrations.CreateModel(
            name='RecycledFactory',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('api.factory',),
            managers=[
                ('recycle_objects', django.db.models.manager.Manager()),
            ],
        ),
        # proxy over api.Image
        migrations.CreateModel(
            name='RecycledImage',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('api.image',),
            managers=[
                ('recycle_objects', django.db.models.manager.Manager()),
            ],
        ),
        # proxy over api.ReportRecord
        migrations.CreateModel(
            name='RecycledReportRecord',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('api.reportrecord',),
            managers=[
                ('recycle_objects', django.db.models.manager.Manager()),
            ],
        ),
        # soft-deletable review of a factory by a user
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deleted_at', models.DateTimeField(blank=True, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('reason', models.TextField()),
                ('factory', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='reviews', to='api.Factory')),
                ('reviewer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='reviews', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        # staff progress-tracking note tied to a factory
        migrations.CreateModel(
            name='FollowUp',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deleted_at', models.DateTimeField(blank=True, null=True)),
                ('note', models.TextField(help_text='此次進度追蹤備註')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('document', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='follow_ups', to='api.Factory')),
                ('staff', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='follow_ups', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        # official document (公文) record for a factory
        migrations.CreateModel(
            name='Document',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deleted_at', models.DateTimeField(blank=True, null=True)),
                ('code', models.CharField(help_text='公文號', max_length=100)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='documents', to=settings.AUTH_USER_MODEL)),
                ('factory', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='documents', to='api.Factory')),
            ],
            options={
                'abstract': False,
            },
        ),
        # proxy over the Document model created above
        migrations.CreateModel(
            name='RecycledDocument',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('api.document',),
            managers=[
                ('recycle_objects', django.db.models.manager.Manager()),
            ],
        ),
        # proxy over the FollowUp model created above
        migrations.CreateModel(
            name='RecycledFollowUp',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('api.followup',),
            managers=[
                ('recycle_objects', django.db.models.manager.Manager()),
            ],
        ),
    ]
|
import spotipy
from typing import List, Dict, Tuple, Set
import models
from db import Session_Factory
from threading import Thread
from time import time
from sqlalchemy import cast, desc, asc, Float
# Global Variables

# OAuth scopes requested from Spotify; joined with spaces when the
# SpotifyOAuth manager is built in gen_spotify().
scopes_list = [
    "user-read-playback-state",
    "user-read-email",
    "playlist-read-collaborative",
    "user-top-read",
    "user-read-currently-playing",
    "playlist-read-private",
    "playlist-modify-private",
    "user-library-read",
    "user-library-modify",
    "user-read-recently-played",
]
# Listens after which a track counts as "played enough" (see update_song()).
max_plays = 3
# Hard cap on registered accounts (enforced in gen_user()).
max_users = 100
update_interval = 1209600  # Number of seconds to wait (2 wks)
class UserThread:
    """Pairs a worker thread with the Spotify token dict it operates under."""

    def __init__(self, thread: Thread, token_info: Dict):
        self.thread = thread
        self.token_info = token_info
        # self.sp: spotipy.Spotify = gen_spotify(self.token_info)

    def is_alive(self) -> bool:
        """Proxy straight through to the wrapped thread's liveness check."""
        return self.thread.is_alive()

    def update_token(self, new_token: Dict) -> None:
        """Swap in a refreshed token dict for this user's thread."""
        self.token_info = new_token
        # self.sp.set_auth(self.token_info["access_token"])
def get_token(auth_manager: spotipy.oauth2.SpotifyOAuth, refresh_token: str) -> dict:
    """Exchange a refresh token for a fresh access-token payload."""
    return auth_manager.refresh_access_token(refresh_token)
def check_refresh(
    auth_manager: spotipy.oauth2.SpotifyOAuth, token: dict
) -> Tuple[Dict, bool]:
    """Return (token, was_refreshed), renewing the token only when expired."""
    assert token["access_token"]
    if not auth_manager.is_token_expired(token):
        return token, False
    return get_token(auth_manager, token["refresh_token"]), True
def gen_spotify(token_info: dict) -> spotipy.Spotify:
    """Build an authenticated spotipy client, refreshing the token if stale."""
    oauth = spotipy.oauth2.SpotifyOAuth(
        scope=" ".join(scopes_list), cache_path=".tokens"
    )
    token_info, _refreshed = check_refresh(oauth, token_info)
    return spotipy.Spotify(auth=token_info["access_token"])
def parse_uri(uri: str) -> str:
    """Extract the playlist ID from a colon-delimited Spotify playlist URI."""
    start = uri.find("playlist")
    assert start != -1
    _, _, playlist_id = uri[start:].rpartition(":")
    return playlist_id
def resolve_playlist(
    spotify: spotipy.Spotify,
    username: str,
    title: str,
    description: str,
    create: bool = False,
    collab: bool = False,
) -> Dict[str, Dict]:
    """Find the user's playlists whose description equals ``description``.

    Walks every page of the user's playlists. When ``collab`` is True only
    collaborative playlists qualify. If nothing matched and ``create`` is
    True, a new private playlist named ``title`` is created instead.

    Returns a mapping of playlist id -> full playlist dict.
    """
    existing_playlists = spotify.user_playlists(username)
    playlists_list: list = existing_playlists["items"]
    ret: Dict[str, Dict] = dict()
    while playlists_list:
        playlist = playlists_list.pop()
        # Match on exact description; optionally require the collaborative flag.
        if playlist["description"] == description and (
            not collab or (collab and playlist.get("collaborative", False))
        ):
            ret[playlist["id"]] = playlist
        # Current page exhausted: pull the next page, if any.
        if len(playlists_list) == 0 and existing_playlists["next"]:
            existing_playlists = spotify.next(existing_playlists)
            playlists_list = existing_playlists["items"]
        else:
            playlist = None
    if not ret and create:
        playlist = spotify.user_playlist_create(
            username, title, public=False, description=description
        )
        ret[playlist["id"]] = playlist
    return ret
def update_playlists(
    session,
    userdata: models.User,
    add: Dict[str, Dict],
    existing: List[models.Playlist],
    candidate: bool,
    retstring: str = "",
) -> Tuple[models.User, str]:
    """Reconcile the user's stored Playlist rows with ``add`` from Spotify.

    Stored playlists still present in ``add`` are kept (and dropped from
    ``add``); stale ones are unlinked, and deleted outright when the user
    owns them. Remaining entries in ``add`` are linked, creating rows for
    owned playlists and reusing existing rows for shared ones.

    Returns the (possibly mutated) user plus an updated HTML status string.
    NOTE: ``add`` is mutated in place by the first loop.
    """
    checkset = set(add.keys())
    for playlist in existing:
        if playlist.playlist_id in checkset:
            # Still present on Spotify: nothing to change for this row.
            del add[playlist.playlist_id]
        else:
            is_owner = playlist.owner == userdata.username
            if is_owner:
                # Owned and gone from Spotify: remove the row entirely.
                session.delete(playlist)
            # NOTE(review): unlink assumed to apply to both owned and shared
            # stale playlists — confirm against original indentation.
            del userdata.playlists[playlist.playlist_id]
    for playlist in add.values():
        is_owner = playlist["owner"]["id"] == userdata.username
        if is_owner:
            userdata.playlists[playlist["id"]] = models.Playlist(
                playlist_id=playlist["id"],
                owner=playlist["owner"]["id"],
                candidate=candidate,
            )
        else:
            # Shared playlist: reuse the row created by its owner, if any.
            existing_playlists = (
                session.query(models.Playlist)
                .filter_by(playlist_id=playlist["id"])
                .first()
            )
            if existing_playlists:
                userdata.playlists[playlist["id"]] = existing_playlists
        if candidate:  # Only one candidate playlist allowed
            break
    return userdata, retstring + f"<h2>Stored Playlists Updated</h2><br>"
def gen_user(
    session, token_info: Dict
) -> str:  # caller is responsible for closing session
    """Register (or refresh) the authenticated Spotify user.

    Resolves/creates the auto-curate playlist and any collab playlists, then
    either updates an existing DB row (refresh token + playlist links) or
    creates a new one, subject to the ``max_users`` cap.

    Returns an HTML fragment describing what happened.
    """
    spotify = gen_spotify(token_info)
    refresh_token = token_info["refresh_token"]
    user = spotify.me()
    username = user["id"]
    email = user["email"]
    userdata: models.User = session.query(models.User).filter_by(
        username=user["id"]
    ).first()
    # Auto-curate playlist is created if missing; collabs are lookup-only.
    candidates = resolve_playlist(
        spotify,
        username,
        "Spotifylter Playlist",
        "Spotifylter Auto-Curate",
        create=True,
    )
    collabs = resolve_playlist(
        spotify, username, "Spotifylter Collab", "Spotifylter Auto-Collab", collab=True
    )
    if userdata:
        # Existing account: refresh token if changed, then re-sync playlists.
        retstring = ""
        if userdata.refresh_token != refresh_token:
            userdata.refresh_token = refresh_token
            retstring += (
                f'<h2>{user["display_name"]}\'s access token has been updated</h2><br>'
            )
        db_curates: List[models.Playlist] = [
            pl for pl in userdata.playlists.values() if pl.candidate == True
        ]
        db_collabs: List[models.Playlist] = [
            pl for pl in userdata.playlists.values() if pl.candidate == False
        ]
        userdata, retstring = update_playlists(
            session, userdata, candidates, db_curates, True, retstring
        )
        userdata, retstring = update_playlists(
            session, userdata, collabs, db_collabs, False, retstring
        )
        if retstring == "":
            # Nothing changed; nothing to persist.
            return f'<h2>{user["display_name"]} has already been registered</h2>'
        else:
            session.add(userdata)
            session.commit()
            return retstring
    # New account: enforce the registration cap before creating anything.
    current_users = session.query(models.User).count()
    if current_users < max_users:
        retstring = ""
        # last_updated=0 marks the user for initial population (check_user()).
        new_user = models.User(
            username=username, email=email, refresh_token=refresh_token, last_updated=0,
        )
        session.add(new_user)
        session.commit()
        new_user, retstring = update_playlists(
            session, new_user, candidates, [], True, retstring
        )
        new_user, retstring = update_playlists(
            session, new_user, collabs, [], False, retstring
        )
        session.add(new_user)
        session.commit()
        return retstring + f'<h2>{user["display_name"]} has been created</h2>'
    else:
        return f"<h2>The user limit: {current_users}/{max_users} has been reached. Account creation failed</h2>"
def delete_user(
    session, token_info
) -> bool:  # caller is responsible for closing session
    """Remove the authenticated user's DB row; returns the deletion count."""
    spotify = gen_spotify(token_info)
    me = spotify.me()
    deleted = session.query(models.User).filter_by(username=me["id"]).delete()
    session.commit()
    return deleted
def update_song(username: str, song: dict, candidate: bool) -> bool:
    """Record one listen of ``song``; True once it reaches ``max_plays``.

    Creates or updates the per-user Count row, folding the listened
    duration into the running average.
    """
    track_id = song["item"]["id"]
    listened_s = song["progress_ms"] / 1000
    duration_s = song["item"]["duration_ms"] / 1000
    session = Session_Factory()
    counts: models.Count = session.query(models.Count).filter_by(
        username=username, song=track_id, candidate=candidate
    ).first()
    if counts is None:
        # First listen: seed the row with this play's stats.
        counts = models.Count(
            username=username,
            song=track_id,
            candidate=candidate,
            song_count=1,
            song_avg=listened_s,
            song_duration=duration_s,
            filtered=False,
        )
        session.add(counts)
    else:
        counts.song_avg = update_avg(counts.song_count, listened_s, counts.song_avg)
        counts.song_count += 1
    reached_limit = counts.song_count >= max_plays
    session.commit()
    session.close()
    return reached_limit
def filter_out(
    username: str, sp: spotipy.Spotify, playlist_id: str, song: dict, preserve: bool
):
    """Remove ``song`` from the playlist and update its candidate Count row.

    With ``preserve`` the candidate stats are merged into the non-candidate
    row (weighted average) or the row is demoted to non-candidate; otherwise
    the row is just flagged as filtered.
    """
    song_id = song["item"]["id"]
    sp.user_playlist_remove_all_occurrences_of_tracks(username, playlist_id, [song_id])
    s = Session_Factory()
    count_row: models.Count = s.query(models.Count).filter_by(
        username=username, song=song_id, candidate=True
    ).first()
    if count_row:
        if preserve:  # merge
            s.delete(count_row)
            new_data: models.Count = s.query(models.Count).filter_by(
                username=username, song=song_id, candidate=False
            ).first()
            if new_data:
                # Weighted average of the two rows' listen times.
                prev_count = new_data.song_count
                new_data.song_count += count_row.song_count
                new_data.song_avg = (
                    (prev_count * new_data.song_avg)
                    + (count_row.song_count * count_row.song_avg)
                ) / new_data.song_count
                s.add(new_data)
            else:
                # No non-candidate row: demote the candidate row instead.
                count_row.candidate = False
                s.add(count_row)
        else:
            count_row.filtered = True
            s.add(count_row)
    s.commit()
    s.close()
def update_avg(prev_count: int, progress: float, avg_progress: float) -> float:
    """Fold one new observation into a running mean of ``prev_count`` items."""
    new_count = prev_count + 1
    return (prev_count * avg_progress + progress) / new_count
def get_song_ids(sp: spotipy.Spotify, playlist_id: str) -> List[str]:
    """Collect every track id in the playlist, following pagination."""
    page = sp.playlist_tracks(playlist_id, fields="items(track(id)), next")
    ids: List[str] = []
    while True:
        ids.extend(entry["track"]["id"] for entry in page["items"])
        if not page.get("next", None):
            return ids
        page = sp.next(page)
def get_recently_played(sp: spotipy.Spotify) -> List[str]:
    """Return the unique track ids from the user's recent listening history."""
    history = sp.current_user_recently_played()
    unique_ids = {entry["track"]["id"] for entry in history["items"]}
    return list(unique_ids)
# caller is responsible for closing session
def populate_user(
    sp: spotipy.Spotify, s, user: models.User, song_ids: List[str]
) -> int:
    """Add ``song_ids`` to the user's candidate playlist, skipping tracks
    already saved or already in the playlist. Returns how many were added
    and stamps ``user.last_updated``.
    """
    candidate: models.Playlist = s.query(models.Playlist).filter_by(
        owner=user.username, candidate=True
    ).first()
    pre_ids = set(get_song_ids(sp, candidate.playlist_id))
    in_saveds = sp.current_user_saved_tracks_contains(song_ids)
    # Keep only tracks that are neither saved nor already in the playlist.
    res: List[str] = [
        song_ids[i]
        for i, in_saved in enumerate(in_saveds)
        if not in_saved and song_ids[i] not in pre_ids
    ]
    if res:
        sp.user_playlist_add_tracks(user.username, candidate.playlist_id, res)
    # NOTE(review): timestamp assumed to update even when nothing was added,
    # so the user is not re-populated every check — confirm original nesting.
    user.last_updated = int(time())
    s.add(user)
    s.commit()
    return len(res)
def check_user(sp: spotipy.Spotify, session, user: models.User):
    """Populate a brand-new user; periodic refresh is not implemented yet."""
    if user.last_updated == 0:
        populate_user(
            sp, session, user, get_recently_played(sp)
        )  # initial populate since we don't have any data
    elif time() - user.last_updated > update_interval:
        # TODO: periodic refresh after update_interval — currently a no-op.
        pass
def get_filtered(s, token_info) -> List[models.Count]:
    """Return the caller's filtered-out candidate songs, lowest score first.

    Score is play_count * (avg listen time / duration); ascending order puts
    the least-listened tracks first.
    """
    spotify = gen_spotify(token_info)
    user = spotify.me()
    username = user["id"]
    songs: List[models.Count] = (
        s.query(models.Count)
        .filter_by(username=username, candidate=True, filtered=True)
        .order_by(
            asc(
                # cast to Float so the DB does not do integer division.
                cast(models.Count.song_count, Float)
                * (models.Count.song_avg / models.Count.song_duration)
            )
        )
        .all()
    )
    return songs
def remove_song(s, token_info, song_id) -> int:
    """Delete the caller's candidate Count row for ``song_id``.

    Returns the number of rows removed.
    """
    spotify = gen_spotify(token_info)
    me = spotify.me()
    removed: int = s.query(models.Count).filter_by(
        username=me["id"], candidate=True, song=song_id
    ).delete()
    s.commit()
    return removed
def readd_song(s, token_info, song_id):
    """Clear a song's filtered flag and put it back in the candidate playlist."""
    spotify = gen_spotify(token_info)
    me = spotify.me()
    username = me["id"]
    count_row: models.Count = s.query(models.Count).filter_by(
        username=username, candidate=True, song=song_id
    ).first()
    count_row.filtered = False
    target: models.Playlist = s.query(models.Playlist).filter_by(
        owner=username, candidate=True
    ).first()
    spotify.user_playlist_add_tracks(username, target.playlist_id, [song_id])
    s.add(count_row)
    s.commit()
def get_top_songs(s, token_info, num_songs) -> Tuple[List[models.Count], List[bool]]:
    """Return the user's top ``num_songs`` Count rows plus saved-track flags.

    Rows are ranked by play_count * (avg listen / duration), descending.
    ``in_saveds`` is a parallel list saying whether each song is already in
    the user's Liked Songs.
    """
    spotify = gen_spotify(token_info)
    user = spotify.me()
    username = user["id"]
    top_songs: List[models.Count] = (
        s.query(models.Count)
        .filter_by(username=username)
        .order_by(
            desc(
                cast(models.Count.song_count, Float)
                * (models.Count.song_avg / models.Count.song_duration)
            )
        )
        .limit(num_songs)
        .all()
    )
    song_ids = [song.song for song in top_songs]
    in_saveds = []
    # Contains-checks are batched in 50s (presumably the API per-call limit);
    # -(-a // b) is ceiling division.
    for i in range(-(-num_songs // 50)):
        in_saveds.extend(
            spotify.current_user_saved_tracks_contains(song_ids[50 * i : 50 * (i + 1)])
        )
    return top_songs, in_saveds
def add_saved(token_info, song_id):
    """Add a single track to the authenticated user's Liked Songs."""
    gen_spotify(token_info).current_user_saved_tracks_add([song_id])
|
<filename>f5_cccl/test/test_f5_cccl.py
#!/usr/bin/env python
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import f5
import f5_cccl
from f5_cccl._f5 import Policy
import pytest
from mock import MagicMock
def test_version():
    """f5_cccl must export __version__ as a plain string."""
    assert isinstance(f5_cccl.__version__, str)
@pytest.fixture
def cp():
    """Per-test deep copy of controller_policy, safe for tests to mutate."""
    return copy.deepcopy(controller_policy)
@pytest.fixture
def mock_bigip_policy():
    """MagicMock standing in for an f5 SDK Policy, preloaded with bigip_policy."""
    mbp = MagicMock(spec=f5.bigip.tm.ltm.policy.Policy)
    mbp.__dict__.update(bigip_policy)
    return mbp
def test_policy_class_equal(mock_bigip_policy):
    """A BIG-IP policy and the matching controller policy compare equal."""
    bp = Policy(mock_bigip_policy)
    up = Policy(controller_policy)
    assert bp == up
def test_policy_class_policy_unequal(mock_bigip_policy, cp):
    """Changing the policy name must break equality."""
    cp['name'] = 'notequal'
    bp = Policy(mock_bigip_policy)
    up = Policy(cp)
    assert bp != up
def test_policy_class_rule_unequal(mock_bigip_policy, cp):
    """Changing a rule's ordinal must break equality."""
    cp['rules'][0]['ordinal'] = 1
    bp = Policy(mock_bigip_policy)
    up = Policy(cp)
    assert bp != up
def test_policy_class_action_unequal(mock_bigip_policy, cp):
    """Changing an action's target pool must break equality."""
    cp['rules'][0]['actions'][0]['pool'] = 'wrong_pool'
    bp = Policy(mock_bigip_policy)
    up = Policy(cp)
    assert bp != up
def test_policy_class_condition_unequal(mock_bigip_policy, cp):
    """Adding a condition value must break equality."""
    cp['rules'][0]['conditions'][1]['values'].append('another')
    bp = Policy(mock_bigip_policy)
    up = Policy(cp)
    assert bp != up
# Fixture: a policy exactly as returned by the BIG-IP iControl REST API,
# including the _meta_data bookkeeping the f5-sdk attaches to resources.
# Compared (via f5_cccl._f5.Policy) against controller_policy below.
bigip_policy = {
    '_meta_data': {
        'allowed_commands': [],
        'allowed_lazy_attributes': [
            "< class 'f5.bigip.tm.ltm.policy.Rules_s' >",
            "< class 'f5.bigip.resource.Stats' >"],
        'attribute_registry': {
            'tm:ltm:policy:rules:rulescollectionstate':
            "< class 'f5.bigip.tm.ltm.policy.Rules_s' >"
        },
        'bigip': '<f5.bigip.ManagementRoot object at 0x10ecd5410>',
        'container': '<f5.bigip.tm.ltm.policy.Policys object at 0x10eed48d0>',
        'creation_uri_frag': '',
        'creation_uri_qargs': {
            'expandSubcollections': ['true'],
            'ver': ['12.1.0']
        },
        'exclusive_attributes': [],
        'icontrol_version': '',
        'icr_session':
        "< icontrol.session.iControlRESTSession object at 0x10ed92410 >",
        'minimum_version': '11.5.0',
        'object_has_stats': True,
        'read_only_attributes': [],
        'reduction_forcing_pairs': [('enabled', 'disabled'),
                                    ('online', 'offline'),
                                    ('vlansEnabled', 'vlansDisabled')],
        'required_command_parameters': set([]),
        'required_creation_parameters': set(['name', 'strategy']),
        'required_json_kind': 'tm:ltm:policy:policystate',
        'required_load_parameters': set(['name']),
        'uri': 'https://10.190.25.16:443/mgmt/tm/ltm/policy/~k8s~mock/'
    },
    'controls': ['forwarding'],
    'fullPath': '/k8s/mock',
    'generation': 117177,
    'kind': 'tm:ltm:policy:policystate',
    'lastModified': '2017-05-24T21:04:14Z',
    'name': 'mock',
    'partition': 'k8s',
    'references': {},
    'requires': ['http'],
    # One rule ("nginx-foo") with one forward action and two conditions.
    'rulesReference': {
        'isSubcollection': True,
        'items': [{
            'actionsReference': {
                'isSubcollection': True,
                'items': [{
                    'code': 0,
                    'expirySecs': 0,
                    'forward': True,
                    'fullPath': '0',
                    'generation': 117175,
                    'kind': 'tm:ltm:policy:rules:actions:actionsstate',
                    'length': 0,
                    'name': '0',
                    'offset': 0,
                    'pool': '/k8s/nginx-05c03468ced66d2c',
                    'poolReference': {
                        'link': 'https://localhost/mgmt/tm/ltm/pool/'
                    },
                    'port': 0,
                    'request': True,
                    'select': True,
                    'selfLink': 'https://localhost/mgmt/tm/ltm/policy/',
                    'status': 0,
                    'timeout': 0,
                    'vlanId': 0
                }],
                'link': 'https://localhost/mgmt/tm/ltm/policy/'
            },
            'conditionsReference': {
                'isSubcollection': True,
                'items': [{
                    'caseInsensitive': True,
                    'equals': True,
                    'external': True,
                    'fullPath': '0',
                    'generation': 117175,
                    'host': True,
                    'httpHost': True,
                    'index': 0,
                    'kind': 'tm:ltm:policy:rules:conditions:conditionsstate',
                    'name': '0',
                    'present': True,
                    'remote': True,
                    'request': True,
                    'selfLink': 'https://localhost/mgmt/tm/ltm/policy/',
                    'values': ['nginx.local.pcfdev.io']
                }, {
                    'caseInsensitive': True,
                    'equals': True,
                    'external': True,
                    'fullPath': '1',
                    'generation': 117175,
                    'httpUri': True,
                    'index': 1,
                    'kind': 'tm:ltm:policy:rules:conditions:conditionsstate',
                    'name': '1',
                    'pathSegment': True,
                    'present': True,
                    'remote': True,
                    'request': True,
                    'selfLink': 'https://localhost/mgmt/tm/ltm/policy/',
                    'values': ['foo']
                }],
                'link': 'https://localhost/mgmt/tm/ltm/policy/'
            },
            'fullPath': 'nginx-foo',
            'generation': 117176,
            'kind': 'tm:ltm:policy:rules:rulesstate',
            'name': 'nginx-foo',
            'ordinal': 0,
            'selfLink': 'https://localhost/mgmt/tm/ltm/policy/'
        }],
        'link': 'https://localhost/mgmt/tm/ltm/policy/'
    },
    'selfLink': 'https://localhost/mgmt/tm/ltm/policy/',
    'status': 'published',
    'strategy': '/Common/first-match',
    'strategyReference': {
        'link': 'https://localhost/mgmt/tm/ltm/policy-strategy/'
    }
}
# Fixture: the same policy in the controller's submission format — the
# canonical form the tests above mutate to prove inequality detection.
controller_policy = {
    'name': 'mock',
    'partition': 'k8s',
    'controls': ['forwarding'],
    'strategy': '/Common/first-match',
    'legacy': True,
    'requires': ['http'],
    'rules': [{
        'ordinal': 0,
        'conditions': [{
            'index': 0,
            'name': '0',
            'request': True,
            'equals': True,
            'host': True,
            'values': ['nginx.local.pcfdev.io'],
            'httpHost': True
        }, {
            'index': 1,
            'name': '1',
            'httpUri': True,
            'request': True,
            'equals': True,
            'pathSegment': True,
            'values': ['foo']
        }],
        'name': 'nginx-foo',
        'actions': [{
            'forward': True,
            'request': True,
            'name': '0',
            'pool': '/k8s/nginx-05c03468ced66d2c'
        }]
    }]
}
|
<reponame>rprabhuh/SDNDDoS
# encode categorical protocol name to number
#
# Matching is by substring (inputs may carry extra text around the protocol
# name), so candidates are tried longest-name-first. This fixes the original
# elif chain where short names shadowed longer ones: 'udp' matched before
# 'udplite', 'il' before 'mobile', 'icmp' before 'ipv6-icmp', and 'st'
# before 'distributed'. It also drops the duplicate, unreachable 'stp'
# branch the chain contained.
_PROTOCOL_NUMBERS = {
    'tcp': 6, 'udp': 17, 'icmp': 1, 'hopopt': 0, 'igmp': 2, 'ggp': 3,
    'ipv4': 4, 'stp': 118, 'st': 5, 'cbt': 7, 'egp': 8, 'igp': 9,
    'bbn-rcc': 10, 'nvp': 11, 'pup': 12, 'argus': 13, 'emcon': 14,
    'xnet': 15, 'chaos': 16, 'mux': 18, 'dcn': 19, 'hmp': 20, 'prm': 21,
    'xns-idp': 22, 'trunk-1': 23, 'trunk-2': 24, 'leaf-1': 25, 'leaf-2': 26,
    'rdp': 27, 'irtp': 28, 'iso-tp4': 29, 'netblt': 30, 'mfe-nsp': 31,
    'merit-inp': 32, 'dccp': 33, '3pc': 34, 'idpr': 35, 'xtp': 36,
    'ddp': 37, 'idpr-cmtp': 38, 'tp++': 39, 'il': 40, 'ipv6': 41,
    'sdrp': 42, 'ipv6-route': 43, 'ipv6-frag': 44, 'idrp': 45, 'rsvp': 46,
    'gre': 47, 'dsr': 48, 'bna': 49, 'esp': 50, 'ah': 51, 'i-nlsp': 52,
    'swipe': 53, 'narp': 54, 'mobile': 55, 'tlsp': 56, 'skip': 57,
    'ipv6-icmp': 58, 'ipv6-nonxt': 59, 'ipv6-opts': 60, 'internal': 61,
    'cftp': 62, 'local': 63, 'sat-expak': 64, 'kryptolan': 65, 'rvd': 66,
    'ippc': 67, 'distributed': 68, 'sat-mon': 69, 'visa': 70, 'ipcv': 71,
    'cpnx': 72, 'cphb': 73, 'wsn': 74, 'pvp': 75, 'br-sat-mon': 76,
    'sun-nd': 77, 'wb-mon': 78, 'wb-expak': 79, 'iso-ip': 80, 'vmtp': 81,
    'secure-vmtp': 82, 'vines': 83, 'ttp': 84, 'iptm': 84, 'nsfnet-igp': 85,
    'dgp': 86, 'tcf': 87, 'eigrp': 88, 'ospfigp': 89, 'sprite-rpc': 90,
    'larp': 91, 'mtp': 92, 'ax.25': 93, 'ipip': 94, 'micp': 95,
    'ssc-sp': 96, 'etherip': 97, 'encap': 98, 'private-encryp': 99,
    'gmtp': 100, 'ifmp': 101, 'pnni': 102, 'pim': 103, 'aris': 104,
    'scps': 105, 'qnx': 106, 'a/n': 107, 'ipcomp': 108, 'snp': 109,
    'compaq': 110, 'ipx-in-ip': 111, 'vrrp': 112, 'pgm': 113, '0-hop': 114,
    'l2tp': 115, 'ddx': 116, 'iatp': 117, 'srp': 119, 'uti': 120,
    'smp': 121, 'sm': 122, 'ptp': 123, 'isis': 124, 'fire': 125,
    'crtp': 126, 'crudp': 127, 'sscopmce': 128, 'iplt': 129, 'sps': 130,
    'pipe': 131, 'sctp': 132, 'fc': 133, 'rsvp-e2e-ignore': 134,
    'mobility': 135, 'udplite': 136, 'mpls-in-ip': 137, 'manet': 138,
    'hip': 139, 'shim6': 140, 'wesp': 141, 'rohc': 142,
    'experiment': 143,  # 253
    'test': 144,  # 254
}

# Longest names first so e.g. 'udplite' wins over 'udp'.
_PROTOCOL_MATCH_ORDER = sorted(_PROTOCOL_NUMBERS, key=len, reverse=True)


def encode_protocol(text):
    """Return the protocol number for the first protocol name found in ``text``.

    ``text`` is expected to be lower-case (as in the original elif chain).
    Unknown protocols map to 145 (the original stand-in for 255/reserved).
    """
    for name in _PROTOCOL_MATCH_ORDER:
        if name in text:
            return _PROTOCOL_NUMBERS[name]
    return 145  # 255
|
<filename>switchboard/tasks.py
from switchboard.models import Registry, SearchQuery
import requests
import logging
from celery.decorators import task
from celery.utils.log import get_task_logger
from django.core.cache import caches
import json
import time
class SearchQueryStatusLogger(object):
    ''' A helper class to update status of the Search Query '''

    # status codes: 0 = Failure, 1 = Success, 2 = Warning (initial default)

    def __init__(self, registries):
        # One log bucket per registry, keyed by the registry id as a string.
        # (Dict comprehension replaces the old enumerate() loop whose index
        # was never used.)
        self.registriesOperationsLog = {
            str(r.id): {
                'status': 2, 'errors': [], 'warnings': [], 'info': [],
                'debug': [], 'success': [], 'statustext': "",
            }
            for r in registries
        }
        # Millisecond timestamp attached to every log entry.
        self.current_milli_time = lambda: int(round(time.time() * 1000))

    def _append(self, registry_id, level, msg):
        # Shared helper for the five add_* methods below.
        self.registriesOperationsLog[registry_id][level].append(
            {'msg': msg, 'time': self.current_milli_time()}
        )

    def add_warning(self, registry_id, msg):
        self._append(registry_id, 'warnings', msg)

    def add_success(self, registry_id, msg):
        self._append(registry_id, 'success', msg)

    def add_error(self, registry_id, msg):
        self._append(registry_id, 'errors', msg)

    def add_info(self, registry_id, msg):
        self._append(registry_id, 'info', msg)

    def add_debug(self, registry_id, msg):
        self._append(registry_id, 'debug', msg)

    def set_statustext(self, registry_id, msg):
        self.registriesOperationsLog[registry_id]['statustext'] = msg

    def set_status(self, registry_id, status, statustext=None):
        self.registriesOperationsLog[registry_id]['status'] = status
        if statustext:
            self.registriesOperationsLog[registry_id]['statustext'] = statustext

    def get_all_status(self):
        """Map each registry id to its bare status code."""
        return {
            rid: entry['status']
            for rid, entry in self.registriesOperationsLog.items()
        }

    def get_allstatuses(self):
        """Full per-registry log, serialized to JSON (stored on SearchQuery)."""
        return json.dumps(self.registriesOperationsLog)
class BrokerManager(object):
    ''' A helper class to query different registries '''

    # Seconds to wait for a registry before giving up. Without this,
    # requests.get never times out and the Timeout handler below was dead.
    REQUEST_TIMEOUT = 30

    def __init__(self, query_type, query_parameter, query, credentials):
        self.query_type = query_type
        self.query_parameter = query_parameter  # 0=operators, 1=aircraft, 2=pilot
        self.query = query
        self.credentials = credentials  # bearer token sent to every registry

    def get_endpoint(self, registry_endpoint):
        """Build the collection URL for this query_parameter on a registry."""
        parameter_endpoint = {0: '/operators/', 1: '/aircraft/', 2: '/pilot/'}
        return registry_endpoint + parameter_endpoint[self.query_parameter]

    def search_registry(self, registry, logger):
        """Query one registry; return its JSON payload, or None on failure.

        All failures (connection, timeout, non-200) are recorded on
        ``logger`` under the registry's id.
        """
        registry_id = str(registry.id)
        url_to_query = self.get_endpoint(registry_endpoint=registry.endpoint) + self.query
        headers = {"Authorization": "Bearer " + self.credentials}
        # query different registries
        try:
            r = requests.get(url_to_query, headers=headers, timeout=self.REQUEST_TIMEOUT)
        except requests.exceptions.ConnectionError as ce:
            logger.add_error(registry_id=registry_id, msg="Connection error %s" % ce)
        except requests.exceptions.Timeout as te:
            # typo fixed: message previously read "Timout error"
            logger.add_error(registry_id=registry_id, msg="Timeout error %s" % te)
        else:
            if r.status_code == 200:
                logger.add_success(registry_id=registry_id, msg="Registry data retrieved")
                return r.json()
            # Previously swallowed silently; surface non-200 responses.
            logger.add_error(
                registry_id=registry_id,
                msg="Registry returned HTTP %s" % r.status_code,
            )
        return None
@task(name="QueryRegistries")
def QueryRegistries(jobid):
sq = SearchQuery.objects.get(id = jobid)
registries = Registry.objects.all()
myOpsLogger = SearchQueryStatusLogger(registries)
myBrokerHelper = BrokerManager( query_type = sq.query_type, query_parameter= sq.query_parameter, query = sq.query, credentials = sq.credentials)
res = []
for registry in registries:
results = myBrokerHelper.search_registry(registry = registry,logger = myOpsLogger)
if results and results['id']:
res.append(results)
else:
myOpsLogger.add_warning(registry_id = str(registry.id), msg= "Data returned but does not contain the ID")
sq.results = res
sq.logs = myOpsLogger.get_allstatuses()
sq.save()
@task(name="Add")
def Add(x, y):
return x + y |
from gsi_handlers.gameplay_archiver import GameplayArchiver
from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers
import services
from protocolbuffers import Sims_pb2
# GSI grid of per-sim buff add/remove events; one row per archived message.
sim_buff_log_schema = GsiGridSchema(label='Buffs Log', sim_specific=True)
sim_buff_log_schema.add_field('buff_id', label='Buff ID', type=GsiFieldVisualizers.INT, width=0.5)
sim_buff_log_schema.add_field('buff_name', label='Name', width=2)
sim_buff_log_schema.add_field('equipped', label='Equip', width=1)
sim_buff_log_schema.add_field('buff_reason', label='Reason', width=1)
sim_buff_log_schema.add_field('timeout', label='Timeout', width=2)
sim_buff_log_schema.add_field('rate', label='Rate', width=2)
sim_buff_log_schema.add_field('is_mood_buff', label='Is Mood Buff', width=2)
sim_buff_log_schema.add_field('progress_arrow', label='Progress Arrow', width=2)
sim_buff_log_schema.add_field('commodity_guid', label='Commodity Guid', type=GsiFieldVisualizers.INT, hidden=True)
sim_buff_log_schema.add_field('transition_into_buff_id', label='Next Buff ID', type=GsiFieldVisualizers.INT, hidden=True)
# Sink written to by archive_buff_message() below.
sim_buff_log_archiver = GameplayArchiver('sim_buff_log', sim_buff_log_schema)
def archive_buff_message(buff_msg, shows_timeout, change_rate):
    """Write one buff add/remove event from ``buff_msg`` into the GSI buff log."""
    # 'reason' is a hashed localized string; store its hash as hex if present.
    buff_reason = hex(buff_msg.reason.hash) if buff_msg.HasField('reason') else None
    entry = {'buff_id': buff_msg.buff_id, 'equipped': buff_msg.equipped, 'buff_reason': buff_reason, 'is_mood_buff': buff_msg.is_mood_buff, 'commodity_guid': buff_msg.commodity_guid, 'transition_into_buff_id': buff_msg.transition_into_buff_id}
    manager = services.buff_manager()
    if manager:
        # Resolve the tuning class so the log shows a readable name.
        buff_cls = manager.get(buff_msg.buff_id)
        entry['buff_name'] = buff_cls.__name__
    if buff_msg.equipped:
        # NOTE(review): rate is assumed to be logged only when shows_timeout
        # is set — confirm against the original indentation.
        if shows_timeout:
            if buff_msg.timeout:
                entry['timeout'] = buff_msg.timeout
            entry['rate'] = buff_msg.rate_multiplier
    if change_rate is not None:
        if buff_msg.buff_progress == Sims_pb2.BUFF_PROGRESS_NONE:
            entry['progress_arrow'] = 'No Arrow'
        elif buff_msg.buff_progress == Sims_pb2.BUFF_PROGRESS_UP:
            entry['progress_arrow'] = 'Arrow Up'
        else:
            entry['progress_arrow'] = 'Arrow Down'
    if buff_msg.HasField('mood_type_override'):
        entry['mood_type_override'] = buff_msg.mood_type_override
    sim_buff_log_archiver.archive(data=entry, object_id=buff_msg.sim_id)
# GSI grid of mood changes; each row expands into the buffs active at update.
sim_mood_log_schema = GsiGridSchema(label='Mood Log', sim_specific=True)
sim_mood_log_schema.add_field('mood_id', label='Mood ID', type=GsiFieldVisualizers.INT, width=0.5)
sim_mood_log_schema.add_field('mood_name', label='Name', width=2)
sim_mood_log_schema.add_field('mood_intensity', label='Intensity', width=2)
with sim_mood_log_schema.add_has_many('active_buffs', GsiGridSchema, label='Buffs at update') as sub_schema:
    sub_schema.add_field('buff_id', label='Buff ID')
    sub_schema.add_field('buff_name', label='Buff name')
    sub_schema.add_field('buff_mood', label='Buff Mood')
    sub_schema.add_field('buff_mood_override', label='Mood Override (current)')
    sub_schema.add_field('buff_mood_override_pending', label='Mood Override (pending)')
# Sink written to by archive_mood_message() below.
sim_mood_log_archiver = GameplayArchiver('sim_mood_log', sim_mood_log_schema)
def archive_mood_message(sim_id, active_mood, active_mood_intensity, active_buffs, changeable_buffs):
    """Log a mood update for ``sim_id`` with the buffs active at that moment."""
    mood_entry = {'mood_id': active_mood.guid64, 'mood_name': active_mood.__name__, 'mood_intensity': active_mood_intensity}
    active_buff_entries = []
    for (buff_type, buff) in active_buffs.items():
        buff_entry = {'buff_id': buff_type.guid64, 'buff_name': buff_type.__name__, 'buff_mood': buff.mood_type.__name__ if buff.mood_type is not None else 'None', 'buff_mood_override': buff.mood_override.__name__ if buff.mood_override is not None else 'None'}
        # Attach the pending override only for the matching changeable buff.
        for (changeable_buff, new_mood_override) in changeable_buffs:
            if changeable_buff is buff:
                buff_entry['buff_mood_override_pending'] = 'None' if new_mood_override is None else new_mood_override.__name__
                break
        active_buff_entries.append(buff_entry)
    mood_entry['active_buffs'] = active_buff_entries
    sim_mood_log_archiver.archive(data=mood_entry, object_id=sim_id)
|
from functools import partial
from typing import Optional, Tuple
import torch
from torch import nn, Tensor
from torch.autograd import grad
from torch.nn import functional as F
from adv_lib.utils.losses import difference_of_logits, difference_of_logits_ratio
from adv_lib.utils.visdom_logger import VisdomLogger
def pgd_linf(model: nn.Module,
             inputs: Tensor,
             labels: Tensor,
             ε: float,
             targeted: bool = False,
             steps: int = 40,
             random_init: bool = True,
             restarts: int = 1,
             loss_function: str = 'ce',
             relative_step_size: float = 0.01 / 0.3,
             absolute_step_size: Optional[float] = None,
             callback: Optional[VisdomLogger] = None) -> Tensor:
    """L-inf PGD attack with random restarts.

    Runs _pgd_linf up to ``restarts`` times, each restart attacking only the
    samples not yet fooled, and keeps the first successful adversarial input
    per sample. Returns the adversarial inputs (unmodified inputs where no
    attack succeeded).
    """
    device = inputs.device
    batch_size = len(inputs)
    adv_inputs = inputs.clone()
    adv_found = torch.zeros(batch_size, dtype=torch.bool, device=device)
    pgd_attack = partial(_pgd_linf, model=model, ε=ε, targeted=targeted, steps=steps, random_init=random_init,
                         loss_function=loss_function, relative_step_size=relative_step_size,
                         absolute_step_size=absolute_step_size)
    for i in range(restarts):
        # Only re-attack samples that are not yet adversarial.
        adv_found_run, adv_inputs_run = pgd_attack(inputs=inputs[~adv_found], labels=labels[~adv_found])
        adv_inputs[~adv_found] = adv_inputs_run
        adv_found[~adv_found] = adv_found_run
        if callback:
            # Report attack success rate after this restart.
            callback.line('success', i + 1, adv_found.float().mean())
        if adv_found.all():
            break
    return adv_inputs
def _pgd_linf(model: nn.Module,
              inputs: Tensor,
              labels: Tensor,
              ε: float,
              targeted: bool = False,
              steps: int = 40,
              random_init: bool = True,
              loss_function: str = 'ce',
              relative_step_size: float = 0.01 / 0.3,
              absolute_step_size: Optional[float] = None) -> Tuple[Tensor, Tensor]:
    """Single PGD run; returns (adv_found mask, adversarial inputs).

    The perturbation δ is kept inside the L-inf ball of radius ε and
    inputs+δ inside [0, 1]. ``loss_function`` selects cross-entropy ('ce'),
    difference of logits ('dl') or its ratio ('dlr'); the sign multiplier
    turns each loss into a quantity to increase by gradient ascent.
    """
    _loss_functions = {
        'ce': (partial(F.cross_entropy, reduction='none'), 1),
        'dl': (difference_of_logits, -1),
        'dlr': (partial(difference_of_logits_ratio, targeted=targeted), -1),
    }
    device = inputs.device
    batch_size = len(inputs)
    # View a per-sample tensor as (batch, 1, 1, ...) so it broadcasts over δ.
    batch_view = lambda tensor: tensor.view(batch_size, *[1] * (inputs.ndim - 1))
    # In-place projection: δ into the ε-ball AND inputs+δ into [0, 1].
    clamp = lambda tensor: tensor.data.clamp_(min=-ε, max=ε).add_(inputs).clamp_(min=0, max=1).sub_(inputs)
    loss_func, multiplier = _loss_functions[loss_function.lower()]
    if absolute_step_size is not None:
        step_size = absolute_step_size
    else:
        step_size = ε * relative_step_size
    if targeted:
        # Targeted attacks descend on the loss toward the target label.
        step_size *= -1
    δ = torch.zeros_like(inputs, requires_grad=True)
    δ_adv = torch.zeros_like(inputs)
    adv_found = torch.zeros(batch_size, dtype=torch.bool, device=device)
    if random_init:
        δ.data.uniform_(-ε, ε)
        clamp(δ)
    else:
        δ.data.zero_()
    for i in range(steps):
        logits = model(inputs + δ)
        if i == 0 and loss_function.lower() in ['dl', 'dlr']:
            # 'dl'/'dlr' need an inf-hot mask of the true labels, built once
            # on the first step when the logit shape is known.
            labels_infhot = torch.zeros_like(logits).scatter(1, labels.unsqueeze(1), float('inf'))
            loss_func = partial(loss_func, labels_infhot=labels_infhot)
        loss = multiplier * loss_func(logits, labels)
        δ_grad = grad(loss.sum(), δ, only_inputs=True)[0]
        is_adv = (logits.argmax(1) == labels) if targeted else (logits.argmax(1) != labels)
        # Keep the most recent δ that fools each sample.
        δ_adv = torch.where(batch_view(is_adv), δ.detach(), δ_adv)
        adv_found.logical_or_(is_adv)
        # Signed-gradient ascent step, then re-project.
        δ.data.add_(δ_grad.sign(), alpha=step_size)
        clamp(δ)
    return adv_found, inputs + δ_adv
|
<filename>message_html.py
from bs4 import BeautifulSoup
from icecream import ic
from scrape import get_image
from quotes import Quotes
import random_stuff as rs
import itertools
import copy
from dining import DiningInfoManager, all_days, weekdays, weekends
from send import send_mail, email_recipients, debug_email, email_bot
class MessageGenerator:
    """Build a spoof dining-hall newsletter from an HTML template.

    Loads ``assets/template.html`` into a BeautifulSoup tree, fills in the
    menu sections and generated "testimonials", and can export the result to
    a file or send it by email.
    """

    def __init__(self):
        """Load the HTML template and the quote generator."""
        with open("assets/template.html") as infile:
            self.template = BeautifulSoup(infile, 'lxml')
        self.quotes = Quotes()

    def __str__(self):
        return str(self.template)

    def export(self, to="assets/output.html"):
        """Write the current HTML tree to *to*."""
        with open(to, "w") as outfile:
            outfile.write(str(self.template))

    def send(self):
        """Email the rendered HTML to the debug address."""
        send_mail(debug_email, subject="NOT Dining Hall Food", body=str(self.template))

    @classmethod
    def from_template(cls, menu_dict):
        """Build a message from ``{"breakfast": [items], "lunch": [items], "dinner": [items]}``.

        The first dinner item becomes the featured entree.
        """
        mg = cls()
        entree = menu_dict["dinner"][0]
        mg.set_entree(entree)
        mg.set_marketing(entree)
        for meal, items in menu_dict.items():
            mg._set_meal(items, meal)
            mg._set_meal_comments(items, meal)
        return mg

    @classmethod
    def from_dh(cls, manager, day):
        """Generate an HTML template from the dining hall scraper output.

        Missing menu sections pop as "" and would otherwise contribute a
        spurious empty item after splitting, so blank entries are dropped.
        """
        breakfast_items = [item
                           for item in manager.menu[day]["Breakfast"].split("\n")
                           if item and "of the Day" not in item]
        lunch_dict = manager.menu[day]["Lunch"]
        lunch_items = [item
                       for key in ("Sandwiches", "Entree", "Grill", "Pizza")
                       for item in lunch_dict.pop(key, "").split("\n")
                       if item]
        dinner_dict = manager.menu[day]["Dinner"]
        dinner_items = [item
                        for key in ("Entree", "Grill", "Pizza")
                        for item in dinner_dict.pop(key, "").split("\n")
                        if item]
        template_dict = {
            "breakfast": breakfast_items,
            "lunch": lunch_items,
            "dinner": dinner_items
        }
        return cls.from_template(template_dict)

    def set_entree(self, entree):
        """Set the featured entree title and its (random) background image."""
        main_entree_text = self.template.find(id="main-dinner-entree")
        main_entree_text.string.replace_with(entree)
        # deliberately random: the image is unrelated to the actual entree
        image_url = get_image(rs.get_food()[0], url_only=True)
        main_entree_bg = self.template.find(id="background-dinner-entree")
        self._change_bg_image(main_entree_bg, image_url)

    def set_marketing(self, entree):
        """Fill the marketing banner and its generated description."""
        marketing_text = self.template.find(id="marketing")
        marketing_text.string.replace_with(f"Inaccurate depiction of {entree}")
        marketing_description_text = self.quotes.get_marketing()[0]
        marketing_description = self.template.find(id="marketing-description")
        marketing_description.string.replace_with(marketing_description_text)

    def set_breakfast(self, meal_items):
        self._set_meal(meal_items, meal="breakfast")

    def set_breakfast_comments(self, meal_items):
        self._set_meal_comments(meal_items, "breakfast")

    def set_lunch(self, meal_items):
        self._set_meal(meal_items, meal="lunch")

    def set_lunch_comments(self, meal_items):
        self._set_meal_comments(meal_items, "lunch")

    def set_dinner(self, meal_items):
        self._set_meal(meal_items, meal="dinner")

    def set_dinner_comments(self, meal_items):
        self._set_meal_comments(meal_items, "dinner")

    def _set_meal(self, meal_items, meal="breakfast"):
        """Fill the two-column item table for *meal*, alternating columns."""
        meal_table = self.template.find(id=f"{meal}-items")  # <table>
        mt_left, mt_right = meal_table.find_all("table")
        # keep one row as a structural template before wiping the columns
        sample_tr = copy.copy(mt_left.find("tr"))
        for tag in mt_left.find_all("tr") + mt_right.find_all("tr"):
            tag.clear()
        for item, mt in zip(meal_items, itertools.cycle((mt_left, mt_right))):
            this_tr = copy.copy(sample_tr)
            # image_url = get_image(item, url_only=True)
            descriptor = self.quotes.get_adjective()[0]
            this_tr.select("h3 > a")[0].string.replace_with(item)
            this_tr.find(class_="price").string.replace_with(descriptor)
            # this_tr.find("img")["src"] = image_url
            mt.append(this_tr)

    def _set_meal_comments(self, meal_items, meal="breakfast"):
        """Fill the testimonial section for *meal* with generated quotes."""
        meal_comments = self.quotes.get_quotes(meal_items, n=2, quote_only=True)
        parent_node = self.template.find(id=f"{meal}-comments")
        for comment, name, img_node, name_node, comment_node in zip(
                meal_comments, self.quotes.get_name(n=2),
                parent_node.find_all("img"), parent_node.find_all(class_="name"),
                parent_node.select(".text-testimony > p")):
            img_node["src"] = rs.get_avatar()[0]
            # BUGFIX: use the name drawn in the zip above; the old code ignored
            # it and drew a fresh get_name()[0], so names never matched the zip
            name_node.string.replace_with(name)
            comment_node.string.replace_with(comment)

    def _change_bg_image(self, node, url):
        """Substitute the /*image*/ placeholder in the node's inline style."""
        node["style"] = node["style"].replace("/*image*/", url)
if __name__ == "__main__":
    # Demo run: build a sample newsletter from a hard-coded menu and write it
    # to assets/output.html. No email is sent here (see MessageGenerator.send).
    mg = MessageGenerator.from_template({
        "breakfast": ["Eggs", "Tater Tots", "Homefries", "Tofu Scramble"],
        "lunch": ["BYO Deli Sandwiches", "Veggie Burger", "White Broccoli Garlic Pizza"],
        "dinner": ["New England", "Baked Potato Bar", "Tomato Mozzarella Flatbread", "Ham"]
    })
    # mg.set_entree("Mongolian Beef")
    # mg.set_breakfast(["Eggs", "Ham", "Milk and cereal", "Gritz", "Breakfast pizza", "Orange Juice"])
    # mg.set_breakfast_comments(["Eggs", "Ham", "Milk and cereal", "Gritz", "Breakfast pizza", "Orange Juice"])
    mg.export()
<filename>platform/core/polyaxon/polypod/kf_experiment.py
from kubernetes.config import ConfigException
from constants.k8s_jobs import EXPERIMENT_KF_JOB_NAME_FORMAT
from db.redis.ephemeral_tokens import RedisEphemeralTokens
from polyaxon_k8s.exceptions import PolyaxonK8SError
from polypod.experiment import ExperimentSpawner
from polypod.templates.kf_jobs import manager
from polypod.templates.kubeflow import KUBEFLOW_JOB_GROUP
from polypod.templates.volumes import (
get_auth_context_volumes,
get_pod_refs_outputs_volumes,
get_pod_volumes,
get_shm_volumes
)
from schemas import TaskType
class KFExperimentSpawner(ExperimentSpawner):
    """Experiment spawner that creates Kubeflow custom-resource jobs.

    Subclasses are expected to set KIND / VERSION / PLURAL / SPEC for the
    concrete Kubeflow job type (e.g. TFJob, PyTorchJob).
    """
    MASTER_SERVICE = False
    RESOURCE_MANAGER = manager.ResourceManager
    KIND = None
    VERSION = None
    PLURAL = None
    SPEC = None

    @property
    def api_version(self):
        """Full apiVersion string of the Kubeflow custom resource."""
        return '{}/{}'.format(KUBEFLOW_JOB_GROUP, self.VERSION)

    def _collect_volumes(self):
        """Assemble every volume/mount the pod needs.

        Covers: data/outputs persistence, outputs of referenced jobs and
        experiments, shared memory, and the auth context. Returns
        (volumes, volume_mounts, context_mounts).
        """
        volumes, volume_mounts = get_pod_volumes(
            persistence_outputs=self.persistence_config.outputs,
            persistence_data=self.persistence_config.data)
        for outputs_refs in (self.outputs_refs_jobs, self.outputs_refs_experiments):
            ref_volumes, ref_mounts = get_pod_refs_outputs_volumes(
                outputs_refs=outputs_refs,
                persistence_outputs=self.persistence_config.outputs)
            volumes += ref_volumes
            volume_mounts += ref_mounts
        shm_volumes, shm_mounts = get_shm_volumes()
        volumes += shm_volumes
        volume_mounts += shm_mounts
        context_volumes, context_mounts = get_auth_context_volumes()
        volumes += context_volumes
        volume_mounts += context_mounts
        return volumes, volume_mounts, context_mounts

    def _create_job(self,  # pylint:disable=arguments-differ
                    task_type,
                    command=None,
                    args=None,
                    env_vars=None,
                    resources=None,
                    annotations=None,
                    node_selector=None,
                    affinity=None,
                    tolerations=None,
                    replicas=1,
                    restart_policy='Never'):
        """Build the replica-spec dict for one Kubeflow task type."""
        ephemeral_token = None
        if self.token_scope:
            ephemeral_token = RedisEphemeralTokens.generate_header_token(scope=self.token_scope)
        resource_name = self.resource_manager.get_kf_resource_name(task_type=task_type)
        labels = self.resource_manager.get_labels(task_type=task_type)
        # Set and validate volumes
        volumes, volume_mounts, context_mounts = self._collect_volumes()
        pod_template_spec = self.resource_manager.get_pod_template_spec(
            resource_name=resource_name,
            volume_mounts=volume_mounts,
            volumes=volumes,
            labels=labels,
            env_vars=env_vars,
            command=command,
            args=args,
            ports=self.ports,
            init_env_vars=self.get_init_env_vars(),
            persistence_outputs=self.persistence_config.outputs,
            persistence_data=self.persistence_config.data,
            outputs_refs_jobs=self.outputs_refs_jobs,
            outputs_refs_experiments=self.outputs_refs_experiments,
            secret_refs=self.spec.secret_refs,
            config_map_refs=self.spec.config_map_refs,
            resources=resources,
            annotations=annotations,
            ephemeral_token=ephemeral_token,
            node_selector=node_selector,
            affinity=affinity,
            tolerations=tolerations,
            init_context_mounts=context_mounts,
            restart_policy=restart_policy)
        return {
            'replicas': replicas,
            'restartPolicy': restart_policy,
            'template': pod_template_spec
        }

    def _task_settings(self, task_type):
        """Gather the per-task pod settings (at task index 0) for _create_job."""
        command, args = self.get_pod_command_args(task_type=task_type, task_idx=0)
        return {
            'command': command,
            'args': args,
            'env_vars': self.get_env_vars(task_type=task_type, task_idx=0),
            'resources': self.get_resources(task_type=task_type, task_idx=0),
            'annotations': self.get_annotations(task_type=task_type, task_idx=0),
            'node_selector': self.get_node_selector(task_type=task_type, task_idx=0),
            'affinity': self.get_affinity(task_type=task_type, task_idx=0),
            'tolerations': self.get_tolerations(task_type=task_type, task_idx=0),
        }

    def create_multi_jobs(self, task_type):  # pylint:disable=arguments-differ
        """Create the replica spec for a multi-pod (worker-style) task."""
        replicas = self.get_n_pods(task_type=task_type)
        return self._create_job(task_type=task_type,
                                replicas=replicas,
                                **self._task_settings(task_type))

    def create_master(self):
        """Create the replica spec for the single master task."""
        return self._create_job(task_type=TaskType.MASTER,
                                **self._task_settings(TaskType.MASTER))

    def delete_master(self):
        """Delete the master job; return True on success, False on k8s errors."""
        try:
            self._delete_job(task_type=TaskType.MASTER, task_idx=0, has_service=self.MASTER_SERVICE)
            return True
        except PolyaxonK8SError:
            return False

    def stop_experiment(self):
        """Delete the whole Kubeflow custom object backing this experiment."""
        resource_name = EXPERIMENT_KF_JOB_NAME_FORMAT.format(
            experiment_uuid=self.resource_manager.experiment_uuid)
        try:
            self.delete_custom_object(name=resource_name,
                                      group=KUBEFLOW_JOB_GROUP,
                                      version=self.VERSION,
                                      plural=self.PLURAL)
            return True
        except (PolyaxonK8SError, ConfigException):
            return False
|
<filename>multicell/unsupervised_aligned.py
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import umap
# global seaborn/matplotlib styling for every figure produced by this script
sns.set(style='white', context='notebook', rc={'figure.figsize': (14, 10)})
from utils.file_io import RUNS_FOLDER, INPUT_FOLDER
# deterministic dimensionality-reduction defaults shared across runs
REDUCER_SEED = 100        # fixed random_state -> reproducible embeddings
REDUCER_COMPONENTS = 2    # embed into 2D for plotting
# baseline kwargs for umap.UMAP / umap.AlignedUMAP; copied and tweaked per run
UMAP_KWARGS = {
    'random_state': REDUCER_SEED,
    'n_components': REDUCER_COMPONENTS,
    'metric': 'euclidean',
    'init': 'spectral',
    'unique': False,
    'n_neighbors': 15,
    'min_dist': 0.1,
    'spread': 1.0,
}
def axis_bounds(embedding):
    """Return [xmin, xmax, ymin, ymax] for an (n, 2) embedding with a 10% margin."""
    xs = embedding.T[0]
    ys = embedding.T[1]
    x_lo, x_hi = xs.min(), xs.max()
    y_lo, y_hi = ys.min(), ys.max()
    pad_x = (x_hi - x_lo) * 0.1
    pad_y = (y_hi - y_lo) * 0.1
    return [x_lo - pad_x, x_hi + pad_x, y_lo - pad_y, y_hi + pad_y]
if __name__ == '__main__':
    use_01 = True  # remap spins from {-1, +1} to {0, 1} before embedding
    nn = 4999  # runs with 1000, crash with 5000, 10000 -- try to restrict to more int gammas maybe
    kk = 1  # debug: subsample multicell spins to avoid memory issue
    # Step 0) which 'manyruns' dirs to work with
    gamma_list = [1.0, 20.0]
    manyruns_dirnames = ['Wrandom0_gamma%.2f_10k_periodic_fixedorderV3_p3_M100' % a
                         for a in gamma_list]
    manyruns_paths = [RUNS_FOLDER + os.sep + 'multicell_manyruns' + os.sep + dirname
                      for dirname in manyruns_dirnames]
    # Step 1) umap (or other dim reduction) kwargs
    # NOTE(review): hoisted out of the loop (it was rebuilt per-iteration but
    # never read). These kwargs are currently NOT passed to AlignedUMAP below,
    # which therefore runs with library defaults -- confirm intended settings.
    n_components = 2
    umap_kwargs = UMAP_KWARGS.copy()
    umap_kwargs['n_components'] = n_components
    umap_kwargs['min_dist'] = 0.25
    X_multi = np.zeros((len(gamma_list), nn, kk), dtype=int)
    for j, manyruns_path in enumerate(manyruns_paths):
        # Step 2) make/load data
        smod = '_last'
        agg_dir = manyruns_path + os.sep + 'aggregate'
        fpath_state = agg_dir + os.sep + 'X_aggregate%s.npz' % smod
        fpath_energy = agg_dir + os.sep + 'X_energy%s.npz' % smod
        fpath_pickle = manyruns_path + os.sep + 'multicell_template.pkl'
        X = np.load(fpath_state)['arr_0'].T  # umap wants transpose
        X_energies = np.load(fpath_energy)['arr_0'].T  # umap wants transpose (?)
        with open(fpath_pickle, 'rb') as pickle_file:
            multicell_template = pickle.load(pickle_file)  # unpickling multicell object
        if use_01:
            X = (1 + X) / 2.0
            X = X.astype(int)
        print('accessing', j, manyruns_path)
        X_multi[j, :, :] = X[0:nn, 0:kk]
    # UMAP aligned needs a relation dict for the 'time varying' dataset;
    # our relation is that each traj maps to itself (in time) -- constant relation
    constant_dict = {i: i for i in range(kk)}
    constant_relations = [constant_dict for _ in range(len(gamma_list) - 1)]
    print('Starting AlignedUMAP()...')
    aligned_mapper = umap.AlignedUMAP().fit(X_multi, relations=constant_relations)
    num_rows = 4
    num_cols = 4
    fig, axs = plt.subplots(num_rows, num_cols, figsize=(10, 20))
    ax_bound = axis_bounds(np.vstack(aligned_mapper.embeddings_))
    for i, ax in enumerate(axs.flatten()):
        if i < len(gamma_list):
            print(i)
            # BUGFIX: a second identical scatter (cmap="Spectral_r") overplotted
            # the same points; cmap has no effect anyway without a 'c=' array
            ax.scatter(*aligned_mapper.embeddings_[i].T, s=2, cmap="Spectral")
        ax.axis(ax_bound)
        ax.set(xticks=[], yticks=[])
    plt.tight_layout()
    plt.savefig('aligned_%d_%d_gammas%d.jpg' % (nn, kk, len(gamma_list)), dpi=300)
|
<gh_stars>1000+
# -*- coding: utf-8 -*-
# Based upon makeunicodedata.py
# (http://hg.python.org/cpython/file/c8192197d23d/Tools/unicode/makeunicodedata.py)
# written by <NAME> (<EMAIL>)
#
# Copyright (C) 2011 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import csv
import sys
# ECMAScript 5 $ 7.2
whitespace = [
# python doesn't support using control character names :(
0x9, # CHARACTER TABULATION
0xb, # LINE TABULATION
0xc, # FORM FEED
ord(u'\N{SPACE}'),
ord(u'\N{NO-BREAK SPACE}'),
ord(u'\N{ZERO WIDTH NO-BREAK SPACE}'), # also BOM
]
# $ 7.3
line_terminator = [
0xa, # LINE FEED
0xd, # CARRIAGE RETURN
ord(u'\N{LINE SEPARATOR}'),
ord(u'\N{PARAGRAPH SEPARATOR}'),
]
# These are also part of IdentifierPart $7.6
ZWNJ = ord(u'\N{ZERO WIDTH NON-JOINER}')
ZWJ = ord(u'\N{ZERO WIDTH JOINER}')
FLAG_SPACE = 1 << 0
FLAG_LETTER = 1 << 1
FLAG_IDENTIFIER_PART = 1 << 2
MAX = 0xffff
public_domain = """
/*
* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/licenses/publicdomain/
*/
"""
def read_unicode_data(unicode_file):
    """Yield rows of UnicodeData.txt with the code point parsed as an int.

    Expands UAX #44 4.2.3 code point ranges (a ``<Name, First>`` row followed
    by a ``<Name, Last>`` row) into one row per code point, with the range
    markers stripped from the name. Each yielded row is an independent list.

    If you want to understand how this wonderful file format works checkout
    Unicode Standard Annex #44 - Unicode Character Database
    http://www.unicode.org/reports/tr44/
    """
    # BUGFIX: the old body read the *global* 'unicode_data' instead of its
    # parameter, and used Py2-only reader.next() in a 'while True' loop that
    # relied on StopIteration escaping a generator (a RuntimeError since
    # Python 3.7, PEP 479). Iterating the reader fixes all three.
    reader = csv.reader(unicode_file, delimiter=';')
    for row in reader:
        name = row[1]
        # We need to expand the UAX #44 4.2.3 Code Point Range
        if name.startswith('<') and name.endswith('First>'):
            next_row = next(reader)
            for i in range(int(row[0], 16), int(next_row[0], 16) + 1):
                # yield a copy: the old code yielded the same mutated list,
                # so buffered consumers saw only the final range values
                expanded = row[:]
                expanded[0] = i
                expanded[1] = name[1:-8]  # strip '<' and ', First>'
                yield expanded
        else:
            row[0] = int(row[0], 16)
            yield row
def generate_unicode_stuff(unicode_data, data_file, test_mapping, test_space):
    """Generate the C++ character tables and two JS regression tests.

    :param unicode_data: open file positioned at the start of UnicodeData.txt
    :param data_file: output file for the generated C++ tables (Unicode.cpp)
    :param test_mapping: output file for the toUpperCase/toLowerCase JS test
    :param test_space: output file for the String.prototype.trim JS test
    """
    # entry 0 is a shared dummy so characters with no info all map to index 0
    dummy = (0, 0, 0)
    table = [dummy]
    cache = {dummy: 0}
    index = [0] * (MAX + 1)   # char code -> index into 'table'
    test_table = {}           # char code -> (upper, lower, name, alias) for JS test
    test_space_table = []     # every code point classified as whitespace
    for row in read_unicode_data(unicode_data):
        code = row[0]
        name = row[1]
        category = row[2]
        alias = row[-5]
        uppercase = row[-3]
        lowercase = row[-2]
        flags = 0
        if code > MAX:
            break
        # we combine whitespace and lineterminators because in pratice we don't need them separated
        if category == 'Zs' or code in whitespace or code in line_terminator:
            flags |= FLAG_SPACE
            test_space_table.append(code)
        if category in ['Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl']:  # $ 7.6 (UnicodeLetter)
            flags |= FLAG_LETTER
        if category in ['Mn', 'Mc', 'Nd', 'Pc'] or code == ZWNJ or code == ZWJ:  # $ 7.6 (IdentifierPart)
            flags |= FLAG_IDENTIFIER_PART
        if uppercase:
            upper = int(uppercase, 16)
        else:
            upper = code
        if lowercase:
            lower = int(lowercase, 16)
        else:
            lower = code
        test_table[code] = (upper, lower, name, alias)
        # store the case mappings as 16-bit deltas so identical (delta, flags)
        # triples can share one table entry
        up_d = upper - code
        low_d = lower - code
        assert up_d > -65535 and up_d < 65535
        assert low_d > -65535 and low_d < 65535
        upper = up_d & 0xffff
        lower = low_d & 0xffff
        item = (upper, lower, flags)
        i = cache.get(item)
        if i is None:
            assert item not in table
            cache[item] = i = len(table)
            table.append(item)
        index[code] = i
    # --- JS test: full upper/lower mapping for every BMP code point ---
    test_mapping.write('/* Generated by make_unicode.py DO NOT MODIFY */\n')
    test_mapping.write(public_domain)
    test_mapping.write('var mapping = [\n')
    for code in range(0, MAX + 1):
        entry = test_table.get(code)
        if entry:
            upper, lower, name, alias = entry
            test_mapping.write(' [' + hex(upper) + ', ' + hex(lower) + '], /* ' +
                               name + (' (' + alias + ')' if alias else '') + ' */\n')
        else:
            # unassigned code points map to themselves
            test_mapping.write(' [' + hex(code) + ', ' + hex(code) + '],\n')
    test_mapping.write('];')
    test_mapping.write("""
assertEq(mapping.length, 0x10000);
for (var i = 0; i <= 0xffff; i++) {
    var char = String.fromCharCode(i);
    var info = mapping[i];
    assertEq(char.toUpperCase().charCodeAt(0), info[0]);
    assertEq(char.toLowerCase().charCodeAt(0), info[1]);
}
if (typeof reportCompare === "function")
    reportCompare(true, true);
""")
    # --- JS test: String.prototype.trim removes exactly the space set ---
    test_space.write('/* Generated by make_unicode.py DO NOT MODIFY */\n')
    test_space.write(public_domain)
    test_space.write('var onlySpace = String.fromCharCode(' +
                     ', '.join(map(hex, test_space_table)) + ');\n')
    test_space.write("""
assertEq(onlySpace.trim(), "");
assertEq((onlySpace + 'aaaa').trim(), 'aaaa');
assertEq(('aaaa' + onlySpace).trim(), 'aaaa');
assertEq((onlySpace + 'aaaa' + onlySpace).trim(), 'aaaa');
if (typeof reportCompare === "function")
    reportCompare(true, true);
""")
    # --- compress the index with a two-level table ---
    index1, index2, shift = splitbins(index)
    # Don't forget to update CharInfo in Unicode.cpp if you need to change this
    assert shift == 5
    # verify correctness
    # BUGFIX: the old loop iterated the *values* of 'index' (table indices)
    # as if they were char codes, so it only spot-checked a handful of small
    # codes; iterate every BMP code point instead.
    for char in range(MAX + 1):
        test = table[index[char]]
        idx = index1[char >> shift]
        idx = index2[(idx << shift) + (char & ((1 << shift) - 1))]
        assert test == table[idx]
    comment = """
/*
 * So how does indexing work?
 * First let's have a look at a jschar, 16-bits:
 * [................]
 * Step 1:
 * Extracting the upper 11 bits from the jschar.
 * upper = char >> 5 ([***********.....])
 * Step 2:
 * Using these bits to get an reduced index from index1.
 * index = index1[upper]
 * Step 3:
 * Combining the index and the bottom 5 bits of the original jschar.
 * real_index = index2[(index << 5) + (char & ((1 << 5) - 1))] ([...********+++++])
 *
 * The advantage here is that the biggest number in index1 doesn't need 10 bits,
 * but 7 and we save some memory.
 *
 * Step 4:
 * Get the character informations by looking up real_index in js_charinfo.
 *
 * Pseudocode of generation:
 *
 * let table be the mapping of jschar => js_charinfo_index
 * let index1 be an empty array
 * let index2 be an empty array
 * let cache be a hash map
 *
 * while shift is less then maximal amount you can shift 0xffff before it's 0
 * let chunks be table split in chunks of size 2**shift
 *
 * for every chunk in chunks
 * if chunk is in cache
 * let index be cache[chunk]
 * else
 * let index be the max key of index2 + 1
 * for element in chunk
 * push element to index2
 * put index as chunk in cache
 *
 * push index >> shift to index1
 *
 * increase shift
 * stop if you found the best shift
 */
"""
    # --- emit the C++ file ---
    data_file.write('/* Generated by make_unicode.py DO NOT MODIFY */\n')
    data_file.write(public_domain)
    data_file.write('#include "vm/Unicode.h"\n\n')
    data_file.write('using namespace js;\n')
    data_file.write('using namespace js::unicode;\n')
    data_file.write(comment)
    data_file.write('const CharacterInfo unicode::js_charinfo[] = {\n')
    for d in table:
        data_file.write(' {')
        data_file.write(', '.join((str(e) for e in d)))
        data_file.write('},\n')
    data_file.write('};\n')
    data_file.write('\n')

    def dump(data, name, file):
        """Write one uint8_t C array, wrapping lines below 100 columns."""
        file.write('const uint8_t unicode::' + name + '[] = {\n')
        line = pad = ' ' * 4
        lines = []
        for entry in data:
            assert entry < 256
            s = str(entry)
            s = s.rjust(3)
            if len(line + s) + 5 > 99:
                lines.append(line.rstrip())
                line = pad + s + ', '
            else:
                line = line + s + ', '
        lines.append(line.rstrip())
        file.write('\n'.join(lines))
        file.write('\n};\n')

    dump(index1, 'index1', data_file)
    data_file.write('\n')
    dump(index2, 'index2', data_file)
    data_file.write('\n')
    data_file.write('\n')
def getsize(data):
    """Return the smallest C integer width in bytes that can hold max(data)."""
    largest = max(data)
    assert largest < 2 ** 32
    if largest < 1 << 8:
        return 1
    if largest < 1 << 16:
        return 2
    return 4
def splitbins(t):
    """t -> (t1, t2, shift). Split a table to save space.
    t is a sequence of ints. This function can be useful to save space if
    many of the ints are the same. t1 and t2 are lists of ints, and shift
    is an int, chosen to minimize the combined size of t1 and t2 (in C
    code), and where for each i in range(len(t)),
    t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
    where mask is a bitmask isolating the last "shift" bits.
    """
    def dump(t1, t2, shift, bytes):
        # report the winning decomposition on stderr
        print("%d+%d bins at shift %d; %d bytes" % (
            len(t1), len(t2), shift, bytes), file=sys.stderr)
    print("Size of original table:", len(t)*getsize(t), \
        "bytes", file=sys.stderr)
    n = len(t)-1  # last valid index
    maxshift = 0  # the most we can shift n and still have something left
    if n > 0:
        while n >> 1:
            n >>= 1
            maxshift += 1
    del n
    bytes = sys.maxsize  # smallest total size so far
    t = tuple(t)  # so slices can be dict keys
    # try every chunk size 2**shift and keep the cheapest decomposition
    for shift in range(maxshift + 1):
        t1 = []
        t2 = []
        size = 2**shift
        bincache = {}  # chunk tuple -> offset of its first copy in t2
        for i in range(0, len(t), size):
            bin = t[i:i + size]
            index = bincache.get(bin)
            if index is None:
                # first occurrence of this chunk: append it to t2
                index = len(t2)
                bincache[bin] = index
                t2.extend(bin)
            t1.append(index >> shift)
        # determine memory size
        b = len(t1) * getsize(t1) + len(t2) * getsize(t2)
        if b < bytes:
            best = t1, t2, shift
            bytes = b
    t1, t2, shift = best
    print("Best:", end=' ', file=sys.stderr)
    dump(t1, t2, shift, bytes)
    # exhaustively verify that the decomposition is correct
    mask = 2**shift - 1
    for i in range(len(t)):
        assert t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
    return best
if __name__ == '__main__':
    # NOTE(review): this entry point is Python 2 only (urllib2); the generator
    # functions above use __future__ print_function for forward compatibility.
    import urllib2
    if len(sys.argv) > 1:
        # use a locally supplied UnicodeData.txt
        print('Always make sure you have the newest UnicodeData.txt!')
        unicode_data = open(sys.argv[1], 'r')
    else:
        # fetch the latest table from unicode.org and keep a local copy
        print('Downloading...')
        reader = urllib2.urlopen('http://unicode.org/Public/UNIDATA/UnicodeData.txt')
        data = reader.read()
        reader.close()
        unicode_data = open('UnicodeData.txt', 'w+')
        unicode_data.write(data)
        unicode_data.seek(0)  # rewind so the generator reads from the start
    print('Generating...')
    generate_unicode_stuff(unicode_data,
                           open('Unicode.cpp', 'w'),
                           open('../tests/ecma_5/String/string-upper-lower-mapping.js', 'w'),
                           open('../tests/ecma_5/String/string-space-trim.js', 'w'))
|
from typing import Any, Dict, List, NewType, TYPE_CHECKING, Optional, Union
if TYPE_CHECKING:
from .client import Client as TwitchClient
import re
from .message import Message
from .user import User
from .stores import UserStore
from .userstate import UserState
from .undefined import UNDEFINED
from ..Utils.regex import (
ReEmoteOnly, ReFollowersOnly, ReR9k,
ReRituals, ReRoomID, ReSlow,
ReSubsOnly, ReRoomName
)
UserName = NewType("UserName", str)
class Channel(object):
    """
    Represents a twitch chat room.

    Generated when the bot joins a chat room or some kind of channel update
    happens, then added to the bot's channels dict.

    If ``emergency`` is True, a ``Msg`` must be given, to create a
    minimalistic channel object from that message alone.
    """
    def __repr__(self):
        return f"<{self.__class__.__name__} name='{self.name}'>"

    def __str__(self):
        return self.name or ""

    def __init__(self, raw:Optional[str], emergency:bool=False, Msg:Optional[Message]=None):
        # self.broadcaster_lang:str = None
        # raw tag values; None means "unknown" and the public properties
        # below convert that into a sensible default (False / 0 / "")
        self._emote_only:Optional[bool] = None
        self._followers_only:Optional[int] = None
        self._r9k:Optional[bool] = None
        self._rituals:Optional[bool] = None  # was Optional[int]; only ever assigned a bool below
        self._room_id:Optional[str] = None
        self._slow:Optional[int] = None
        self._subs_only:Optional[bool] = None
        self._name:Optional[str] = None
        self._viewers:Dict[Union[UserName, str], User] = UserStore()
        self._host_target:Optional[str] = None
        self._me:Optional[UserState] = None
        self.minimalistic:Optional[bool] = None
        if raw or Msg:
            try:
                if emergency:
                    self.buildFromMessage(Msg)
                else:
                    self.buildFromEvent(raw)
            except Exception as exc:
                # narrowed from a bare 'except:'; keep raising AttributeError
                # for callers but preserve the original cause for debugging
                raise AttributeError(raw) from exc

    def compact(self) -> dict:
        """Return a plain dict snapshot of the channel's public state."""
        d:dict = dict()
        d["emote_only"] = self.emote_only
        d["followers_only"] = self.followers_only
        d["rituals"] = self.rituals
        d["room_id"] = self.room_id
        d["slow"] = self.slow
        d["subs_only"] = self.subs_only
        d["viewers"] = self.viewers
        d["name"] = self.name
        d["me"] = self.me
        d["host_target"] = self.host_target
        d["minimalistic"] = self.minimalistic
        return d

    # utils
    def buildFromEvent(self, raw:str) -> None:
        """
        generated by a ROOMSTATE event, gives us all information
        @emote-only=0;followers-only=-1;r9k=0;rituals=0;room-id=94638902;slow=0;subs-only=0 :tmi.twitch.tv ROOMSTATE #phaazebot
        """
        # _emote_only
        search = re.search(ReEmoteOnly, raw)
        if search:
            self._emote_only = search.group(1) == "1"
        # _followers_only
        search = re.search(ReFollowersOnly, raw)
        if search:
            self._followers_only = int(search.group(1))
        # _r9k
        search = re.search(ReR9k, raw)
        if search:
            self._r9k = search.group(1) == "1"
        # _rituals
        search = re.search(ReRituals, raw)
        if search:
            self._rituals = search.group(1) == "1"
        # _room_id
        search = re.search(ReRoomID, raw)
        if search:
            self._room_id = search.group(1)
        # _slow
        search = re.search(ReSlow, raw)
        if search:
            self._slow = int(search.group(1))
        # _subs_only
        search = re.search(ReSubsOnly, raw)
        if search:
            self._subs_only = search.group(1) == "1"
        # _name
        search = re.search(ReRoomName, raw)
        if search:
            self._name = search.group(1)

    def buildFromMessage(self, Msg:Message) -> None:
        """
        ! emergency function
        generated by a message if no channel was found, only gives a minimum of data
        can maybe get called at the start of the bot, but hopefully not
        """
        self._room_id = Msg.room_id
        self._name = Msg.room_name

    def update(self, New:"Channel") -> Dict[str, Any]:
        """
        together with a new channel object, it updates all attributes that are not None

        Returns a dict of the attribute names (without leading underscores)
        that actually changed, mapped to their new values.
        """
        # isinstance (rather than exact type check) so subclasses are accepted;
        # AttributeError kept for backward compatibility with existing callers
        if not isinstance(New, Channel):
            raise AttributeError(f'channel must be "{self.__class__.__name__}" not "{type(New)}"')
        changes:Dict[str, Any] = {}
        changeable:List[str] = [attr for attr in dir(New) if attr.startswith('_') and not attr.startswith("__")]
        for attr in changeable:
            # viewers/userstate are managed elsewhere; minimalistic is a flag
            if attr in ["_viewers", "_me", "minimalistic"]: continue
            new_value:Any = getattr(New, attr, None)
            if (new_value is None) or (new_value == UNDEFINED): continue
            old_value:Any = getattr(self, attr, None)
            if new_value == old_value: continue
            setattr(self, attr, new_value)
            changes[attr.lstrip('_')] = new_value
        return changes

    def getViewer(self, **search:Any) -> Optional[User]:
        """
        get a user from the channel viewers based on the given kwargs,
        returns the first user all kwargs are valid for, or None if none match
        """
        # yeah name based, because its the only thing we always get, no matter if message, join or leave
        for user_name in self.users:
            Viewer:User = self.users[user_name]
            valid:bool = True
            for key in search:
                # 'object' sentinel: a missing attribute never equals the filter
                if getattr(Viewer, key, object) != search[key]:
                    valid = False
                    break
            if valid: return Viewer
        return None

    # funcs
    async def sendMessage(self, cls:"TwitchClient", content:str) -> None:
        """
        Send a message to the channel,
        requires you to give this function the Client class, don't ask why...
        this is basically an alternative to:
        cls.sendMessage(Channel.name, content)
        makes you think... is this even faster? i dunno, adding it anyways LULW
        """
        return await cls.sendMessage(self.name, content)

    # props
    @property
    def emote_only(self) -> bool:
        return bool(self._emote_only)

    @property
    def followers_only(self) -> int:
        return int(self._followers_only or 0)

    @property
    def rituals(self) -> bool:
        return bool(self._rituals)

    @property
    def room_id(self) -> str:
        return str(self._room_id or "")

    @property
    def channel_id(self) -> str:
        # alias of room_id
        return str(self._room_id or "")

    @property
    def slow(self) -> int:
        return int(self._slow or 0)

    @property
    def subs_only(self) -> bool:
        return bool(self._subs_only)

    @property
    def users(self) -> Dict[Union[UserName, str], User]:
        return self._viewers

    @property
    def viewers(self) -> Dict[Union[UserName, str], User]:
        # alias of users
        return self._viewers

    @property
    def name(self) -> str:
        return str(self._name or "")

    @property
    def me(self) -> Optional[UserState]:
        # fall back to an empty UserState so attribute access never explodes
        if self._me:
            return self._me
        else:
            return UserState(None)

    @property
    def host_target(self) -> str:
        return str(self._host_target or "")

    @property
    def broadcaster_lang(self) -> Exception:  # deprecated
        raise DeprecationWarning("broadcaster_lang is no longer given as a tag from twitch")
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
__all__ = ["make_ladder", "Sampler"]
import attr
import itertools
import numpy as np
from numpy.random.mtrand import RandomState
from . import util, chain, ensemble
def make_ladder(ndim, ntemps=None, Tmax=None):
    """
    Returns a ladder of :math:`\\beta \\equiv 1/T` under a geometric spacing that is
    determined by the arguments ``ntemps`` and ``Tmax``.

    Ideally, ``Tmax`` should be specified such that the tempered posterior looks like
    the prior at this temperature.  If using adaptive parallel tempering, per
    `arXiv:1501.05823 <http://arxiv.org/abs/1501.05823>`_, choosing ``Tmax = inf`` is
    a safe bet, so long as ``ntemps`` is also specified.

    :param ndim:
        The number of dimensions in the parameter space.
    :param ntemps: (optional)
        If set, the number of temperatures to generate.
    :param Tmax: (optional)
        If set, the maximum temperature for the ladder.

    Temperatures are chosen according to the following algorithm:

    * If neither ``ntemps`` nor ``Tmax`` is specified, raise a ``ValueError``
      (insufficient information).
    * If ``ntemps`` is specified but not ``Tmax``, return a ladder spaced so that a
      Gaussian posterior would have a 25% temperature swap acceptance ratio.
    * If ``Tmax`` is specified but not ``ntemps``:

      * If ``Tmax = inf``, raise a ``ValueError`` (insufficient information).
      * Else, space chains geometrically as above (for 25% acceptance) until
        ``Tmax`` is reached.

    * If ``Tmax`` and ``ntemps`` are specified:

      * If ``Tmax = inf``, place one chain at ``inf`` and ``ntemps-1`` in a 25%
        geometric spacing.
      * Else, use the unique geometric spacing defined by ``ntemps`` and ``Tmax``.

    :raises ValueError: on invalid ``ndim``/``ntemps``, ``Tmax <= 1``, or an
        under-specified combination of arguments.
    """
    if type(ndim) is not int or ndim < 1:
        raise ValueError("Invalid number of dimensions specified.")
    if ntemps is None and Tmax is None:
        raise ValueError("Must specify one of ``ntemps`` and ``Tmax``.")
    if Tmax is not None and Tmax <= 1:
        raise ValueError("``Tmax`` must be greater than 1.")
    if ntemps is not None and (type(ntemps) is not int or ntemps < 1):
        raise ValueError("Invalid number of temperatures specified.")

    # Pre-computed geometric temperature steps giving a 25% swap acceptance for
    # a Gaussian posterior, indexed by ``ndim - 1``.
    tstep = np.array(
        [
            25.2741, 7.0, 4.47502, 3.5236, 3.0232, 2.71225,
            2.49879, 2.34226, 2.22198, 2.12628, 2.04807, 1.98276,
            1.92728, 1.87946, 1.83774, 1.80096, 1.76826, 1.73895,
            1.7125, 1.68849, 1.66657, 1.64647, 1.62795, 1.61083,
            1.59494, 1.58014, 1.56632, 1.55338, 1.54123, 1.5298,
            1.51901, 1.50881, 1.49916, 1.49, 1.4813, 1.47302,
            1.46512, 1.45759, 1.45039, 1.4435, 1.4369, 1.43056,
            1.42448, 1.41864, 1.41302, 1.40761, 1.40239, 1.39736,
            1.3925, 1.38781, 1.38327, 1.37888, 1.37463, 1.37051,
            1.36652, 1.36265, 1.35889, 1.35524, 1.3517, 1.34825,
            1.3449, 1.34164, 1.33847, 1.33538, 1.33236, 1.32943,
            1.32656, 1.32377, 1.32104, 1.31838, 1.31578, 1.31325,
            1.31076, 1.30834, 1.30596, 1.30364, 1.30137, 1.29915,
            1.29697, 1.29484, 1.29275, 1.29071, 1.2887, 1.28673,
            1.2848, 1.28291, 1.28106, 1.27923, 1.27745, 1.27569,
            1.27397, 1.27227, 1.27061, 1.26898, 1.26737, 1.26579,
            1.26424, 1.26271, 1.26121, 1.25973,
        ]
    )

    if ndim > tstep.shape[0]:
        # An approximation to the temperature step at large dimension.
        tstep = 1.0 + 2.0 * np.sqrt(np.log(4.0)) / np.sqrt(ndim)
    else:
        tstep = tstep[ndim - 1]

    appendInf = False
    if Tmax == np.inf:
        # One chain goes to T = inf; the remaining ``ntemps - 1`` chains are
        # spaced geometrically, which requires ``ntemps`` to be known.
        # (Previously this path hit ``None - 1`` and raised an opaque
        # TypeError; the docstring promises an explicit error instead.)
        if ntemps is None:
            raise ValueError(
                "Must specify ``ntemps`` when using ``Tmax = inf``."
            )
        appendInf = True
        Tmax = None
        ntemps = ntemps - 1

    if ntemps is not None:
        if Tmax is None:
            # Determine Tmax from ntemps.
            Tmax = tstep ** (ntemps - 1)
    else:
        # ``Tmax`` is finite and positive here: the both-None case and the
        # ``Tmax = inf`` case were rejected above.  Determine ntemps from Tmax.
        ntemps = int(np.log(Tmax) / np.log(tstep) + 2)

    betas = np.logspace(0, -np.log10(Tmax), ntemps)
    if appendInf:
        # Use a geometric spacing, but replace the top-most temperature with
        # infinity (beta = 0).
        betas = np.concatenate((betas, [0]))

    return betas
@attr.s(slots=True, frozen=True)
class LikePriorEvaluator(object):
    """
    Wrapper class for logl and logp.

    Vectorized over walkers: ``x`` is indexed along its first axis.  Walkers
    whose prior is infinite cannot be fed to ``logl`` (beta = 0 chains still
    need a finite likelihood value), so their positions are temporarily
    replaced with a valid walker's position and their log-likelihood is
    reported as 0.
    """

    logl = attr.ib()
    logp = attr.ib()
    logl_args = attr.ib(factory=list)
    logp_args = attr.ib(factory=list)
    logl_kwargs = attr.ib(factory=dict)
    logp_kwargs = attr.ib(factory=dict)

    def __call__(self, x):
        """Return ``(ll, lp)`` arrays of per-walker log-likelihood and log-prior."""
        lp = self.logp(x, *self.logp_args, **self.logp_kwargs)
        if np.isnan(lp).any():
            raise ValueError("Prior function returned NaN.")

        inds_fix = np.isinf(lp)
        inds_good = np.arange(x.shape[0])[~inds_fix]
        ll = np.zeros(x.shape[0])
        if len(inds_good) > 0:
            if inds_fix.any():
                # Substitute a valid position for the infinite-prior walkers;
                # work on a copy so the caller's array is not clobbered.
                x = x.copy()
                x[inds_fix] = x[inds_good[0]]
            # BUGFIX: ``logl_args``/``logl_kwargs`` were declared but silently
            # ignored here; pass them through like the prior call does.
            ll[:] = self.logl(x, *self.logl_args, **self.logl_kwargs)
            ll[inds_fix] = 0.0
        return ll, lp
@attr.s(slots=True, frozen=True)
class Sampler(object):
    """
    Configuration object for a parallel-tempered ensemble sampler.

    ``betas`` may be ``None`` (a default ladder is built from ``ndim``), an
    ``int`` (taken as the number of temperatures), or an explicit sequence of
    inverse temperatures; it is normalized in ``__attrs_post_init__``.
    """

    # Mandatory parameters.
    nwalkers = attr.ib(converter=int)
    ndim = attr.ib(converter=int)
    logl = attr.ib()
    logp = attr.ib()
    logl_args = attr.ib(converter=list, factory=list)
    logp_args = attr.ib(converter=list, factory=list)
    logl_kwargs = attr.ib(converter=dict, factory=dict)
    logp_kwargs = attr.ib(converter=dict, factory=dict)
    betas = attr.ib(default=None)

    # Tuning parameters.
    adaptive = attr.ib(converter=bool, default=False)
    adaptation_lag = attr.ib(converter=int, default=10000)
    adaptation_time = attr.ib(converter=int, default=100)
    scale_factor = attr.ib(converter=float, default=2)

    _mapper = attr.ib(default=map)
    _evaluator = attr.ib(type=LikePriorEvaluator, init=False, default=None)
    _data = attr.ib(type=np.ndarray, init=False, default=None)

    @nwalkers.validator
    def _validate_nwalkers(self, attribute, value):
        if value % 2 != 0:
            raise ValueError("The number of walkers must be even.")
        if value < 2 * self.ndim:
            # Exactly 2 * ndim walkers is accepted, so the message says
            # "at least" (the old text claimed "greater than").
            raise ValueError(
                "The number of walkers must be at least 2 * dimension."
            )

    @ndim.validator
    def _validate_ndim(self, attribute, value):
        if value < 1:
            raise ValueError("Number of dimensions must be positive.")

    @betas.validator
    def _validate_betas(self, attribute, value):
        # ``None`` and int are sentinels that ``__attrs_post_init__`` expands
        # into a real ladder.  attrs runs validators *before* that hook, so
        # skip them here (previously ``len(None)`` raised TypeError for the
        # default value).
        if value is None or isinstance(value, int):
            return
        value = np.asarray(value)  # also accept plain Python sequences
        if len(value) < 1:
            raise ValueError("Need at least one temperature!")
        if (value < 0).any():
            raise ValueError("Temperatures must be non-negative.")

    @logl.validator
    @logp.validator
    def _is_callable(self, attribute, value):
        if not callable(value):
            raise TypeError("{} must be callable".format(attribute.name))

    def __attrs_post_init__(self):
        # Normalize ``betas`` into an explicit ladder.
        if self.betas is None:
            betas = make_ladder(self.ndim)
        elif isinstance(self.betas, int):
            # Treat this as the number of temperatures to use.
            betas = make_ladder(self.ndim, self.betas)
        else:
            betas = util._ladder(self.betas)
        # Frozen class: must bypass the blocked __setattr__.
        object.__setattr__(self, "betas", betas)

        object.__setattr__(
            self,
            "_evaluator",
            LikePriorEvaluator(
                logl=self.logl,
                logp=self.logp,
                logl_args=self.logl_args,
                logp_args=self.logp_args,
                logl_kwargs=self.logl_kwargs,
                logp_kwargs=self.logp_kwargs,
            ),
        )

    def ensemble(self, x, random=None):
        """Build a fresh ensemble starting from walker positions ``x``."""
        if random is None:
            random = RandomState()
        elif not isinstance(random, RandomState):
            raise TypeError("Invalid random state.")

        config = ensemble.EnsembleConfiguration(
            adaptation_lag=self.adaptation_lag,
            adaptation_time=self.adaptation_time,
            scale_factor=self.scale_factor,
            evaluator=self._evaluator,
        )
        return ensemble.Ensemble(
            x=x,
            betas=self.betas.copy(),
            config=config,
            adaptive=self.adaptive,
            random=random,
            mapper=self._mapper,
        )

    def sample(self, x, random=None, thin_by=None):
        """
        Return a stateless iterator that yields the live ensemble after every
        ``thin_by`` steps (default 1).  The starting state is not yielded.
        """
        if thin_by is None:
            thin_by = 1

        # Named ``ens`` so the local doesn't shadow the ``ensemble`` module.
        ens = self.ensemble(x, random)
        while True:
            for _ in range(thin_by):
                ens.step()
            yield ens

    def chain(self, x, random=None, thin_by=None):
        """
        Create a stateful chain that stores its history.
        """
        return chain.Chain(self.ensemble(x, random), thin_by)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.