id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
11223726 | import sys
import logging
from PyQt5.QtWidgets import (
QHBoxLayout,
QGroupBox,
QDesktopWidget,
QMainWindow,
QApplication,
QMenu,
QAction,
QFileDialog,
QSplitter,
QActionGroup,
QDialog,
)
from PyQt5.QtCore import Qt
from symupy.postprocess.visunet import logger
from symupy.postprocess.visunet.network import NetworkWidget
from symupy.postprocess.visunet.qtutils import TripSelector, ODSelector
from symupy.postprocess.visunet.right_panel import RightPanelWidget
from symupy.plugins.reader import add_dir_to_plugin
from symupy.postprocess.visunet.routes import RoutesHandler
logger.setLevel(logging.INFO)
class MainWindow(QMainWindow):
    """Main window of the VisuNet application.

    Hosts the network view (left) and the info/log panel (right) in a
    horizontal splitter, and builds menus for loading networks, traffic
    data and plugins, rendering routes, and controlling the log level.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        # Start at ~70% of the available screen area.
        self.resize(QDesktopWidget().availableGeometry(self).size() * 0.7)
        self.layout = QSplitter(Qt.Horizontal)
        self.setCentralWidget(self.layout)
        self.setWindowTitle("VisuNet")
        self.netWidget = NetworkWidget(parent=self)
        self.layout.addWidget(self.netWidget)
        self.panel = RightPanelWidget(parent=self)
        self.layout.addWidget(self.panel)
        self.routes = RoutesHandler()
        self.initMenu()

    def initMenu(self):
        """Build the menu bar (Network, Routes, Plugins, Log)."""
        self.menubar = self.menuBar()
        if sys.platform == "darwin":
            # Keep the menu bar inside the window on macOS instead of using
            # the detached native (global) menu bar.
            self.menubar.setNativeMenuBar(False)

        # --- Network menu ---
        self.fileMenu = QMenu("&Network", self)
        self.menubar.addMenu(self.fileMenu)
        self.openNetAction = QAction("&Open...", self)
        self.fileMenu.addAction(self.openNetAction)
        self.openNetAction.triggered.connect(self.open_network)
        self.openNetAction.setShortcut("Ctrl+N")

        # --- Routes menu ---
        self.routeMenu = QMenu("&Routes", self)
        self.menubar.addMenu(self.routeMenu)
        self.openTrajAction = QAction("&Open...", self)
        self.openTrajAction.triggered.connect(self.open_traffic_data)
        self.routeMenu.addAction(self.openTrajAction)
        self.openTrajAction.setShortcut("Ctrl+T")
        # The three render actions start disabled; open_traffic_data()
        # enables whichever ones the loaded reader supports.
        self.renderPathAction = QAction("&Render Path...", self)
        self.renderPathAction.setDisabled(True)
        self.renderPathAction.triggered.connect(self.select_path)
        self.routeMenu.addAction(self.renderPathAction)
        self.renderODAction = QAction("&Render OD...", self)
        self.routeMenu.addAction(self.renderODAction)
        self.renderODAction.triggered.connect(self.selectOD)
        self.renderODAction.setDisabled(True)
        self.renderTripAction = QAction("&Render Trip...", self)
        self.renderTripAction.setDisabled(True)
        self.renderTripAction.triggered.connect(self.select_trip)
        self.routeMenu.addAction(self.renderTripAction)
        self.exportAction = QAction("&Export...", self)
        self.routeMenu.addAction(self.exportAction)
        self.exportAction.triggered.connect(self.routes.export_csv)
        self.clearAction = QAction("&Clear", self)
        self.routeMenu.addAction(self.clearAction)
        self.clearAction.triggered.connect(self.netWidget.clear)

        # --- Plugins menu ---
        self.pluginMenu = QMenu("&Plugins", self)
        self.menubar.addMenu(self.pluginMenu)
        self.submenuReader = self.pluginMenu.addMenu("&Reader")
        self.addFolderAction = QAction("&Add folder...", self)
        self.submenuReader.addAction(self.addFolderAction)
        self.addFolderAction.triggered.connect(self.add_plugins)

        # --- Log menu ---
        self.logMenu = QMenu("&Log", self)
        self.menubar.addMenu(self.logMenu)
        self.submenuLogger = self.logMenu.addMenu("&Level")
        # QActionGroup makes the four level actions mutually exclusive.
        self.levelGroup = QActionGroup(self)
        self.levelDBGAction = QAction("&Debug", self)
        self.levelINFAction = QAction("&Info", self)
        self.levelWRNAction = QAction("&Warning", self)
        self.levelERRAction = QAction("&Error", self)
        self.levelDBGAction.setCheckable(True)
        self.levelINFAction.setCheckable(True)
        self.levelWRNAction.setCheckable(True)
        self.levelERRAction.setCheckable(True)
        self.submenuLogger.addAction(self.levelDBGAction)
        self.submenuLogger.addAction(self.levelINFAction)
        self.submenuLogger.addAction(self.levelWRNAction)
        self.submenuLogger.addAction(self.levelERRAction)
        self.levelGroup.addAction(self.levelDBGAction)
        self.levelGroup.addAction(self.levelINFAction)
        self.levelGroup.addAction(self.levelWRNAction)
        self.levelGroup.addAction(self.levelERRAction)
        self.levelDBGAction.triggered.connect(self.setLoggerLevelDBG)
        self.levelINFAction.triggered.connect(self.setLoggerLevelINF)
        self.levelWRNAction.triggered.connect(self.setLoggerLevelWRN)
        self.levelERRAction.triggered.connect(self.setLoggerLevelERR)
        # INFO is the default level (matches logger.setLevel at import time).
        self.levelINFAction.setChecked(True)
        self.clearLogAction = QAction("&Clear", self)
        self.logMenu.addAction(self.clearLogAction)
        self.clearLogAction.triggered.connect(self.clearLog)

    def open_network(self):
        """Prompt for a network file and display it in the network widget."""
        file, _ = QFileDialog.getOpenFileName(
            self, "Load Network", options=QFileDialog.DontUseNativeDialog
        )
        if file != "":
            self.netWidget.choose_reader(file)
            self.panel.update_label_network(file.split("/")[-1])

    def open_traffic_data(self):
        """Prompt for a traffic-data file and enable supported route actions."""
        file, _ = QFileDialog.getOpenFileName(
            self, "Load Traffic Data", options=QFileDialog.DontUseNativeDialog
        )
        # NOTE(review): routes are cleared even when the dialog is cancelled
        # (file == ""); confirm this is intended before changing it.
        self.routes.clear()
        self.routes.addRenderer(self.netWidget.renderer)
        if file != "":
            self.routes.choose_reader(file)
            self.panel.update_label_traffic_data(file.split("/")[-1])
            reader = self.routes.reader
            # Only expose the render actions the loaded reader implements.
            self.renderPathAction.setEnabled(self._reader_supports(reader, "get_path"))
            self.renderODAction.setEnabled(self._reader_supports(reader, "get_OD"))
            self.renderTripAction.setEnabled(self._reader_supports(reader, "get_trip"))

    @staticmethod
    def _reader_supports(reader, method_name):
        """Return True if *reader* provides a callable attribute *method_name*."""
        return callable(getattr(reader, method_name, None))

    def select_path(self):
        """Ask for a vehicle id and render its path."""
        trip_selector = TripSelector()
        if trip_selector.exec_() == QDialog.Accepted:
            vehid = trip_selector.vehid.value()
            if vehid != "":
                logger.info(f"Looking for path {vehid} and plotting it ...")
                self.routes.addPath(vehid)

    def select_trip(self):
        """Ask for a vehicle id and render its trip."""
        trip_selector = TripSelector()
        if trip_selector.exec_() == QDialog.Accepted:
            vehid = trip_selector.vehid.value()
            if vehid != "":
                logger.info(f"Looking for trip {vehid} and plotting it ...")
                self.routes.addTrip(vehid)

    def selectOD(self):
        """Ask for an origin/destination pair and render the matching routes."""
        # Pass the reader's get_OD method itself; the dialog calls it.
        OD_selector = ODSelector(self.routes.reader.get_OD)
        if OD_selector.exec_() == QDialog.Accepted:
            # The dialog reports unset fields as the string "None".
            args = [None if arg == "None" else arg for arg in OD_selector.values]
            self.routes.addOD(args)

    def add_plugins(self):
        """Prompt for a folder and register it as a reader-plugin source."""
        # A single QFileDialog.Options wrap suffices (the original wrapped twice).
        options = QFileDialog.Options(QFileDialog.DontUseNativeDialog)
        folder = str(
            QFileDialog.getExistingDirectory(self, "Load Plugins", "", options=options)
        )
        if folder != "":
            add_dir_to_plugin(folder)

    # The four setters below are kept as distinct methods because each is
    # connected to its own QAction above.
    def setLoggerLevelDBG(self):
        """Set the package logger to DEBUG."""
        logger.setLevel(logging.DEBUG)

    def setLoggerLevelINF(self):
        """Set the package logger to INFO."""
        logger.setLevel(logging.INFO)

    def setLoggerLevelWRN(self):
        """Set the package logger to WARNING."""
        logger.setLevel(logging.WARNING)

    def setLoggerLevelERR(self):
        """Set the package logger to ERROR."""
        logger.setLevel(logging.ERROR)

    def clearLog(self):
        """Clear the log panel's text."""
        self.panel.logger_widget.clear()
def launch_app(file=None):
    """Create the Qt application, show the main window and run the event loop.

    :param file: optional path to a network file to load and plot on startup.
    """
    app = QApplication(sys.argv)
    w = MainWindow()
    w.show()
    if file is not None:
        # NOTE(review): MainWindow defines no `data` attribute in this file —
        # presumably set elsewhere or stale; confirm this startup path works.
        w.data.file_network = file
        w.panel.panel_netw.plot_network()
    app.exec_()


if __name__ == "__main__":
    launch_app()
| StarcoderdataPython |
# Data taken from the MathML 2.0 reference
# Each line of `data` describes one entry of the MathML operator dictionary:
# a quoted operator (a literal character or a named entity reference such as
# "&LeftBracketingBar;") followed by its default rendering attributes
# (form, fence, separator, stretchy, largeop, movablelimits, accent,
# lspace, rspace, minsize). The same operator may appear several times with
# different `form` values (prefix / infix / postfix).
data = '''
"(" form="prefix" fence="true" stretchy="true" lspace="0em" rspace="0em"
")" form="postfix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"[" form="prefix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"]" form="postfix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"{" form="prefix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"}" form="postfix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"”" form="postfix" fence="true" lspace="0em" rspace="0em"
"’" form="postfix" fence="true" lspace="0em" rspace="0em"
"⟨" form="prefix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"&LeftBracketingBar;" form="prefix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"⌈" form="prefix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"⟦" form="prefix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"&LeftDoubleBracketingBar;" form="prefix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"⌊" form="prefix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"“" form="prefix" fence="true" lspace="0em" rspace="0em"
"‘" form="prefix" fence="true" lspace="0em" rspace="0em"
"⟩" form="postfix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"&RightBracketingBar;" form="postfix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"⌉" form="postfix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"⟧" form="postfix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"&RightDoubleBracketingBar;" form="postfix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"⌋" form="postfix" fence="true" stretchy="true" lspace="0em" rspace="0em"
"&LeftSkeleton;" form="prefix" fence="true" lspace="0em" rspace="0em"
"&RightSkeleton;" form="postfix" fence="true" lspace="0em" rspace="0em"
"⁣" form="infix" separator="true" lspace="0em" rspace="0em"
"," form="infix" separator="true" lspace="0em" rspace="verythickmathspace"
"─" form="infix" stretchy="true" minsize="0" lspace="0em" rspace="0em"
"|" form="infix" stretchy="true" minsize="0" lspace="0em" rspace="0em"
";" form="infix" separator="true" lspace="0em" rspace="thickmathspace"
";" form="postfix" separator="true" lspace="0em" rspace="0em"
":=" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≔" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"∵" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"∴" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"❘" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"//" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"∷" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"&" form="prefix" lspace="0em" rspace="thickmathspace"
"&" form="postfix" lspace="thickmathspace" rspace="0em"
"*=" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"-=" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"+=" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"/=" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"->" form="infix" lspace="thickmathspace" rspace="thickmathspace"
":" form="infix" lspace="thickmathspace" rspace="thickmathspace"
".." form="postfix" lspace="mediummathspace" rspace="0em"
"..." form="postfix" lspace="mediummathspace" rspace="0em"
"∋" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⫤" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊨" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊤" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊣" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊢" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⇒" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⥰" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"|" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"||" form="infix" lspace="mediummathspace" rspace="mediummathspace"
"⩔" form="infix" stretchy="true" lspace="mediummathspace" rspace="mediummathspace"
"&&" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⩓" form="infix" stretchy="true" lspace="mediummathspace" rspace="mediummathspace"
"&" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"!" form="prefix" lspace="0em" rspace="thickmathspace"
"⫬" form="prefix" lspace="0em" rspace="thickmathspace"
"∃" form="prefix" lspace="0em" rspace="thickmathspace"
"∀" form="prefix" lspace="0em" rspace="thickmathspace"
"∄" form="prefix" lspace="0em" rspace="thickmathspace"
"∈" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"∉" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"∌" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊏̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⋢" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊐̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⋣" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊂⃒" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊈" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊃⃒" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊉" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"∋" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊏" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊑" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊐" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊒" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⋐" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊆" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊃" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊇" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⇐" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⇔" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⇒" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⥐" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⥞" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"↽" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⥖" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⥟" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⇁" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⥗" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"←" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⇤" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⇆" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"↔" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⥎" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"↤" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⥚" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"↼" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⥒" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"↙" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"↘" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"→" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⇥" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⇄" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"↦" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⥛" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⇀" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⥓" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"←" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"→" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"↖" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"↗" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"=" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"<" form="infix" lspace="thickmathspace" rspace="thickmathspace"
">" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"!=" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"==" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"<=" form="infix" lspace="thickmathspace" rspace="thickmathspace"
">=" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≡" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≍" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≐" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"∥" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⩵" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≂" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⇌" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"≥" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⋛" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≧" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⪢" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≷" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⩾" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≳" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≎" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≏" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊲" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⧏" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊴" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≤" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⋚" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≦" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≶" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⪡" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⩽" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≲" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≫" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≪" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≢" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≭" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"∦" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≠" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≂̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≯" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≱" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≧̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≫̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≹" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⩾̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≵" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≎̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≏̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⋪" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⧏̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⋬" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≮" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≰" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"&NotLessFullEqual;" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≪̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⩽̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≴" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⪢̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⪡̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊀" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⪯̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⋠" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"&NotPrecedesTilde;" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⋫" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⧐̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⋭" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊁" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⪰̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⋡" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≿̸" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≁" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≄" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≇" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≉" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"∤" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≺" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⪯" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≼" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≾" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"∷" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"∝" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⇋" form="infix" stretchy="true" lspace="thickmathspace" rspace="thickmathspace"
"⊳" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⧐" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊵" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≻" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⪰" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≽" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≿" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"∼" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≃" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≅" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"≈" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊥" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"∣" form="infix" lspace="thickmathspace" rspace="thickmathspace"
"⊔" form="infix" stretchy="true" lspace="mediummathspace" rspace="mediummathspace"
"⋃" form="infix" stretchy="true" lspace="mediummathspace" rspace="mediummathspace"
"⊎" form="infix" stretchy="true" lspace="mediummathspace" rspace="mediummathspace"
"-" form="infix" lspace="mediummathspace" rspace="mediummathspace"
"+" form="infix" lspace="mediummathspace" rspace="mediummathspace"
"⋂" form="infix" stretchy="true" lspace="mediummathspace" rspace="mediummathspace"
"∓" form="infix" lspace="mediummathspace" rspace="mediummathspace"
"±" form="infix" lspace="mediummathspace" rspace="mediummathspace"
"⊓" form="infix" stretchy="true" lspace="mediummathspace" rspace="mediummathspace"
"⋁" form="prefix" largeop="true" movablelimits="true" stretchy="true" lspace="0em" rspace="thinmathspace"
"⊖" form="prefix" largeop="true" movablelimits="true" lspace="0em" rspace="thinmathspace"
"⊕" form="prefix" largeop="true" movablelimits="true" lspace="0em" rspace="thinmathspace"
"∑" form="prefix" largeop="true" movablelimits="true" stretchy="true" lspace="0em" rspace="thinmathspace"
"⋃" form="prefix" largeop="true" movablelimits="true" stretchy="true" lspace="0em" rspace="thinmathspace"
"⊎" form="prefix" largeop="true" movablelimits="true" stretchy="true" lspace="0em" rspace="thinmathspace"
"lim" form="prefix" movablelimits="true" lspace="0em" rspace="thinmathspace"
"max" form="prefix" movablelimits="true" lspace="0em" rspace="thinmathspace"
"min" form="prefix" movablelimits="true" lspace="0em" rspace="thinmathspace"
"⊖" form="infix" lspace="thinmathspace" rspace="thinmathspace"
"⊕" form="infix" lspace="thinmathspace" rspace="thinmathspace"
"∲" form="prefix" largeop="true" stretchy="true" lspace="0em" rspace="0em"
"∮" form="prefix" largeop="true" stretchy="true" lspace="0em" rspace="0em"
"∳" form="prefix" largeop="true" stretchy="true" lspace="0em" rspace="0em"
"∯" form="prefix" largeop="true" stretchy="true" lspace="0em" rspace="0em"
"∫" form="prefix" largeop="true" stretchy="true" lspace="0em" rspace="0em"
"⋓" form="infix" lspace="thinmathspace" rspace="thinmathspace"
"⋒" form="infix" lspace="thinmathspace" rspace="thinmathspace"
"≀" form="infix" lspace="thinmathspace" rspace="thinmathspace"
"⋀" form="prefix" largeop="true" movablelimits="true" stretchy="true" lspace="0em" rspace="thinmathspace"
"⊗" form="prefix" largeop="true" movablelimits="true" lspace="0em" rspace="thinmathspace"
"∐" form="prefix" largeop="true" movablelimits="true" stretchy="true" lspace="0em" rspace="thinmathspace"
"∏" form="prefix" largeop="true" movablelimits="true" stretchy="true" lspace="0em" rspace="thinmathspace"
"⋂" form="prefix" largeop="true" movablelimits="true" stretchy="true" lspace="0em" rspace="thinmathspace"
"∐" form="infix" lspace="thinmathspace" rspace="thinmathspace"
"⋆" form="infix" lspace="thinmathspace" rspace="thinmathspace"
"⊙" form="prefix" largeop="true" movablelimits="true" lspace="0em" rspace="thinmathspace"
"*" form="infix" lspace="thinmathspace" rspace="thinmathspace"
"⁢" form="infix" lspace="0em" rspace="0em"
"·" form="infix" lspace="thinmathspace" rspace="thinmathspace"
"⊗" form="infix" lspace="thinmathspace" rspace="thinmathspace"
"⋁" form="infix" lspace="thinmathspace" rspace="thinmathspace"
"⋀" form="infix" lspace="thinmathspace" rspace="thinmathspace"
"⋄" form="infix" lspace="thinmathspace" rspace="thinmathspace"
"∖" form="infix" stretchy="true" lspace="thinmathspace" rspace="thinmathspace"
"/" form="infix" stretchy="true" lspace="thinmathspace" rspace="thinmathspace"
"-" form="prefix" lspace="0em" rspace="veryverythinmathspace"
"+" form="prefix" lspace="0em" rspace="veryverythinmathspace"
"∓" form="prefix" lspace="0em" rspace="veryverythinmathspace"
"±" form="prefix" lspace="0em" rspace="veryverythinmathspace"
"." form="infix" lspace="0em" rspace="0em"
"⨯" form="infix" lspace="verythinmathspace" rspace="verythinmathspace"
"**" form="infix" lspace="verythinmathspace" rspace="verythinmathspace"
"⊙" form="infix" lspace="verythinmathspace" rspace="verythinmathspace"
"∘" form="infix" lspace="verythinmathspace" rspace="verythinmathspace"
"□" form="prefix" lspace="0em" rspace="verythinmathspace"
"∇" form="prefix" lspace="0em" rspace="verythinmathspace"
"∂" form="prefix" lspace="0em" rspace="verythinmathspace"
"ⅅ" form="prefix" lspace="0em" rspace="verythinmathspace"
"ⅆ" form="prefix" lspace="0em" rspace="verythinmathspace"
"√" form="prefix" stretchy="true" lspace="0em" rspace="verythinmathspace"
"⇓" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⟸" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⟺" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⟹" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⇑" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⇕" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"↓" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⤓" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⇵" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"↧" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⥡" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⇃" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⥙" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⥑" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⥠" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"↿" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⥘" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⟵" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⟷" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⟶" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⥯" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⥝" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⇂" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⥕" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⥏" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⥜" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"↾" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⥔" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"↓" form="infix" lspace="verythinmathspace" rspace="verythinmathspace"
"↑" form="infix" lspace="verythinmathspace" rspace="verythinmathspace"
"↑" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⤒" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⇅" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"↕" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"⥮" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"↥" form="infix" stretchy="true" lspace="verythinmathspace" rspace="verythinmathspace"
"^" form="infix" lspace="verythinmathspace" rspace="verythinmathspace"
"<>" form="infix" lspace="verythinmathspace" rspace="verythinmathspace"
"'" form="postfix" lspace="verythinmathspace" rspace="0em"
"!" form="postfix" lspace="verythinmathspace" rspace="0em"
"!!" form="postfix" lspace="verythinmathspace" rspace="0em"
"~" form="infix" lspace="verythinmathspace" rspace="verythinmathspace"
"@" form="infix" lspace="verythinmathspace" rspace="verythinmathspace"
"--" form="postfix" lspace="verythinmathspace" rspace="0em"
"--" form="prefix" lspace="0em" rspace="verythinmathspace"
"++" form="postfix" lspace="verythinmathspace" rspace="0em"
"++" form="prefix" lspace="0em" rspace="verythinmathspace"
"⁡" form="infix" lspace="0em" rspace="0em"
"?" form="infix" lspace="verythinmathspace" rspace="verythinmathspace"
"_" form="infix" lspace="verythinmathspace" rspace="verythinmathspace"
"˘" form="postfix" accent="true" lspace="0em" rspace="0em"
"¸" form="postfix" accent="true" lspace="0em" rspace="0em"
"`" form="postfix" accent="true" lspace="0em" rspace="0em"
"˙" form="postfix" accent="true" lspace="0em" rspace="0em"
"˝" form="postfix" accent="true" lspace="0em" rspace="0em"
"&DiacriticalLeftArrow;" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
"&DiacriticalLeftRightArrow;" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
"&DiacriticalLeftRightVector;" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
"&DiacriticalLeftVector;" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
"´" form="postfix" accent="true" lspace="0em" rspace="0em"
"&DiacriticalRightArrow;" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
"&DiacriticalRightVector;" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
"˜" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
"¨" form="postfix" accent="true" lspace="0em" rspace="0em"
"̑" form="postfix" accent="true" lspace="0em" rspace="0em"
"ˇ" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
"^" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
"‾" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
"⏞" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
"⎴" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
"⏜" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
"⃛" form="postfix" accent="true" lspace="0em" rspace="0em"
"_" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
"⏟" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
"⎵" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
"⏝" form="postfix" accent="true" stretchy="true" lspace="0em" rspace="0em"
'''
| StarcoderdataPython |
8180384 | <reponame>tihbe/PCRITICAL<filename>modules/topologies.py
from dataclasses import dataclass, asdict
from typing import Union, Tuple
import logging
import numpy as np
import networkx as nx
from modules import reporter
from scipy.spatial.distance import cdist
from scipy.sparse import bsr_matrix, vstack
from sklearn.metrics import pairwise_distances_chunked
_logger = logging.getLogger(__name__)
class SmallWorldTopology(nx.DiGraph):
"""Create a small-world type topology by creating a i1*i2*i3 cube of neurons separated by distance
neuron_spacing in a 3d shape of j1*j2*j3 cubes distanced by minicolumn_spacing. i1, i2, i3 is the
minicolumn_shape while j1, j2, j3 is the macrocolumn_shape. Connectivity is distance based with prob-
ability of p_max * e^(-dist / intracolumnar_sparseness).
"""
@dataclass(frozen=True)
class Configuration:
minicolumn_shape: Tuple[int, int, int] = (2, 2, 2)
macrocolumn_shape: Tuple[int, int, int] = (4, 4, 4)
neuron_spacing: float = 10.0
minicolumn_spacing: float = 100.0
p_max: float = 0.056
intracolumnar_sparseness: float = 3 * 125
# Construct the topology using sparse matrices of size mem_available
sparse_init: bool = False
mem_available: int = 8 * 1024
init_weights: bool = True # If true, init the weights with following parameters:
inhibitory_prob: float = 0.2 # Ratio of inhibitory neurons [0, 1]
inhibitory_init_weight_range: Tuple[float, float] = (0.01, 0.2)
excitatory_init_weight_range: Tuple[float, float] = (0.1, 0.7)
spectral_radius_norm: bool = False
def __init__(self, configs: Union[Configuration, dict, nx.DiGraph]):
if type(configs) == nx.DiGraph: # Assume we're creating a copy
super().__init__(configs)
return
elif type(configs) == dict:
configs = SmallWorldTopology.Configuration(**configs)
super().__init__()
self.__dict__.update(asdict(configs))
assert (
len(self.minicolumn_shape) == 3
), "Minicolumn shape must be of dimension 3 (3D)"
assert (
len(self.macrocolumn_shape) == 3
), "Macrocolumn shape must be of dimension 3 (3D)"
# Initial neuron positions (all separated by neuron_spacing)
i, j, k = np.multiply(self.macrocolumn_shape, self.minicolumn_shape)
grid = np.mgrid[:i, :j, :k].reshape(3, -1)
x, y, z = grid * self.neuron_spacing
# Adding minicolumnSpacing (from random to small world topology)
if self.minicolumn_spacing > 0:
for d in range(3): # For each dimension
grid[d] //= self.minicolumn_shape[d]
x += grid[0] * self.minicolumn_spacing
y += grid[1] * self.minicolumn_spacing
z += grid[2] * self.minicolumn_spacing
positions = map(lambda p: {"position": p}, zip(x, y, z))
self.add_nodes_from(zip(range(len(x)), positions))
# Distance-based random connectivity
positions = np.stack(np.asarray(self.nodes.data("position"))[:, 1])
if (
self.sparse_init
): # Slower but iterative (for adjacency matrices that don't fit in memory)
distances = pairwise_distances_chunked(
positions,
metric="euclidean",
n_jobs=-1,
reduce_func=lambda chunk, start: bsr_matrix(
np.random.random(chunk.shape)
< self.p_max * np.exp(-chunk / self.intracolumnar_sparseness)
),
working_memory=self.mem_available,
)
adjacency_matrix = vstack(list(distances))
adjacency_matrix.setdiag(0) # Avoid self-connections
self.add_edges_from(zip(*adjacency_matrix.nonzero()))
else:
distances = cdist(positions, positions, "euclidean")
probabilities = self.p_max * np.exp(
-distances / self.intracolumnar_sparseness
)
np.fill_diagonal(probabilities, 0) # Avoid self-connections
rand_matrix = np.random.random(probabilities.shape)
i, j = np.nonzero(rand_matrix < probabilities)
self.add_edges_from(zip(i, j))
n_neurons = self.number_of_nodes()
self.inhibitory_neurons = set(
np.random.permutation(n_neurons)[: int(n_neurons * self.inhibitory_prob)]
)
for u, v in self.edges:
if u in self.inhibitory_neurons:
self.edges[u, v]["weight"] = -np.random.uniform(
*self.inhibitory_init_weight_range
)
else:
self.edges[u, v]["weight"] = np.random.uniform(
*self.excitatory_init_weight_range
)
if self.spectral_radius_norm:
spectral_radius = lambda matrix: np.max(np.abs(np.linalg.eigvals(matrix)))
adj = nx.adjacency_matrix(self, weight="weight").todense()
scale = 1.0 / spectral_radius(np.abs(adj))
for i, (u, v) in enumerate(self.edges):
self.edges[u, v]["weight"] = self.edges[u, v]["weight"] * scale
if _logger.isEnabledFor(logging.INFO):
# Some extra info about the topology
out_degrees = np.array(self.out_degree())[:, 1]
reporter.log_metrics(
{
"number-of-neurons": n_neurons,
"number-of-synapses": self.number_of_edges(),
"excitatory-ratio": 100.0
* (1.0 - len(self.inhibitory_neurons) / n_neurons),
"avg-out-degree": np.mean(out_degrees),
"nb-out-degree-0": len(out_degrees) - np.count_nonzero(out_degrees),
"nb-isolates": nx.number_of_isolates(self),
}
)
# if not self.sparse_init:
# algebraic_connectivity = nx.algebraic_connectivity(self.to_undirected())
# sigma = nx.sigma(self.to_undirected(), seed=np.random.randint(0, 2**32-1))
# _logger.info("Small-world coefficient (sigma): %.5", sigma)
# omega = nx.omega(self.to_undirected(), seed=np.random.randint(0, 2**32-1))
# _logger.info("Small-world coefficient (omega): %.5", omega)
# rich_club_coefficient = nx.rich_club_coefficient(self.to_undirected(), seed=np.random.randint(0, 2**32-1))
# avg_rich_club_coeff = np.mean(list(rich_club_coefficient.values()))
# _logger.info("Rich club coefficient: %.5f", avg_rich_club_coeff)
| StarcoderdataPython |
398716 | <reponame>COVID-Weather/regionmask
import warnings
import numpy as np
import xarray as xr
from .utils import _is_180, _wrapAngle, equally_spaced
def _mask(
self,
lon_or_obj,
lat=None,
lon_name="lon",
lat_name="lat",
method=None,
xarray=None,
wrap_lon=None,
):
"""
create a grid as mask of a set of regions for given lat/ lon grid
Parameters
----------
lon_or_obj : array_like or object
Can either be (1) a longitude array and then lat needs to be
given. Or an object where the longitude and latitude can be
retrived as:
lon = lon_or_obj[lon_name]
lat = lon_or_obj[lat_name]
lat : array_like, (optional)
If 'lon_or_obj' is a longitude array, the latitude needs to be
specified here.
lon_name, optional
Name of longitude in 'lon_or_obj'. Default: 'lon'.
lat_name, optional
Name of latgitude in 'lon_or_obj'. Default: 'lat'
method : None | "rasterize" | "shapely" | "legacy"
Set method used to determine wether a gridpoint lies in a region.
xarray : None | bool, optional
Deprecated. If None or True returns an xarray DataArray, if False returns a
numpy ndarray. Default: None.
wrap_lon : None | bool | 180 | 360, optional
If the regions and the provided longitude do not have the same
base (i.e. one is -180..180 and the other 0..360) one of them
must be wrapped. This can be done with wrap_lon.
If wrap_lon is None autodetects whether the longitude needs to be
wrapped. If wrap_lon is False, nothing is done. If wrap_lon is True,
longitude data is wrapped to 360 if its minimum is smaller
than 0 and wrapped to 180 if its maximum is larger than 180.
Returns
-------
mask : ndarray or xarray DataSet
Method - rasterize
------------------
"rasterize" uses `rasterio.features.rasterize`. This method offers a 50 to 100
speedup compared to "legacy". It only works for equally spaced lon and lat grids.
Method - legacy
---------------
Uses the following:
>>> from matplotlib.path import Path
>>> bbPath = Path(((0, 0), (0, 1), (1, 1.), (1, 0)))
>>> bbPath.contains_point((0.5, 0.5))
This method is slower than the others and its edge behaviour is inconsistent
(see https://github.com/matplotlib/matplotlib/issues/9704).
"""
lat_orig = lat
lon, lat = _extract_lon_lat(lon_or_obj, lat, lon_name, lat_name)
lon = np.array(lon)
lat = np.array(lat)
# automatically detect whether wrapping is necessary
if wrap_lon is None:
regions_is_180 = self.lon_180
grid_is_180 = _is_180(lon.min(), lon.max())
wrap_lon = not regions_is_180 == grid_is_180
if wrap_lon:
lon_old = lon.copy()
lon = _wrapAngle(lon, wrap_lon)
if method is None:
method = "rasterize" if equally_spaced(lon, lat) else "shapely"
elif method == "rasterize":
if not equally_spaced(lon, lat):
raise ValueError(
"`lat` and `lon` must be equally spaced to use" "`method='rasterize'`"
)
elif method == "legacy":
msg = "The method 'legacy' will be removed in a future version."
warnings.warn(msg, FutureWarning, stacklevel=3)
if method == "legacy":
func = _mask_contains
data = self.coords
elif method == "rasterize":
func = _mask_rasterize
data = self.polygons
elif method == "shapely":
func = _mask_shapely
data = self.polygons
else:
msg = "Only methods 'rasterize', 'shapely', and 'legacy' are implemented"
raise NotImplementedError(msg)
mask = func(lon, lat, data, numbers=self.numbers)
if np.all(np.isnan(mask)):
msg = "All elements of mask are NaN. Try to set 'wrap_lon=True'."
print(msg)
if xarray is None:
xarray = True
else:
msg = (
"Passing the `xarray` keyword is deprecated. Future versions of regionmask will"
" always return an xarray Dataset. Use `mask.values` to obtain a numpy grid."
)
warnings.warn(msg, FutureWarning, stacklevel=3)
if xarray:
# wrap the angle back
if wrap_lon:
lon = lon_old
if lon.ndim == 1:
mask = _create_xarray(mask, lon, lat, lon_name, lat_name)
else:
mask = _create_xarray_2D(mask, lon_or_obj, lat_orig, lon_name, lat_name)
return mask
def _extract_lon_lat(lon_or_obj, lat, lon_name, lat_name):
# extract lon/ lat via __getitem__
if lat is None:
lon = lon_or_obj[lon_name]
lat = lon_or_obj[lat_name]
else:
lon = lon_or_obj
return lon, lat
def _create_xarray(mask, lon, lat, lon_name, lat_name):
"""create an xarray DataArray"""
# create the xarray output
coords = {lat_name: lat, lon_name: lon}
mask = xr.DataArray(mask, coords=coords, dims=(lat_name, lon_name), name="region")
return mask
def _create_xarray_2D(mask, lon_or_obj, lat, lon_name, lat_name):
"""create an xarray DataArray for 2D fields"""
lon2D, lat2D = _extract_lon_lat(lon_or_obj, lat, lon_name, lat_name)
if isinstance(lon2D, xr.DataArray):
dim1D_names = lon2D.dims
dim1D_0 = lon2D[dim1D_names[0]]
dim1D_1 = lon2D[dim1D_names[1]]
else:
dim1D_names = (lon_name + "_idx", lat_name + "_idx")
dim1D_0 = np.arange(np.array(lon2D).shape[0])
dim1D_1 = np.arange(np.array(lon2D).shape[1])
# dict with the coordinates
coords = {
dim1D_names[0]: dim1D_0,
dim1D_names[1]: dim1D_1,
lat_name: (dim1D_names, lat2D),
lon_name: (dim1D_names, lon2D),
}
mask = xr.DataArray(mask, coords=coords, dims=dim1D_names)
return mask
def create_mask_contains(lon, lat, coords, fill=np.NaN, numbers=None):
"""
create the mask of a list of regions, given the lat and lon coords
Parameters
----------
lon : ndarray
Numpy array containing the midpoints of the longitude.
lat : ndarray
Numpy array containing the midpoints of the latitude.
coords : list of nx2 arays
List of the coordinates outlining the regions
fill : float, optional
Fill value for for Default: np.NaN.
numbers : list of int, optional
If not given 0:n_coords - 1 is used.
"""
msg = (
"The function `create_mask_contains` is deprecated and will be removed in a"
" future version. Please use ``regionmask.Regions(coords).mask(lon, lat)``"
" instead."
)
warnings.warn(msg, FutureWarning, stacklevel=3)
lon, lat, numbers = _parse_input(lon, lat, coords, fill, numbers)
return _mask_contains(lon, lat, coords, numbers, fill=fill)
def _mask_contains(lon, lat, coords, numbers, fill=np.NaN):
import matplotlib.path as mplPath
LON, LAT, out, shape = _get_LON_LAT_out_shape(lon, lat, fill)
# get all combinations if lat lon points
lonlat = list(zip(LON, LAT))
# loop through all polygons
for i in range(len(coords)):
cs = np.array(coords[i])
isnan = np.isnan(cs[:, 0])
if np.any(isnan):
cs = np.split(cs, np.nonzero(isnan)[0])
else:
cs = [cs]
for c in cs:
bbPath = mplPath.Path(c)
sel = bbPath.contains_points(lonlat)
out[sel] = numbers[i]
return out.reshape(shape)
def _mask_shapely(lon, lat, polygons, numbers, fill=np.NaN):
"""
create a mask using shapely.vectorized.contains
"""
import shapely.vectorized as shp_vect
lon, lat, numbers = _parse_input(lon, lat, polygons, fill, numbers)
LON, LAT, out, shape = _get_LON_LAT_out_shape(lon, lat, fill)
# add a tiny offset to get a consistent edge behaviour
LON = LON - 1 * 10 ** -8
LAT = LAT - 1 * 10 ** -10
for i, polygon in enumerate(polygons):
sel = shp_vect.contains(polygon, LON, LAT)
out[sel] = numbers[i]
return out.reshape(shape)
def _parse_input(lon, lat, coords, fill, numbers):
lon = np.asarray(lon)
lat = np.asarray(lat)
n_coords = len(coords)
if numbers is None:
numbers = range(n_coords)
else:
assert len(numbers) == n_coords
msg = "The fill value should not be one of the region numbers."
assert fill not in numbers, msg
return lon, lat, numbers
def _get_LON_LAT_out_shape(lon, lat, fill):
if lon.ndim == 2:
LON, LAT = lon, lat
else:
LON, LAT = np.meshgrid(lon, lat)
shape = LON.shape
LON, LAT = LON.flatten(), LAT.flatten()
# create output variable
out = np.empty(shape=shape).flatten()
out.fill(fill)
return LON, LAT, out, shape
def _transform_from_latlon(lon, lat):
"""perform an affine tranformation to the latitude/longitude coordinates"""
from affine import Affine
lat = np.asarray(lat)
lon = np.asarray(lon)
d_lon = lon[1] - lon[0]
d_lat = lat[1] - lat[0]
trans = Affine.translation(lon[0] - d_lon / 2, lat[0] - d_lat / 2)
scale = Affine.scale(d_lon, d_lat)
return trans * scale
def _mask_rasterize(lon, lat, polygons, numbers, fill=np.NaN, **kwargs):
""" Rasterize a list of (geometry, fill_value) tuples onto the given coordinates.
This only works for 1D lat and lon arrays.
for internal use: does not check valitity of input
"""
# subtract a tiny offset: https://github.com/mapbox/rasterio/issues/1844
lon = np.asarray(lon) - 1 * 10 ** -8
lat = np.asarray(lat) - 1 * 10 ** -10
return _mask_rasterize_no_offset(lon, lat, polygons, numbers, fill, **kwargs)
def _mask_rasterize_no_offset(lon, lat, polygons, numbers, fill=np.NaN, **kwargs):
""" Rasterize a list of (geometry, fill_value) tuples onto the given coordinates.
This only works for 1D lat and lon arrays.
for internal use: does not check valitity of input
"""
# TODO: use only this function once https://github.com/mapbox/rasterio/issues/1844
# is resolved
from rasterio import features
shapes = zip(polygons, numbers)
transform = _transform_from_latlon(lon, lat)
out_shape = (len(lat), len(lon))
raster = features.rasterize(
shapes,
out_shape=out_shape,
fill=fill,
transform=transform,
dtype=np.float,
**kwargs
)
return raster
| StarcoderdataPython |
225259 | <gh_stars>0
import pickle
import re
import Levenshtein as lev
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import MultinomialNB
class Models:
def __init__(self, dataset, baseline1=False, baseline2=False):
# Set the model to load in local from the file 'assets/multiNB_model.sav'
self.local_load_model_ = True
# Get the train and test sets
self.dataset = dataset
# Prepare some useful data
self.price_ranges = self.dataset.restaurant_info_df['pricerange'].unique()
self.areas = self.dataset.restaurant_info_df['area'].unique()
self.foods = self.dataset.restaurant_info_df['food'].unique()
# DataFrame of possible restaurants
self.restaurants = pd.DataFrame()
# One entry of self.restaurants that is recommended to the user
self.recommendation = None
# Index, which entry of self.restaurants is currently recommended
self.index = -1
# Here you can activate and deactivate the models
self.models = {
# 'logReg': LogisticRegression(C=100, random_state=0, max_iter=1000),
# 'decTree': DecisionTreeClassifier(),
# 'SVM': SVC(gamma='scale', probability=True, C=1),
'multiNB': MultinomialNB(),
# 'kNeigh': KNeighborsClassifier(n_neighbors=3)
}
# Here you can activate if you'd like one or multiple models at the same time
# From task 1.b on, it is not possible to have multiple models
self.singleModel = True
self.singleModelName = 'multiNB' # Default one
self.baseline1 = baseline1
self.baseline2 = baseline2
self.endLoading = False
# Train the models if the model is not locally saved
if self.local_load_model_:
filename = 'assets/multiNB_model.sav'
self.models['multiNB'] = pickle.load(open(filename, 'rb'))
else:
for model in self.models:
self.models[model].fit(self.dataset.x_train, self.dataset.y_train)
# print(''.join(['-----> Loading model: ', model]))
# LOGs
# print('-----> All models have been loaded and trained\n')
self.endLoading = True
def showPerformances(self):
# Function that prints the accuracy and AUCROC of the chosen or of all models
self.endLoading = False
if self.baseline2:
print('-----> baseline 2')
dialog_act = []
for example in self.dataset.x_test:
dialog_act.append(self.baseline2Expressions(example))
accurate = accuracy_score(self.dataset.y_test, dialog_act)
print(''.join(['-----> Accuracy: ', str(accurate)]))
if self.singleModel:
# Calculate performance for single model
print(''.join(['-----> Model: ', str(self.models[self.singleModelName])]))
acc = self.__accuracy(self.models[self.singleModelName])
aucroc = self.__AUCROC(self.models[self.singleModelName])
print(''.join(['-----> Accuracy: ', str(acc)]))
print(''.join(['-----> AUCROC : ', str(aucroc)]))
print(''.join(['----->']))
else:
# Calculate performance of each model
for model in self.models:
print(''.join(['-----> Model: ', model]))
acc = self.__accuracy(self.models[model])
aucroc = self.__AUCROC(self.models[model])
print(''.join(['-----> Accuracy: ', str(acc)]))
print(''.join(['-----> AUCROC : ', str(aucroc)]))
print(''.join(['----->']))
print('')
self.endLoading = True
def __AUCROC(self, model):
# AUC - ROC curve is a performance measurement for classification problem at various thresholds settings. ROC
# is a probability curve and AUC represents degree or measure of separability. It tells how much model is
# capable of distinguishing between classes. Higher the AUC, better the model is at predicting 0s as 0s and
# 1s as 1s. By analogy, Higher the AUC, better the model is at distinguishing between patients with disease
# and no disease.
# One-vs-One and One-vs-Rest ROC AUC scores averaged between macro and weighted by prevalence
y_prob = model.predict_proba(self.dataset.x_test)
macro_roc_auc_ovo = metrics.roc_auc_score(self.dataset.y_test, y_prob, multi_class="ovo", average="macro")
weighted_roc_auc_ovo = metrics.roc_auc_score(self.dataset.y_test, y_prob, multi_class="ovo", average="weighted")
macro_roc_auc_ovr = metrics.roc_auc_score(self.dataset.y_test, y_prob, multi_class="ovr", average="macro")
weighted_roc_auc_ovr = metrics.roc_auc_score(self.dataset.y_test, y_prob, multi_class="ovr", average="weighted")
return (macro_roc_auc_ovo + weighted_roc_auc_ovo + macro_roc_auc_ovr + weighted_roc_auc_ovr) / 4
def __accuracy(self, model):
# Calculate accuracy of the model
predicted = model.predict(self.dataset.x_test)
return np.mean(predicted == self.dataset.y_test)
def setSingleModel(self, singleModel=True, name='multiNB'):
# Set the single model, default is multiNB
self.singleModel = singleModel
self.singleModelName = name
def evalueNewUtterance(self, utterance):
# Classify the input from the user as one of the utterances based on the used model
# Majority-class baseline
if self.baseline1:
return 'inform'
# Second rule-based baseline
if self.baseline2:
return self.baseline2Expressions(utterance)
# Machine Learning model
if self.singleModel:
test = self.dataset.count_vect.transform([utterance])
predicted = self.models[self.singleModelName].predict(test)
return predicted[0]
else:
for model in self.models:
print(''.join(['-----> Model: ', model]))
print(''.join(['-----> New utterance: ', utterance]))
test = self.dataset.count_vect.transform([utterance])
predicted = self.models[self.singleModelName].predict(test)
print(''.join(['-----> Evaluated as: ', str(predicted)]))
print(''.join(['----->']))
print('')
def baseline2Expressions(self, utterance):
# Use regular expressions or key-words to classify the dialog act in baseline 2
ack = re.search("okay|um|ok|umh|ah", utterance)
affirm = re.search("yes|right|alright|yeah|perfect|correct|cool|nice|awesome|great|sounds", utterance)
bye = re.search("good bye|bye|darling|dear|goodb|[a-z]*bye", utterance)
confirm = re.search("is it|said|yea", utterance)
deny = re.search("alternative|dont|not|cannot|doesnt|shitty|suck|sucks|hate|wrong|fuck", utterance)
hello = re.search("^hello|^hi|^hey|^halo", utterance)
inform = re.search("food|christmas|asian|west|north|east|south|thai[a-z]*|austra[a_z]*|chin[a-z]+|want[a-z]*|ita[a-z]*|exp[a-z]+|veg[e\-i]tarian|recommend|french|information|downtown|looking|searching|help|serve[a-z]+|rest[a-z][a-z]+|viet[a-z]*|seafood|food|turki[a-z]+|cheap|pizza|moder[a-z]+|kitchen|oriental|mexican|child|european", utterance)
negate = re.search("not|any", utterance)
null = re.search("hm|sil|mmhmm|ringing|laugh[a-z]*|huh|sigh|missing|inaudible|cough|oh|noise|yawning|tv_noise|uh|background_speech|breath", utterance)
repeat = re.search("sorry|repeat|again|answer", utterance)
reqalts = re.search("anything|how about|what about|alternative|different|asian", utterance)
reqmore = re.search("more|suggestions", utterance)
request = re.search("their|may|pri[sc]e|wheres|what is|whats|nu[a-z]*|options|ad[a-z]*|post[a-z]*|locat[a-z]+|range|venue", utterance)
restart = re.search("start over|nevermind|restart", utterance)
thankyou = re.search("thank you|welcome|thank|thanks|day|good[a-z]*|afternoon", utterance)
if affirm != None:
return 'affirm'
if ack != None:
return 'ack'
if bye !=None:
return 'bye'
if confirm != None:
return 'confirm'
if deny != None:
return 'deny'
if hello != None:
return 'hello'
if inform != None:
return 'inform'
if negate != None:
return 'negate'
if null != None:
return 'null'
if repeat != None:
return 'repeat'
if reqalts != None:
return 'reqalts'
if reqmore != None:
return 'reqmore'
if request != None:
return 'request'
if restart != None:
return 'restart'
if thankyou != None:
return 'thankyou'
return 'not found'
def extractPreference(self, string, sys_utter):
# Extract the preference the user has entered. If none of the values of the restaurant data set are found,
# use the Levenshtein distance to find the closest match.
# Lower the string in input
string = string.lower()
# Preference extraction by pricerange, area and food
pref = {
'pricerange': '',
'area': '',
'food': ''
}
# Look for the pricerange in the text
for price in self.price_ranges:
if price in string:
pref['pricerange'] = price
break
# Look for the area in the text
for area in self.areas:
if area in string:
pref['area'] = area
break
# Look for the food in the text
for food in self.foods:
if food in string:
pref['food'] = food
break
# In case the food is not found, use some keyword matching,
# maybe there is a spelling error
# keywords matching here
track_replaced = []
for missing_pref in ['food', 'area', 'pricerange']:
if pref[missing_pref] == '':
keywords = {'food': [['food'], ['restaurant']], 'area': [['in', 'the'], ['area']],
'pricerange': [['priced'], ['restaurant'], ['price'], ['pricerange']]}
keyword_selection = {'food': self.foods, 'area': self.areas, 'pricerange': self.price_ranges}
# Extract variable before relevant keyword
words = string.split(" ")
for poss_keyword in keywords[missing_pref]:
if set(poss_keyword).issubset(set(words)):
miss_word = ''
if missing_pref != 'area':
for indx in range(0, len(words)):
# if the keyword matches a word in the sentence and doesn't occur
# in the set of keywords, it's a match
if words[indx] == poss_keyword[0]:
if not any([words[indx - 1]] in sublist for sublist in keywords.values()):
miss_word = words[indx - 1]
else:
for indx in range(0, len(words)):
# for matching 'in' 'the'
if indx != 0 and [words[indx-1], words[indx]] == keywords[missing_pref][0]:
if not any([words[indx + 1]] in sublist for sublist in keywords.values()):
miss_word = words[indx + 1]
# for matching the other keywords
elif words[indx] == keywords[missing_pref][1][0]:
if not any([words[indx - 1]] in sublist for sublist in keywords.values()):
miss_word = words[indx - 1]
# rudimentary any preference detection
if miss_word == 'any':
pref[missing_pref] = 'any'
break
# possible matches should be at least three characters
if len(miss_word) < 3:
break
# since food and pricerange share the 'restaurant' keyword, check if matching preference
# not overlap
if missing_pref != 'food' and (pref['food'] == miss_word or miss_word in track_replaced):
break
if missing_pref != 'pricerange' and \
(pref['pricerange'] == miss_word or miss_word in track_replaced):
break
# Check for matching with Levenshtein distance
# more than distance 3 it will fail
dst = {
'1': [],
'2': [],
'3': []
}
# let's check if every misspelled word before the keyword
# can be similar to something in the dataset
for stuff in keyword_selection[missing_pref]:
if lev.distance(stuff, miss_word) <= 3:
dst[str(lev.distance(stuff, miss_word))].append(stuff)
# finally let's set the preference giving priority to the one with less distance
change_check = 0
if len(dst['1']):
for entry in dst['1']:
utterance = self.__patternMatchingRequest(miss_word, entry, sys_utter)
if utterance == 'affirm':
pref[missing_pref] = entry
change_check = 1
break
elif len(dst['2']):
for entry in dst['2']:
utterance = self.__patternMatchingRequest(miss_word, entry, sys_utter)
if utterance == 'affirm':
pref[missing_pref] = entry
change_check = 1
break
elif len(dst['3']):
for entry in dst['3']:
utterance = self.__patternMatchingRequest(miss_word, entry, sys_utter)
if utterance == 'affirm':
change_check = 1
pref[missing_pref] = entry
break
# Add something to say that in case the word does not exist the user need to specify it
if not change_check:
print(sys_utter['word_not_exist'].replace('miss_word', miss_word)
.replace('MISS_WORD', miss_word))
print(sys_utter['apologize'])
return pref
def __patternMatchingRequest(self, miss_word, entry, sys_utter):
# If the user writes something that could resemble a word in the dataset,
# it is asked if the matched word is what the user meant
print(sys_utter['clarify'].replace('miss_word', miss_word).replace('entry', entry)
.replace('MISS_WORD', miss_word).replace('ENTRY', entry))
user_input = input("-----> ")
utterance = self.evalueNewUtterance(user_input)
while utterance != 'affirm' and utterance != 'negate':
if utterance != 'repeat':
print(sys_utter['repeat_ask'])
print(sys_utter['clarify'].replace('miss_word', miss_word).replace('entry', entry)
.replace('MISS_WORD', miss_word).replace('ENTRY', entry))
user_input = input("-----> ")
utterance = self.evalueNewUtterance(user_input)
print(utterance)
return utterance
def lookupInRestaurantInfo(self, preferences):
# Look up any restaurants that fit to the given preferences.
# Preference is true if it is not filled, so restaurants can already be looked up
if preferences.loc[0]['food'] == 'any' or preferences.loc[0]['food'] == '':
food = True
else:
food = self.dataset.restaurant_info_df['food'] == preferences.loc[0]['food']
if preferences.loc[0]['area'] == 'any' or preferences.loc[0]['area'] == '':
area = True
else:
area = self.dataset.restaurant_info_df['area'] == preferences.loc[0]['area']
if preferences.loc[0]['pricerange'] == 'any' or preferences.loc[0]['pricerange'] == '':
pricerange = True
else:
pricerange = self.dataset.restaurant_info_df['pricerange'] == preferences.loc[0]['pricerange']
# prevent from crashing due to no preferences
if isinstance(food, bool) and isinstance(area, bool) and isinstance(pricerange, bool) \
and food and area and pricerange:
restaurants = self.dataset.restaurant_info_df
else:
restaurants = self.dataset.restaurant_info_df.loc[food & area & pricerange]
self.restaurants = restaurants.reset_index()
def __recommendRestaurant(self):
# Internal function to return the restaurant at the position of the index.
if len(self.restaurants) == 0:
# set to -1, as it will be increased by one in method below
self.index = -1
return []
if len(self.restaurants) <= self.index:
self.index = -1
# return [-1] here to execute utterance saying that no more restaurants were found.
# if another is requested, start over
return [-1]
return self.restaurants.loc[self.index]
def recommend(self, preferences):
# Recommend one restaurant given the preferences.
# If there are multiple restaurants that satisfy the preferences, return a new one each time.
self.index += 1
if not set(self.restaurants):
# If self.restaurants is empty, for example, because preferences have been updated,
# look up restaurants in data set
self.lookupInRestaurantInfo(preferences)
self.recommendation = self.__recommendRestaurant()
def extractDetails(self, string):
# Function that extracts further details about the chosen restaurant from the user input.
string = string.lower()
requested = []
# possible keywords the user can use to get the requested detail
details = {"restaurantname": ["name", "restaurantname", "restaurant"],
"pricerange": ["price", "pricerange", "cost", "how much"],
"area": ["area", "city", "part", "region"],
"food": ["food", "type", "category"],
"phone": ["phone number", "phone", "number"],
"addr": ["address", "street", "where"],
"postcode": ["postcode", "post code"]}
for element in details.keys():
names = details.get(element)
for item in names:
if item in string:
# use first element of array in tuple, as that is a nicer name than the key
requested.append((details.get(element)[0], self.recommendation[element]))
break
return requested
| StarcoderdataPython |
9667943 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'ApprovalSettingsArgs',
'ApprovalStageArgs',
'RoleManagementPolicyApprovalRuleArgs',
'RoleManagementPolicyAuthenticationContextRuleArgs',
'RoleManagementPolicyEnablementRuleArgs',
'RoleManagementPolicyExpirationRuleArgs',
'RoleManagementPolicyNotificationRuleArgs',
'RoleManagementPolicyRuleTargetArgs',
'SingleUserArgs',
]
@pulumi.input_type
class ApprovalSettingsArgs:
def __init__(__self__, *,
approval_mode: Optional[pulumi.Input[Union[str, 'ApprovalMode']]] = None,
approval_stages: Optional[pulumi.Input[Sequence[pulumi.Input['ApprovalStageArgs']]]] = None,
is_approval_required: Optional[pulumi.Input[bool]] = None,
is_approval_required_for_extension: Optional[pulumi.Input[bool]] = None,
is_requestor_justification_required: Optional[pulumi.Input[bool]] = None):
"""
The approval settings.
:param pulumi.Input[Union[str, 'ApprovalMode']] approval_mode: The type of rule
:param pulumi.Input[Sequence[pulumi.Input['ApprovalStageArgs']]] approval_stages: The approval stages of the request.
:param pulumi.Input[bool] is_approval_required: Determine whether approval is required or not.
:param pulumi.Input[bool] is_approval_required_for_extension: Determine whether approval is required for assignment extension.
:param pulumi.Input[bool] is_requestor_justification_required: Determine whether requestor justification required.
"""
if approval_mode is not None:
pulumi.set(__self__, "approval_mode", approval_mode)
if approval_stages is not None:
pulumi.set(__self__, "approval_stages", approval_stages)
if is_approval_required is not None:
pulumi.set(__self__, "is_approval_required", is_approval_required)
if is_approval_required_for_extension is not None:
pulumi.set(__self__, "is_approval_required_for_extension", is_approval_required_for_extension)
if is_requestor_justification_required is not None:
pulumi.set(__self__, "is_requestor_justification_required", is_requestor_justification_required)
@property
@pulumi.getter(name="approvalMode")
def approval_mode(self) -> Optional[pulumi.Input[Union[str, 'ApprovalMode']]]:
"""
The type of rule
"""
return pulumi.get(self, "approval_mode")
@approval_mode.setter
def approval_mode(self, value: Optional[pulumi.Input[Union[str, 'ApprovalMode']]]):
pulumi.set(self, "approval_mode", value)
    # Maps attribute "approval_stages" to wire name "approvalStages".
    @property
    @pulumi.getter(name="approvalStages")
    def approval_stages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ApprovalStageArgs']]]]:
        """
        The approval stages of the request.
        """
        return pulumi.get(self, "approval_stages")
    @approval_stages.setter
    def approval_stages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ApprovalStageArgs']]]]):
        pulumi.set(self, "approval_stages", value)
    # Maps attribute "is_approval_required" to wire name "isApprovalRequired".
    @property
    @pulumi.getter(name="isApprovalRequired")
    def is_approval_required(self) -> Optional[pulumi.Input[bool]]:
        """
        Determine whether approval is required or not.
        """
        return pulumi.get(self, "is_approval_required")
    @is_approval_required.setter
    def is_approval_required(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_approval_required", value)
    # Maps attribute "is_approval_required_for_extension" to wire name
    # "isApprovalRequiredForExtension".
    @property
    @pulumi.getter(name="isApprovalRequiredForExtension")
    def is_approval_required_for_extension(self) -> Optional[pulumi.Input[bool]]:
        """
        Determine whether approval is required for assignment extension.
        """
        return pulumi.get(self, "is_approval_required_for_extension")
    @is_approval_required_for_extension.setter
    def is_approval_required_for_extension(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_approval_required_for_extension", value)
    # Maps attribute "is_requestor_justification_required" to wire name
    # "isRequestorJustificationRequired".
    @property
    @pulumi.getter(name="isRequestorJustificationRequired")
    def is_requestor_justification_required(self) -> Optional[pulumi.Input[bool]]:
        """
        Determine whether requestor justification required.
        """
        return pulumi.get(self, "is_requestor_justification_required")
    @is_requestor_justification_required.setter
    def is_requestor_justification_required(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_requestor_justification_required", value)
@pulumi.input_type
class ApprovalStageArgs:
    """Input type describing one approval stage (timeout, approvers, escalation)."""
    def __init__(__self__, *,
                 approval_stage_time_out_in_days: Optional[pulumi.Input[int]] = None,
                 escalation_approvers: Optional[pulumi.Input[Sequence[pulumi.Input['SingleUserArgs']]]] = None,
                 escalation_time_in_minutes: Optional[pulumi.Input[int]] = None,
                 is_approver_justification_required: Optional[pulumi.Input[bool]] = None,
                 is_escalation_enabled: Optional[pulumi.Input[bool]] = None,
                 primary_approvers: Optional[pulumi.Input[Sequence[pulumi.Input['SingleUserArgs']]]] = None):
        """
        The approval stage.
        :param pulumi.Input[int] approval_stage_time_out_in_days: The time in days when approval request would be timed out.
        :param pulumi.Input[Sequence[pulumi.Input['SingleUserArgs']]] escalation_approvers: The escalation approver of the request.
        :param pulumi.Input[int] escalation_time_in_minutes: The time in minutes when the approval request would be escalated if the primary approver does not approves.
        :param pulumi.Input[bool] is_approver_justification_required: Determine whether approver need to provide justification for his decision.
        :param pulumi.Input[bool] is_escalation_enabled: The value determine whether escalation feature is enabled.
        :param pulumi.Input[Sequence[pulumi.Input['SingleUserArgs']]] primary_approvers: The primary approver of the request.
        """
        # Only explicitly supplied arguments are stored; absent fields stay unset.
        if approval_stage_time_out_in_days is not None:
            pulumi.set(__self__, "approval_stage_time_out_in_days", approval_stage_time_out_in_days)
        if escalation_approvers is not None:
            pulumi.set(__self__, "escalation_approvers", escalation_approvers)
        if escalation_time_in_minutes is not None:
            pulumi.set(__self__, "escalation_time_in_minutes", escalation_time_in_minutes)
        if is_approver_justification_required is not None:
            pulumi.set(__self__, "is_approver_justification_required", is_approver_justification_required)
        if is_escalation_enabled is not None:
            pulumi.set(__self__, "is_escalation_enabled", is_escalation_enabled)
        if primary_approvers is not None:
            pulumi.set(__self__, "primary_approvers", primary_approvers)
    @property
    @pulumi.getter(name="approvalStageTimeOutInDays")
    def approval_stage_time_out_in_days(self) -> Optional[pulumi.Input[int]]:
        """
        The time in days when approval request would be timed out.
        """
        return pulumi.get(self, "approval_stage_time_out_in_days")
    @approval_stage_time_out_in_days.setter
    def approval_stage_time_out_in_days(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "approval_stage_time_out_in_days", value)
    @property
    @pulumi.getter(name="escalationApprovers")
    def escalation_approvers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SingleUserArgs']]]]:
        """
        The escalation approver of the request.
        """
        return pulumi.get(self, "escalation_approvers")
    @escalation_approvers.setter
    def escalation_approvers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SingleUserArgs']]]]):
        pulumi.set(self, "escalation_approvers", value)
    @property
    @pulumi.getter(name="escalationTimeInMinutes")
    def escalation_time_in_minutes(self) -> Optional[pulumi.Input[int]]:
        """
        The time in minutes when the approval request would be escalated if the primary approver does not approves.
        """
        return pulumi.get(self, "escalation_time_in_minutes")
    @escalation_time_in_minutes.setter
    def escalation_time_in_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "escalation_time_in_minutes", value)
    @property
    @pulumi.getter(name="isApproverJustificationRequired")
    def is_approver_justification_required(self) -> Optional[pulumi.Input[bool]]:
        """
        Determine whether approver need to provide justification for his decision.
        """
        return pulumi.get(self, "is_approver_justification_required")
    @is_approver_justification_required.setter
    def is_approver_justification_required(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_approver_justification_required", value)
    @property
    @pulumi.getter(name="isEscalationEnabled")
    def is_escalation_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        The value determine whether escalation feature is enabled.
        """
        return pulumi.get(self, "is_escalation_enabled")
    @is_escalation_enabled.setter
    def is_escalation_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_escalation_enabled", value)
    @property
    @pulumi.getter(name="primaryApprovers")
    def primary_approvers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SingleUserArgs']]]]:
        """
        The primary approver of the request.
        """
        return pulumi.get(self, "primary_approvers")
    @primary_approvers.setter
    def primary_approvers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SingleUserArgs']]]]):
        pulumi.set(self, "primary_approvers", value)
@pulumi.input_type
class RoleManagementPolicyApprovalRuleArgs:
    """Input type for the approval rule of a role management policy."""
    def __init__(__self__, *,
                 rule_type: pulumi.Input[str],
                 id: Optional[pulumi.Input[str]] = None,
                 setting: Optional[pulumi.Input['ApprovalSettingsArgs']] = None,
                 target: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']] = None):
        """
        The role management policy rule.
        :param pulumi.Input[str] rule_type: The type of rule
               Expected value is 'RoleManagementPolicyApprovalRule'.
        :param pulumi.Input[str] id: The id of the rule.
        :param pulumi.Input['ApprovalSettingsArgs'] setting: The approval setting
        :param pulumi.Input['RoleManagementPolicyRuleTargetArgs'] target: The target of the current rule.
        """
        # The discriminator is always stored as this constant, regardless of the
        # rule_type argument's value.
        pulumi.set(__self__, "rule_type", 'RoleManagementPolicyApprovalRule')
        if id is not None:
            pulumi.set(__self__, "id", id)
        if setting is not None:
            pulumi.set(__self__, "setting", setting)
        if target is not None:
            pulumi.set(__self__, "target", target)
    @property
    @pulumi.getter(name="ruleType")
    def rule_type(self) -> pulumi.Input[str]:
        """
        The type of rule
        Expected value is 'RoleManagementPolicyApprovalRule'.
        """
        return pulumi.get(self, "rule_type")
    @rule_type.setter
    def rule_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "rule_type", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the rule.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def setting(self) -> Optional[pulumi.Input['ApprovalSettingsArgs']]:
        """
        The approval setting
        """
        return pulumi.get(self, "setting")
    @setting.setter
    def setting(self, value: Optional[pulumi.Input['ApprovalSettingsArgs']]):
        pulumi.set(self, "setting", value)
    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]:
        """
        The target of the current rule.
        """
        return pulumi.get(self, "target")
    @target.setter
    def target(self, value: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]):
        pulumi.set(self, "target", value)
@pulumi.input_type
class RoleManagementPolicyAuthenticationContextRuleArgs:
    """Input type for the authentication-context rule of a role management policy."""
    def __init__(__self__, *,
                 rule_type: pulumi.Input[str],
                 claim_value: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 is_enabled: Optional[pulumi.Input[bool]] = None,
                 target: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']] = None):
        """
        The role management policy rule.
        :param pulumi.Input[str] rule_type: The type of rule
               Expected value is 'RoleManagementPolicyAuthenticationContextRule'.
        :param pulumi.Input[str] claim_value: The claim value.
        :param pulumi.Input[str] id: The id of the rule.
        :param pulumi.Input[bool] is_enabled: The value indicating if rule is enabled.
        :param pulumi.Input['RoleManagementPolicyRuleTargetArgs'] target: The target of the current rule.
        """
        # The discriminator is always stored as this constant, regardless of the
        # rule_type argument's value.
        pulumi.set(__self__, "rule_type", 'RoleManagementPolicyAuthenticationContextRule')
        if claim_value is not None:
            pulumi.set(__self__, "claim_value", claim_value)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if is_enabled is not None:
            pulumi.set(__self__, "is_enabled", is_enabled)
        if target is not None:
            pulumi.set(__self__, "target", target)
    @property
    @pulumi.getter(name="ruleType")
    def rule_type(self) -> pulumi.Input[str]:
        """
        The type of rule
        Expected value is 'RoleManagementPolicyAuthenticationContextRule'.
        """
        return pulumi.get(self, "rule_type")
    @rule_type.setter
    def rule_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "rule_type", value)
    @property
    @pulumi.getter(name="claimValue")
    def claim_value(self) -> Optional[pulumi.Input[str]]:
        """
        The claim value.
        """
        return pulumi.get(self, "claim_value")
    @claim_value.setter
    def claim_value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "claim_value", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the rule.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        The value indicating if rule is enabled.
        """
        return pulumi.get(self, "is_enabled")
    @is_enabled.setter
    def is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_enabled", value)
    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]:
        """
        The target of the current rule.
        """
        return pulumi.get(self, "target")
    @target.setter
    def target(self, value: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]):
        pulumi.set(self, "target", value)
@pulumi.input_type
class RoleManagementPolicyEnablementRuleArgs:
    """Input type for the enablement rule of a role management policy."""
    def __init__(__self__, *,
                 rule_type: pulumi.Input[str],
                 enabled_rules: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 target: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']] = None):
        """
        The role management policy rule.
        :param pulumi.Input[str] rule_type: The type of rule
               Expected value is 'RoleManagementPolicyEnablementRule'.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] enabled_rules: The list of enabled rules.
        :param pulumi.Input[str] id: The id of the rule.
        :param pulumi.Input['RoleManagementPolicyRuleTargetArgs'] target: The target of the current rule.
        """
        # The discriminator is always stored as this constant, regardless of the
        # rule_type argument's value.
        pulumi.set(__self__, "rule_type", 'RoleManagementPolicyEnablementRule')
        if enabled_rules is not None:
            pulumi.set(__self__, "enabled_rules", enabled_rules)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if target is not None:
            pulumi.set(__self__, "target", target)
    @property
    @pulumi.getter(name="ruleType")
    def rule_type(self) -> pulumi.Input[str]:
        """
        The type of rule
        Expected value is 'RoleManagementPolicyEnablementRule'.
        """
        return pulumi.get(self, "rule_type")
    @rule_type.setter
    def rule_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "rule_type", value)
    @property
    @pulumi.getter(name="enabledRules")
    def enabled_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of enabled rules.
        """
        return pulumi.get(self, "enabled_rules")
    @enabled_rules.setter
    def enabled_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "enabled_rules", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the rule.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]:
        """
        The target of the current rule.
        """
        return pulumi.get(self, "target")
    @target.setter
    def target(self, value: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]):
        pulumi.set(self, "target", value)
@pulumi.input_type
class RoleManagementPolicyExpirationRuleArgs:
    """Input type for the expiration rule of a role management policy."""
    def __init__(__self__, *,
                 rule_type: pulumi.Input[str],
                 id: Optional[pulumi.Input[str]] = None,
                 is_expiration_required: Optional[pulumi.Input[bool]] = None,
                 maximum_duration: Optional[pulumi.Input[str]] = None,
                 target: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']] = None):
        """
        The role management policy rule.
        :param pulumi.Input[str] rule_type: The type of rule
               Expected value is 'RoleManagementPolicyExpirationRule'.
        :param pulumi.Input[str] id: The id of the rule.
        :param pulumi.Input[bool] is_expiration_required: The value indicating whether expiration is required.
        :param pulumi.Input[str] maximum_duration: The maximum duration of expiration in timespan.
        :param pulumi.Input['RoleManagementPolicyRuleTargetArgs'] target: The target of the current rule.
        """
        # The discriminator is always stored as this constant, regardless of the
        # rule_type argument's value.
        pulumi.set(__self__, "rule_type", 'RoleManagementPolicyExpirationRule')
        if id is not None:
            pulumi.set(__self__, "id", id)
        if is_expiration_required is not None:
            pulumi.set(__self__, "is_expiration_required", is_expiration_required)
        if maximum_duration is not None:
            pulumi.set(__self__, "maximum_duration", maximum_duration)
        if target is not None:
            pulumi.set(__self__, "target", target)
    @property
    @pulumi.getter(name="ruleType")
    def rule_type(self) -> pulumi.Input[str]:
        """
        The type of rule
        Expected value is 'RoleManagementPolicyExpirationRule'.
        """
        return pulumi.get(self, "rule_type")
    @rule_type.setter
    def rule_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "rule_type", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the rule.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="isExpirationRequired")
    def is_expiration_required(self) -> Optional[pulumi.Input[bool]]:
        """
        The value indicating whether expiration is required.
        """
        return pulumi.get(self, "is_expiration_required")
    @is_expiration_required.setter
    def is_expiration_required(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_expiration_required", value)
    @property
    @pulumi.getter(name="maximumDuration")
    def maximum_duration(self) -> Optional[pulumi.Input[str]]:
        """
        The maximum duration of expiration in timespan.
        """
        return pulumi.get(self, "maximum_duration")
    @maximum_duration.setter
    def maximum_duration(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "maximum_duration", value)
    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]:
        """
        The target of the current rule.
        """
        return pulumi.get(self, "target")
    @target.setter
    def target(self, value: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]):
        pulumi.set(self, "target", value)
@pulumi.input_type
class RoleManagementPolicyNotificationRuleArgs:
    """Input type for the notification rule of a role management policy."""
    def __init__(__self__, *,
                 rule_type: pulumi.Input[str],
                 id: Optional[pulumi.Input[str]] = None,
                 notification_level: Optional[pulumi.Input[Union[str, 'NotificationLevel']]] = None,
                 notification_recipients: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 notification_type: Optional[pulumi.Input[Union[str, 'NotificationDeliveryMechanism']]] = None,
                 recipient_type: Optional[pulumi.Input[Union[str, 'RecipientType']]] = None,
                 target: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']] = None):
        """
        The role management policy rule.
        :param pulumi.Input[str] rule_type: The type of rule
               Expected value is 'RoleManagementPolicyNotificationRule'.
        :param pulumi.Input[str] id: The id of the rule.
        :param pulumi.Input[Union[str, 'NotificationLevel']] notification_level: The notification level.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] notification_recipients: The list notification recipients.
        :param pulumi.Input[Union[str, 'NotificationDeliveryMechanism']] notification_type: The type of notification.
        :param pulumi.Input[Union[str, 'RecipientType']] recipient_type: The recipient type.
        :param pulumi.Input['RoleManagementPolicyRuleTargetArgs'] target: The target of the current rule.
        """
        # The discriminator is always stored as this constant, regardless of the
        # rule_type argument's value.
        pulumi.set(__self__, "rule_type", 'RoleManagementPolicyNotificationRule')
        if id is not None:
            pulumi.set(__self__, "id", id)
        if notification_level is not None:
            pulumi.set(__self__, "notification_level", notification_level)
        if notification_recipients is not None:
            pulumi.set(__self__, "notification_recipients", notification_recipients)
        if notification_type is not None:
            pulumi.set(__self__, "notification_type", notification_type)
        if recipient_type is not None:
            pulumi.set(__self__, "recipient_type", recipient_type)
        if target is not None:
            pulumi.set(__self__, "target", target)
    @property
    @pulumi.getter(name="ruleType")
    def rule_type(self) -> pulumi.Input[str]:
        """
        The type of rule
        Expected value is 'RoleManagementPolicyNotificationRule'.
        """
        return pulumi.get(self, "rule_type")
    @rule_type.setter
    def rule_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "rule_type", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the rule.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="notificationLevel")
    def notification_level(self) -> Optional[pulumi.Input[Union[str, 'NotificationLevel']]]:
        """
        The notification level.
        """
        return pulumi.get(self, "notification_level")
    @notification_level.setter
    def notification_level(self, value: Optional[pulumi.Input[Union[str, 'NotificationLevel']]]):
        pulumi.set(self, "notification_level", value)
    @property
    @pulumi.getter(name="notificationRecipients")
    def notification_recipients(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list notification recipients.
        """
        return pulumi.get(self, "notification_recipients")
    @notification_recipients.setter
    def notification_recipients(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "notification_recipients", value)
    @property
    @pulumi.getter(name="notificationType")
    def notification_type(self) -> Optional[pulumi.Input[Union[str, 'NotificationDeliveryMechanism']]]:
        """
        The type of notification.
        """
        return pulumi.get(self, "notification_type")
    @notification_type.setter
    def notification_type(self, value: Optional[pulumi.Input[Union[str, 'NotificationDeliveryMechanism']]]):
        pulumi.set(self, "notification_type", value)
    @property
    @pulumi.getter(name="recipientType")
    def recipient_type(self) -> Optional[pulumi.Input[Union[str, 'RecipientType']]]:
        """
        The recipient type.
        """
        return pulumi.get(self, "recipient_type")
    @recipient_type.setter
    def recipient_type(self, value: Optional[pulumi.Input[Union[str, 'RecipientType']]]):
        pulumi.set(self, "recipient_type", value)
    @property
    @pulumi.getter
    def target(self) -> Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]:
        """
        The target of the current rule.
        """
        return pulumi.get(self, "target")
    @target.setter
    def target(self, value: Optional[pulumi.Input['RoleManagementPolicyRuleTargetArgs']]):
        pulumi.set(self, "target", value)
@pulumi.input_type
class RoleManagementPolicyRuleTargetArgs:
    """Input type describing the target (scope) a policy rule applies to."""
    def __init__(__self__, *,
                 caller: Optional[pulumi.Input[str]] = None,
                 enforced_settings: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 inheritable_settings: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 level: Optional[pulumi.Input[str]] = None,
                 operations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 target_objects: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        The role management policy rule target.
        :param pulumi.Input[str] caller: The caller of the setting.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] enforced_settings: The list of enforced settings.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] inheritable_settings: The list of inheritable settings.
        :param pulumi.Input[str] level: The assignment level to which it is applied.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] operations: The type of operation.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] target_objects: The list of target objects.
        """
        # Only explicitly supplied arguments are stored; absent fields stay unset.
        if caller is not None:
            pulumi.set(__self__, "caller", caller)
        if enforced_settings is not None:
            pulumi.set(__self__, "enforced_settings", enforced_settings)
        if inheritable_settings is not None:
            pulumi.set(__self__, "inheritable_settings", inheritable_settings)
        if level is not None:
            pulumi.set(__self__, "level", level)
        if operations is not None:
            pulumi.set(__self__, "operations", operations)
        if target_objects is not None:
            pulumi.set(__self__, "target_objects", target_objects)
    @property
    @pulumi.getter
    def caller(self) -> Optional[pulumi.Input[str]]:
        """
        The caller of the setting.
        """
        return pulumi.get(self, "caller")
    @caller.setter
    def caller(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "caller", value)
    @property
    @pulumi.getter(name="enforcedSettings")
    def enforced_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of enforced settings.
        """
        return pulumi.get(self, "enforced_settings")
    @enforced_settings.setter
    def enforced_settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "enforced_settings", value)
    @property
    @pulumi.getter(name="inheritableSettings")
    def inheritable_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of inheritable settings.
        """
        return pulumi.get(self, "inheritable_settings")
    @inheritable_settings.setter
    def inheritable_settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "inheritable_settings", value)
    @property
    @pulumi.getter
    def level(self) -> Optional[pulumi.Input[str]]:
        """
        The assignment level to which it is applied.
        """
        return pulumi.get(self, "level")
    @level.setter
    def level(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "level", value)
    @property
    @pulumi.getter
    def operations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The type of operation.
        """
        return pulumi.get(self, "operations")
    @operations.setter
    def operations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "operations", value)
    @property
    @pulumi.getter(name="targetObjects")
    def target_objects(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The list of target objects.
        """
        return pulumi.get(self, "target_objects")
    @target_objects.setter
    def target_objects(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "target_objects", value)
@pulumi.input_type
class SingleUserArgs:
    """Input type describing a single user (approver)."""
    def __init__(__self__, *,
                 user_type: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 is_backup: Optional[pulumi.Input[bool]] = None):
        """
        The detail of a user.
        :param pulumi.Input[str] user_type: The object id of the user.
               Expected value is 'SingleUser'.
        :param pulumi.Input[str] description: The description of the user.
        :param pulumi.Input[str] id: The object id of the user.
        :param pulumi.Input[bool] is_backup: The value indicating whether the user is a backup fallback approver
        """
        # The discriminator is always stored as this constant, regardless of the
        # user_type argument's value.
        pulumi.set(__self__, "user_type", 'SingleUser')
        if description is not None:
            pulumi.set(__self__, "description", description)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if is_backup is not None:
            pulumi.set(__self__, "is_backup", is_backup)
    @property
    @pulumi.getter(name="userType")
    def user_type(self) -> pulumi.Input[str]:
        """
        The object id of the user.
        Expected value is 'SingleUser'.
        """
        return pulumi.get(self, "user_type")
    @user_type.setter
    def user_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_type", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the user.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The object id of the user.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="isBackup")
    def is_backup(self) -> Optional[pulumi.Input[bool]]:
        """
        The value indicating whether the user is a backup fallback approver
        """
        return pulumi.get(self, "is_backup")
    @is_backup.setter
    def is_backup(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_backup", value)
| StarcoderdataPython |
5160403 | <gh_stars>1-10
import pytest
from boto3 import client
from unittest.mock import patch
from unittest.mock import MagicMock
from botocore.exceptions import ClientError
def create_mock_exc(message='', code=''):
    """Build a botocore ClientError carrying the given error message and code.

    The operation name is fixed to 'any' — tests only care about the payload.
    """
    error_response = {'Error': {'Message': message, 'Code': code}}
    return ClientError(error_response, 'any')
# Pre-built ClientError fixtures mirroring common DynamoDB failure responses.
mock_bad_gateway_exc = create_mock_exc(message='Bad Gateway')
mock_condition_not_met_exc = create_mock_exc(code='ConditionalCheckFailedException')
mock_table_does_not_exist_exc = create_mock_exc(message='Cannot do operations on a non-existent table')
mock_preexisting_table_exc = create_mock_exc(message='Cannot create preexisting table')
# Deliberately unrecognised message, for exercising fallback error handling.
mock_unknown_exc = create_mock_exc(message='nothing you ever heard of before')
class MockDynamoClient:
    """Stand-in for a boto3 DynamoDB client whose operations are all no-ops.

    Every supported operation accepts any arguments and returns None.
    """
    def __init__(self):
        def _noop(*args, **kwargs):
            return None
        for operation in ('put_item', 'create_table', 'delete_item',
                          'describe_table', 'get_item', 'query',
                          'update_item', 'scan'):
            setattr(self, operation, _noop)
# Shared instance for tests that just need any no-op client object.
mock_dynamo_client = MockDynamoClient()
| StarcoderdataPython |
8137651 | <gh_stars>10-100
from typing import (Callable,
Tuple)
from ground.base import (Location,
Relation)
from ground.hints import (Point,
Segment)
# Callable taking four points and returning a Location (presumably: locate the
# fourth point relative to the circle through the first three — confirm upstream).
PointInCircleLocator = Callable[[Point, Point, Point, Point], Location]
# A segment expressed as its pair of endpoint points.
SegmentEndpoints = Tuple[Point, Point]
# Predicate answering whether the segment contains the point.
SegmentContainmentChecker = Callable[[Segment, Point], bool]
# Callable computing the Relation between two segments.
SegmentsRelater = Callable[[Segment, Segment], Relation]
| StarcoderdataPython |
6430887 | <gh_stars>0
from lingpy import *
from sys import argv
# Output prefix is selected by the command line: passing 'all' switches from
# the D_ prefix to the A_ prefix (naming convention defined by the project).
if 'all' in argv:
    fname='../output/A_Deepadung_'
else:
    fname='../output/D_Deepadung_'
# Align the partial cognate sets (keyed by 'cogids') and write them back as TSV.
alms = Alignments(fname+'partial.tsv', ref='cogids')
alms.align()
alms.output('tsv', filename=fname+'aligned', prettify=False)
| StarcoderdataPython |
3257026 | <filename>tests/test_fda.py
import numpy as np
import unittest
import finite_depth_analysis as fda
c0 = 299792458  # speed of light in vacuum [m/s]
n0 = 1.5  # refractive index of the medium (assumed value — TODO confirm)
c = c0/n0  # light speed inside the medium
def h_old(lambdas, lambda_prime, Z, r=-1, mode=1, k0=0):
    """Reference (non-vectorised) implementation of fda.h for one lambda_prime.

    Kept only so TestH.test_old can cross-check the vectorised fda.h.
    mode 2 returns only the r-weighted side terms, mode 3 only the central
    term, any other mode the sum of all three.
    """
    omega = 2*np.pi*c/lambdas
    omega_prime = 2*np.pi*c/lambda_prime
    if mode==2:
        # 'inf' depth is approximated by a large finite value.
        if Z == 'inf' or Z == 'infinite':
            Z = 100E-6
        s1 = fda.s_z_tilde_dev(Z, omega_prime-omega, k0=k0)
        s3 = fda.s_z_tilde_dev(Z, omega_prime+omega, k0=k0)
        return r/2*s1 + np.conj(r)/2*s3
    elif mode==3:
        s2 = fda.s_z_tilde_dev(Z, omega_prime, k0=k0)
        return (1+np.abs(r)**2)/2*s2
    else:
        s2 = fda.s_z_tilde_dev(Z, omega_prime, k0=k0)
        if Z == 'inf' or Z == 'infinite':
            Z = 100E-6
        s1 = fda.s_z_tilde_dev(Z, omega_prime-omega, k0=k0)
        s3 = fda.s_z_tilde_dev(Z, omega_prime+omega, k0=k0)
        return r/2*s1 + (1+np.abs(r)**2)/2*s2 + np.conj(r)/2*s3
class TestSZTildeDev(unittest.TestCase):
    # Placeholder: the vectorisation of s_z_tilde_dev is not actually
    # exercised yet; this always passes.
    def test_vectorisation(self):
        self.assertEqual(True, True)
class TestH(unittest.TestCase):
    # Shared fixture parameters for all fda.h() calls below.
    lambdas_over = np.linspace(0.1, 1)
    lambdas = np.linspace(0.1, 1, 10)
    Z = 0.3
    r = 0.2
    k0 = 1
    def test_dimensions(self):
        # h() must return one row per lambda_prime and one column per lambda.
        A = fda.h(self.lambdas_over, self.lambdas, self.Z, r=self.r, mode=2, k0=self.k0)
        A0 = fda.h(self.lambdas_over, self.lambdas, self.Z, r=self.r, mode=3, k0=self.k0)
        self.assertEqual(A.shape[0], len(self.lambdas))
        self.assertEqual(A.shape[1], len(self.lambdas_over))
        self.assertEqual(A0.shape[0], len(self.lambdas))
        self.assertEqual(A0.shape[1], len(self.lambdas_over))
    def test_old(self):
        # The vectorised h() must reproduce the row-by-row reference h_old().
        A_new = fda.h(self.lambdas_over, self.lambdas, self.Z, r=self.r, mode=2, k0=self.k0)
        A0_new = fda.h(self.lambdas_over, self.lambdas, self.Z, r=self.r, mode=3, k0=self.k0)
        A = np.zeros((len(self.lambdas), len(self.lambdas_over)), dtype=complex)
        A0 = np.zeros((len(self.lambdas), len(self.lambdas_over)), dtype=complex)
        for i, lambda_prime in enumerate(self.lambdas):
            A[i, :] = h_old(self.lambdas_over, lambda_prime, self.Z, r=self.r, mode=2, k0=self.k0)
            A0[i, :] = h_old(self.lambdas_over, lambda_prime, self.Z, r=self.r, mode=3, k0=self.k0)
        np.testing.assert_array_almost_equal(A_new, A)
        np.testing.assert_array_almost_equal(A0_new, A0)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
6410322 | import os
import toml
def get_version() -> str:
    """Read the package version from the pyproject.toml one level above this file."""
    pyproject_path = os.path.join(os.path.dirname(__file__), "..", "pyproject.toml")
    pyproject = toml.load(pyproject_path)
    return pyproject["tool"]["poetry"]["version"]
if __name__ == "__main__":  # pragma: no cover
    # Print the version when invoked as a script (e.g. from CI).
    print(get_version())
| StarcoderdataPython |
11287406 | #!/usr/bin/env python
# coding=utf-8
"""
Runs an execution node.
"""
import sys
import ConfigParser
from suricate.analytics import exec_node
__author__ = 'tmetsch'
# NOTE(review): uses the Python 2 ConfigParser module name — not Python 3 code.
config = ConfigParser.RawConfigParser()
config.read('app.conf')
# MongoDB connection
mongo = config.get('mongo', 'uri')
# Rabbit part
broker = config.get('rabbit', 'uri')
# SDK
sdk = config.get('suricate', 'python_sdk')
if __name__ == '__main__':
    if len(sys.argv) < 2:
        raise AttributeError('please provide a tenant id for this execution '
                             'node as first argument!')
    user = sys.argv[1]
    # Start an execution node bound to the given tenant id.
    exec_node.ExecNode(mongo, broker, sdk, user)
| StarcoderdataPython |
1799319 | '''Crie um programa que leia o ano de nascimento de sete pessoas.
No final, mostre quantas pessoas ainda não atingiram a maioridade e quantas já são maiores.'''
from datetime import date
# Tally how many of seven people are of legal age (18+) versus minors.
maior = 0
menor = 0
ano_atual = date.today().year
for pessoa in range(1, 8):
    ano_nascimento = int(input(f'qual o {pessoa}° ano de nascimento? '))
    if ano_atual - ano_nascimento >= 18:
        maior += 1
    else:
        menor += 1
print(f'''ao todo tivemos {maior} pessoas maior de idade,
e {menor} pessoas menor de idade.''')
| StarcoderdataPython |
6675742 | <gh_stars>0
from geo.vector import Vector
# Smoke-test Vector: construction, repr, equality, addition and subtraction.
x = Vector([1,2,3])
print(x)
print(x == Vector([1,2,3]))
print(x == Vector([1,2,-3]))
print(x + Vector([1,2,-3]))
print(x - Vector([1,2,-3]))
print(Vector([1,2,-3]) - x)
| StarcoderdataPython |
3303664 | <reponame>juanrgon/advent-of-code<gh_stars>1-10
# (input, expected) pairs: the day-6 example after 80 and 256 days respectively.
TEST = (("3,4,3,1,2", 5934),)
TEST2 = (("3,4,3,1,2", 26984457539),)
import sys
from pathlib import Path
from functools import cache
import aoc
@aoc.submit(part=1)
@aoc.get_input
@aoc.tests(TEST)
@aoc.parse_text
def part_1(raw: str, ints: list[int], strs: list[str]):
    """Total number of fish after simulating every starting timer for 80 days."""
    return sum(total_fish(fish, days=80) for fish in ints)
@aoc.submit(part=2)
@aoc.get_input
@aoc.tests(TEST2)
@aoc.parse_text
def part_2(raw: str, ints: list[int], strs: list[str]):
    """Same as part 1 but over 256 days (feasible thanks to memoisation)."""
    return sum(total_fish(fish, days=256) for fish in ints)
@cache
def total_fish(timer: int, days: int) -> int:
    """Number of fish (including this one) that a single fish with the given
    internal timer accounts for after `days` days have elapsed."""
    if days == 0:
        return 1
    remaining = days - 1
    if timer == 0:
        # Spawn day: the parent resets to 6 and a child starts at 8.
        return total_fish(6, days=remaining) + total_fish(8, days=remaining)
    return total_fish(timer - 1, days=remaining)
if __name__ == "__main__":
    # The decorators resolve the puzzle input from this file's path.
    print("Part 1:", part_1(__file__))
    print("Part 2:", part_2(__file__))
| StarcoderdataPython |
12833262 | #!/usr/bin/env python
# NOTE(review): Python 2 syntax (print statements); will not run on Python 3
# without porting.
import socket
# Single-client, line-oriented TCP server: reads one line at a time and
# answers each with "@OK".
TCP_IP = '127.0.0.1'
TCP_PORT = 7116
# NOTE(review): BUFFER_SIZE is defined but never used -- reads go through
# fd.readline() below.
BUFFER_SIZE = 20 # Normally 1024, but we want fast response
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allow quick restarts on the same port.
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
conn, addr = s.accept()
print 'Connection address:', addr
# File wrapper gives buffered, line-based reads over the socket.
fd = conn.makefile()
while 1:
    data = fd.readline()
    print "received data:", data
    fd.write("@OK\r\n") # echo
    fd.flush()
# NOTE(review): unreachable -- the `while 1` loop above never exits.
conn.close()
| StarcoderdataPython |
4802173 | <filename>classicML/api/plots/callbacks.py
from classicML.api.plots import _plt as plt
from classicML.api.plots.utils import _set_history_axis_and_background
from classicML.api.plots.utils import _history_plot_config
def plot_history(history):
    """Visualize a training history record.

    Arguments:
        history: a classicML.backend.callbacks.History instance; its
            loss/metric sequences and their display names are plotted.
    """
    _, ax = plt.subplots()
    _set_history_axis_and_background(ax)
    # Plot the loss curve.
    ax.plot(history.loss, label=history.loss_name, c='lightcoral')
    # Plot the evaluation-metric curve.
    ax.plot(history.metric, label=history.metric_name, c='c')
    _history_plot_config()
    plt.show()
| StarcoderdataPython |
1712334 | <filename>hotword.py
import snowboydecoder
import sys
import signal
import os.path
class hotword:
    """Snowboy-based hotword listener for the bundled 'HARU' model."""

    def __init__(self):
        # Set by the SIGINT handler; polled by the detector loop.
        self.interrupted = False
        # The model file lives next to this module.
        self.model = os.path.dirname(__file__) + '/HARU.pmdl'

    def signal_handler(self, signal, frame):
        """SIGINT handler: request the detector loop to stop."""
        self.interrupted = True

    def interrupt_callback(self):
        """Polled by snowboy to decide whether to keep listening."""
        return self.interrupted

    def start_detection(self, callback_func):
        """Block and listen; invoke callback_func each time the hotword fires."""
        signal.signal(signal.SIGINT, self.signal_handler)
        self.detector = snowboydecoder.HotwordDetector(self.model, sensitivity=0.5)
        self.detector.start(
            detected_callback=callback_func,
            interrupt_check=self.interrupt_callback,
            sleep_time=5,
        )

    def terminate_detection(self):
        """Release the detector's audio resources."""
        self.detector.terminate()
| StarcoderdataPython |
103658 | <reponame>VU-IVM/Toponym-based-Algorithm-for-Grouped-Geoparsing-of-Social-media
import pytz
import datetime
import operator
def isoformat_2_date(datestr):
    """Parse an ISO-8601 'YYYY-MM-DDTHH:MM:SS' string into a naive datetime."""
    iso_format = '%Y-%m-%dT%H:%M:%S'
    return datetime.datetime.strptime(datestr, iso_format)
def daterange(start_date, end_date, timedelta, ranges=False, include_last=False, UTC=False):
    """Yield datetimes (or (start, end) windows) stepping from start_date toward end_date.

    Args:
        start_date, end_date: datetime bounds of the range.
        timedelta: step size; either a datetime.timedelta or a number of seconds.
        ranges: if True, yield (window_start, window_end) tuples instead of datetimes.
        include_last: if True, a value equal to end_date may still be yielded.
        UTC: if True, force both bounds to be UTC-aware before iterating.
    """
    if UTC:
        start_date = start_date.replace(tzinfo=pytz.UTC)
        end_date = end_date.replace(tzinfo=pytz.UTC)
    if not isinstance(timedelta, datetime.timedelta):
        # Accept a plain seconds count as the step size.
        timedelta = datetime.timedelta(seconds=int(timedelta))
    # <= keeps the end bound itself; < stops just before it.
    in_bounds = operator.le if include_last else operator.lt
    current = start_date
    while in_bounds(current, end_date):
        yield (current, current + timedelta) if ranges else current
        current += timedelta
def date_handler(obj):
    """json.dumps `default=` hook: serialize datetimes as ISO-8601 strings."""
    if not isinstance(obj, datetime.datetime):
        raise TypeError("Type not serializable")
    return obj.isoformat()
| StarcoderdataPython |
9733865 | <filename>venv/Lib/site-packages/pybrain/rl/explorers/__init__.py
from discrete.__init__ import *
from continuous.__init__ import * | StarcoderdataPython |
3288584 | from ._basic import FilterConstant
from ._gbdt import GBDTFeatureSelector
| StarcoderdataPython |
330624 | <filename>scrapy/tests/test_settings.py
import unittest
from scrapy.settings import Settings
from scrapy.utils.test import get_crawler
from scrapy.spider import BaseSpider
class SettingsTest(unittest.TestCase):
    """Exercises the typed accessors on scrapy.settings.Settings."""
    def test_get(self):
        # Seed one Settings object with every storage form the typed getters
        # must coerce: bools from '1'/'0'/True/False/ints, ints and floats
        # from numbers or strings, lists from a real list or a CSV string.
        settings = Settings({
            'TEST_ENABLED1': '1',
            'TEST_ENABLED2': True,
            'TEST_ENABLED3': 1,
            'TEST_DISABLED1': '0',
            'TEST_DISABLED2': False,
            'TEST_DISABLED3': 0,
            'TEST_INT1': 123,
            'TEST_INT2': '123',
            'TEST_FLOAT1': 123.45,
            'TEST_FLOAT2': '123.45',
            'TEST_LIST1': ['one', 'two'],
            'TEST_LIST2': 'one,two',
            'TEST_STR': 'value',
        })
        assert settings.getbool('TEST_ENABLED1') is True
        assert settings.getbool('TEST_ENABLED2') is True
        assert settings.getbool('TEST_ENABLED3') is True
        # Missing keys fall back to False unless a default is supplied.
        assert settings.getbool('TEST_ENABLEDx') is False
        assert settings.getbool('TEST_ENABLEDx', True) is True
        assert settings.getbool('TEST_DISABLED1') is False
        assert settings.getbool('TEST_DISABLED2') is False
        assert settings.getbool('TEST_DISABLED3') is False
        self.assertEqual(settings.getint('TEST_INT1'), 123)
        self.assertEqual(settings.getint('TEST_INT2'), 123)
        self.assertEqual(settings.getint('TEST_INTx'), 0)
        self.assertEqual(settings.getint('TEST_INTx', 45), 45)
        self.assertEqual(settings.getfloat('TEST_FLOAT1'), 123.45)
        self.assertEqual(settings.getfloat('TEST_FLOAT2'), 123.45)
        self.assertEqual(settings.getfloat('TEST_FLOATx'), 0.0)
        self.assertEqual(settings.getfloat('TEST_FLOATx', 55.0), 55.0)
        self.assertEqual(settings.getlist('TEST_LIST1'), ['one', 'two'])
        self.assertEqual(settings.getlist('TEST_LIST2'), ['one', 'two'])
        self.assertEqual(settings.getlist('TEST_LISTx'), [])
        self.assertEqual(settings.getlist('TEST_LISTx', ['default']), ['default'])
        # Untyped access: __getitem__ returns None for missing keys,
        # get() additionally accepts a default.
        self.assertEqual(settings['TEST_STR'], 'value')
        self.assertEqual(settings.get('TEST_STR'), 'value')
        self.assertEqual(settings['TEST_STRx'], None)
        self.assertEqual(settings.get('TEST_STRx'), None)
        self.assertEqual(settings.get('TEST_STRx', 'default'), 'default')
class CrawlerSettingsTest(unittest.TestCase):
    """Settings precedence on a crawler: built-in defaults < `defaults`
    < settings module < `overrides` (each test below raises one layer)."""
    def test_global_defaults(self):
        # Nothing configured: the scrapy built-in default (180) applies.
        crawler = get_crawler()
        self.assertEqual(crawler.settings.getint('DOWNLOAD_TIMEOUT'), 180)
    def test_defaults(self):
        # The `defaults` layer beats the built-in default.
        crawler = get_crawler()
        crawler.settings.defaults['DOWNLOAD_TIMEOUT'] = '99'
        self.assertEqual(crawler.settings.getint('DOWNLOAD_TIMEOUT'), 99)
    def test_settings_module(self):
        # A dict passed to get_crawler acts as the project settings module.
        crawler = get_crawler({'DOWNLOAD_TIMEOUT': '3'})
        self.assertEqual(crawler.settings.getint('DOWNLOAD_TIMEOUT'), 3)
    def test_overrides(self):
        # The `overrides` layer beats the settings module.
        crawler = get_crawler({'DOWNLOAD_TIMEOUT': '3'})
        crawler.settings.overrides['DOWNLOAD_TIMEOUT'] = '15'
        self.assertEqual(crawler.settings.getint('DOWNLOAD_TIMEOUT'), 15)
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
5015976 | import logging
from ast import literal_eval
from datetime import datetime
from typing import Callable, Dict, Optional, Set, cast
import asyncpg
import discord
from discord.ext.commands import Cog, CommandError, Context, command, guild_only
from valuebot import RoleConfig, ValueBot
from valuebot.utils import get_message
from .db import ensure_points_table, get_user_points, user_change_points, user_set_points
from .roles import RoleManager
__all__ = ["PointCog"]
log = logging.getLogger(__name__)
# Arithmetic operators accepted by the points command ("^" is exponentiation,
# not XOR).
ARITH_OPS: Dict[str, Callable[[float, float], float]] = {
    "+": lambda lhs, rhs: lhs + rhs,
    "-": lambda lhs, rhs: lhs - rhs,
    "*": lambda lhs, rhs: lhs * rhs,
    "/": lambda lhs, rhs: lhs / rhs,
    "^": lambda lhs, rhs: lhs ** rhs,
}
class PointCog(Cog, name="Point"):
    """Keep track of user points.

    Points live in a Postgres table (keyed by user and optional guild) and
    change either via the admin `points` command or via configured
    increase/decrease emoji reactions on a user's messages.
    """
    # Injected bot instance (owns config, Postgres connection, event loop).
    bot: ValueBot
    # Maps point totals to Discord roles and applies them.
    role_manager: RoleManager
    def __init__(self, bot: ValueBot) -> None:
        self.bot = bot
        self.role_manager = RoleManager(bot.config.points.roles)
    @property
    def pg_conn(self) -> asyncpg.Connection:
        # Live Postgres connection owned by the bot.
        return self.bot.postgres_connection
    @property
    def pg_points_table(self) -> str:
        # Name of the table holding the point tallies.
        return self.bot.config.postgres_points_table
    @property
    def point_increase_reactions(self) -> Set[str]:
        # Emoji names that award a point when reacted onto a message.
        return self.bot.config.points.increase_reactions
    @property
    def point_decrease_reactions(self) -> Set[str]:
        # Emoji names that deduct a point when reacted onto a message.
        return self.bot.config.points.decrease_reactions
    @Cog.listener()
    async def on_ready(self) -> None:
        """Ensure the points table exists once the bot is connected."""
        log.info("making sure points table exists")
        await ensure_points_table(self.pg_conn, self.pg_points_table)
    async def handle_reaction_change(self, payload: discord.RawReactionActionEvent, added: bool) -> None:
        """Translate a reaction add/remove into a +/-1 point change for the
        author of the reacted-to message; DM reactions are ignored."""
        if payload.guild_id is None:
            log.debug(f"ignoring reaction by {payload.user_id} in DMs.")
            return
        emoji: discord.PartialEmoji = payload.emoji
        emoji_name: str = emoji.name
        if emoji_name in self.point_increase_reactions:
            change = 1
        elif emoji_name in self.point_decrease_reactions:
            change = -1
        else:
            # Not a point-related emoji; nothing to do.
            return
        log.debug(
            f"handling reaction change (added={added}) {emoji_name} "
            f"[msg={payload.message_id}, channel={payload.channel_id}, guild={payload.guild_id}]"
        )
        if not added:
            # Removing a reaction undoes its original effect.
            change *= -1
        channel: Optional[discord.TextChannel] = self.bot.get_channel(payload.channel_id)
        if not channel:
            log.warning(f"Can't track reaction change, channel with id {payload.channel_id} not in cache")
            return
        message = await get_message(channel, payload.message_id)
        user_id = message.author.id
        guild_id = message.guild.id if message.guild else None
        log.debug(f"Changing points of {message.author} by {change}")
        await self.change_points(user_id, guild_id, change)
    @Cog.listener()
    async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent) -> None:
        await self.handle_reaction_change(payload, True)
    @Cog.listener()
    async def on_raw_reaction_remove(self, payload: discord.RawReactionActionEvent) -> None:
        await self.handle_reaction_change(payload, False)
    async def show_points(self, ctx: Context, *, user: discord.User = None) -> None:
        """Show the amount of points a user has.

        Viewing costs the command author one point.
        """
        user = user or ctx.author
        guild_id = ctx.guild.id if ctx.guild else None
        embed = discord.Embed(colour=discord.Colour.blue(), timestamp=datetime.utcnow())
        embed.set_author(name=user.display_name, icon_url=user.avatar_url)
        # remove point first for the special case of user == author
        # NOTE(review): change_points returns the *previous* total (see
        # below), so the footer's "You now have ..." shows the pre-deduction
        # count -- confirm which is intended.
        author_points = await self.change_points(ctx.author.id, guild_id, -1)
        if user == ctx.author:
            points = author_points
        else:
            points = await self.get_points(user.id, guild_id)
        if points is None:
            embed.description = f"{user.mention} hasn't received any points yet."
        else:
            role = self.get_role_for(points)
            if role:
                role_text = f" and is part of the role **{role.name}**"
            else:
                role_text = ""
            embed.description = f"{user.mention} currently has **{points}** point(s) {role_text}"
        embed.set_footer(
            text=f"You paid a point to see the points of {user.name}. You now have {author_points} point(s).")
        await self.bot.send_embed(ctx, embed)
    @guild_only()
    @command("points", aliases=["alter"])
    async def points_cmd(self, ctx: Context, user: discord.User = None, *, value: str = None) -> None:
        """Change/Inspect a user's points.

        `value` is either a plain number ("= assignment") or an operator
        from ARITH_OPS followed by a number (applied to the current total).
        Without a value, just shows the points. Changing requires admin.
        """
        if value:
            value = value.replace(" ", "")
        user_id = user.id if user else ctx.author.id
        guild_id: Optional[int] = ctx.guild.id if ctx.guild else None
        if not value:
            await self.show_points(ctx, user=user)
            return
        perms = cast(discord.TextChannel, ctx.channel).permissions_for(ctx.author)
        if not perms.administrator:
            raise CommandError("Your are missing the Administrator permission to manipulate points.")
        try:
            # First character may select an arithmetic operator.
            arith_op_str = value[0]
            arith_op = ARITH_OPS[arith_op_str]
        except KeyError:
            arith_op_str = "="
            arith_op = None
        else:
            value = value[1:]
        try:
            numeric_value = literal_eval(value)
        except Exception:
            log.debug(f"Couldn't interpret {value} as numeric. Showing points for user instead!")
            await self.show_points(ctx, user=user)
            return
        if not isinstance(numeric_value, (int, float)):
            raise CommandError(f"{numeric_value} (\"{value}\") is not a number!")
        current_value = await self.get_points(user_id, guild_id) or 0
        if arith_op is not None:
            try:
                new_value = arith_op(current_value, numeric_value)
            except Exception:
                raise CommandError(f"Invalid operation {current_value} {arith_op_str} {value}")
        else:
            new_value = numeric_value
        new_value = round(new_value)
        # NOTE(review): from here on `user.mention`/`user.id` are used
        # directly -- this raises AttributeError when `user` is None (i.e.
        # an admin altering their own points); `user_id` above already
        # handles that case and is probably what was meant.
        if new_value == current_value:
            raise CommandError(f"{user.mention} already has {current_value} point(s)")
        await self.set_points(user.id, guild_id, new_value)
        log.info(f"changed {user}'s points from {current_value} to {new_value}")
        embed = discord.Embed(
            description=f"{user.mention} now has **{new_value}** point(s), changed from previous {current_value}",
            colour=discord.Colour.green())
        await self.bot.send_embed(ctx, embed)
    def get_role_for(self, points: int) -> Optional[RoleConfig]:
        """Get the role (if available) for the given amount of points."""
        return self.bot.config.points.roles.get_role(points)
    async def get_points(self, user_id: int, guild_id: Optional[int]) -> Optional[int]:
        """Get a user's points.
        Returns:
            The amount of points the user has in the provided guild (or globally
            if the guild is `None`). Returns `None` if the user doesn't have
            any points yet.
        """
        return await get_user_points(self.pg_conn, self.pg_points_table, user_id, guild_id)
    async def set_points(self, user_id: int, guild_id: Optional[int], value: int) -> int:
        """Set a user's points to a specific value.
        Args:
            user_id: User whose points to set.
            guild_id: Guild to set the points for, `None` for global.
            value: Amount of points to set to.
        Returns:
            The new amount of points.
        """
        prev_points = await self.get_points(user_id, guild_id)
        await user_set_points(self.pg_conn, self.pg_points_table, user_id, guild_id, value)
        # Role adjustment runs in the background; don't block the command.
        self.bot.loop.create_task(self.on_points_change(user_id, guild_id, prev_points, value))
        return value
    async def change_points(self, user_id: int, guild_id: Optional[int], change: int) -> int:
        """Change a user's points relative to their current amount.
        Args:
            user_id: User whose points to set.
            guild_id: Guild to set the points for, `None` for global.
            change: Points to add to the user's current points. Can be negative
                to subtract points.
        Returns:
            The amount of points *before* the change (0 if the user had
            none). NOTE(review): this docstring previously claimed the new
            amount, and callers such as show_points appear to expect the new
            total -- confirm which behavior is intended.
        """
        prev_points = await self.get_points(user_id, guild_id)
        await user_change_points(self.pg_conn, self.pg_points_table, user_id, guild_id, change)
        if prev_points:
            next_points = prev_points + change
        else:
            next_points = change
        # Role adjustment runs in the background; don't block the caller.
        self.bot.loop.create_task(self.on_points_change(user_id, guild_id, prev_points, next_points))
        return prev_points or 0
    async def on_points_change(self, user_id: int, guild_id: Optional[int],
                               old_points: Optional[int], new_points: Optional[int]) -> None:
        """Called when a user's points changes.

        Re-evaluates and applies the member's point-based role; bails out
        silently if the guild/member is unknown or the bot lacks the
        Manage Roles permission.
        """
        log.info(f"handling points change from {old_points} to {new_points} for {user_id} (guild={guild_id})")
        if not guild_id:
            return
        guild: Optional[discord.Guild] = self.bot.get_guild(guild_id)
        if not guild:
            return
        if not cast(discord.Member, guild.me).guild_permissions.manage_roles:
            log.debug(f"unable to adjust roles in {guild}: missing permissions")
            return
        member: Optional[discord.Member] = guild.get_member(user_id)
        if not member:
            return
        role = self.get_role_for(new_points)
        await self.role_manager.assign_role(member, role)
| StarcoderdataPython |
6615776 | from ucsb.models import user, user_asset
from rest_framework.response import Response
from django.forms.models import model_to_dict
from rest_framework.decorators import api_view
from ucsb.repository.helpers import *
from opt.optimization import *
from opt.base_load import *
from opt.utility.solar import *
from opt.utility.weather import *
from opt.utility.send_email import *
from opt.utility.scheduler import optimization
# from ucsb.repository.helpers import *
import smtplib, ssl
@api_view(['POST', 'DELETE'])
def update_user(request):
    """Update (POST) or delete (DELETE) the user identified by `email`.

    POST requires every field in `params`; DELETE requires only `email`.
    """
    if request.method == 'POST':
        params = ["email", "low_limit", "max_limit", "battery_size", "cost_or_shutoff", "hours_of_power", "longitude", "latitude", "phone_number"]
        #Check for Required Fields
        for p in params:
            if request.data.get(p, None) == None:
                return Response(
                    {"message": "Missing Required Parameters: {}".format(p)},
                    status = 400)
        #Check for Invalid Parameters
        if verify(request.data, params):
            return Response(
                {"message": "Request has invalid parameter not in {}".format(params)},
                status = 400)
        email = request.data.get('email')
        low_limit = request.data.get('low_limit')
        max_limit = request.data.get('max_limit')
        battery_size = request.data.get('battery_size')
        cost_or_shutoff = request.data.get('cost_or_shutoff')
        hours_of_power = request.data.get('hours_of_power')
        longitude = request.data.get('longitude')
        latitude = request.data.get('latitude')
        phone_number = request.data.get('phone_number')
        # NOTE(review): raises user.DoesNotExist (-> HTTP 500) for an unknown
        # email; consider handling it like the other endpoints do.
        tmp_user = user.objects.get(user_email=email)
        tmp_user.low_limit = low_limit
        tmp_user.max_limit = max_limit
        tmp_user.battery_size = battery_size
        tmp_user.cost_or_shutoff = cost_or_shutoff
        tmp_user.hours_of_power = hours_of_power
        tmp_user.longitude = longitude
        tmp_user.latitude = latitude
        tmp_user.phone_number = phone_number
        tmp_user.save()
        return Response({"detail": "User updated successfully"}, status=200)
    elif request.method == 'DELETE':
        email = request.data.get('email')
        if email == '':
            return Response({"detail": "Email cannot be empty"}, status=400)
        # NOTE(review): same unhandled DoesNotExist risk as the POST branch.
        tmp_user = user.objects.get(user_email=email)
        tmp_user.delete()
        return Response({"detail": "User deleted successfully"})
    else:
        return Response({"detail": "Error: Invalid request"}, status=400)
#test function
@api_view(['GET'])
def getAllUsers(request):
    """Debug endpoint: return every user record serialized as a plain dict."""
    # Comprehension replaces the manual append loop (same output, idiomatic).
    return Response([model_to_dict(record) for record in user.objects.all()])
@api_view(['GET'])
def get_user(request):
    """Return a single user (as a dict) looked up by the `email` query param.

    Responds 400 when `email` is missing, extra parameters are present, or
    no matching user exists.
    """
    params = ["email"]
    # Check for Required Fields
    for p in params:
        if request.query_params.get(p) is None:
            return Response(
                {"message": "Missing Required Parameters: {}".format(p)},
                status=400)
    # Check for Invalid Parameters
    if verify(request.query_params, params):
        return Response(
            {"message": "Request has invalid parameter not in {}".format(params)},
            status=400)
    email = request.query_params.get('email')
    try:
        tmp_user = user.objects.get(user_email=email)
        return Response(model_to_dict(tmp_user))
    except (user.DoesNotExist, user.MultipleObjectsReturned):
        # Narrowed from a bare `except:` so unrelated errors (DB outages,
        # programming bugs) surface as 500s instead of being misreported
        # as a missing user.
        return Response({"detail": "Error: User does not exist"}, status=400)
@api_view(['POST'])
def register_user(request):
    """Create a user record for `email` unless one already exists."""
    if request.method == 'POST':
        email = request.data.get('email')
        if email == '':
            return Response({"detail": "Email cannot be empty"}, status=400)
        try:
            user.objects.get(user_email=email)
            return Response({"detail": "User has already registered"})
        except user.MultipleObjectsReturned:
            # Duplicates already exist for this email. The previous code
            # fell into the creation branch here and added yet another
            # duplicate row; report "already registered" instead.
            return Response({"detail": "User has already registered"})
        except user.DoesNotExist:
            tmp_user = user(user_email=email)
            tmp_user.save()
            return Response({"detail": "User created successfully"}, status=200)
    else:
        # Unreachable in practice: @api_view only admits POST.
        return Response({"detail": "Error: Invalid request"}, status=400)
@api_view(['POST'])
def opt(request):
    """Run the optimizer for the user identified by `email` in the POST body."""
    required = ["email"]
    # Reject requests that are missing a required field.
    for field in required:
        if request.data.get(field, None) == None:
            return Response(
                {"message": "Missing Required Parameters: {}".format(field)},
                status = 400)
    # Reject requests carrying any field outside the allowed set.
    if verify(request.data, required):
        return Response(
            {"message": "Request has invalid parameter not in {}".format(required)},
            status = 400)
    result = optimization(request.data.get('email'))
    # result bundles best_threshold, good_times, best_schedule, should_charge
    return Response({"detail": result}, status=200)
6418880 | #!/usr/bin/env python3
"""
Exercise 44: Cages
Animals are now housed in cages.
"""
from animals import Parrot, Sheep, Snake, Wolf
from cage import Cage
if __name__ == '__main__':
    # An empty cage prints its default (empty) state.
    empty_cage = Cage()
    print(empty_cage)
    print()
    # A second cage is filled in two steps to show incremental contents.
    zoo_cage = Cage()
    wolf = Wolf('grey')
    sheep = Sheep('black')
    snake = Snake('green')
    parrot = Parrot('red')
    zoo_cage.add_animals(wolf)
    print(zoo_cage)
    print()
    zoo_cage.add_animals(parrot, sheep, snake)
    print(zoo_cage)
    print()
| StarcoderdataPython |
8148991 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import djsonb.fields
import uuid
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
    """Auto-generated schema migration.

    Moves geometry off Boundary into a new child model, BoundaryPolygon
    (one boundary -> many polygons), and adds display metadata fields
    (color, data_fields, display_field) to Boundary.
    """
    dependencies = [
        ('ashlar', '0005_auto_20150423_0148'),
    ]
    operations = [
        # New table holding the actual multipolygon geometries.
        migrations.CreateModel(
            name='BoundaryPolygon',
            fields=[
                ('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('data', djsonb.fields.JsonBField()),
                ('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Geometry now lives on BoundaryPolygon, not Boundary.
        migrations.RemoveField(
            model_name='boundary',
            name='geom',
        ),
        migrations.AddField(
            model_name='boundary',
            name='color',
            field=models.CharField(default=b'blue', max_length=64),
        ),
        migrations.AddField(
            model_name='boundary',
            name='data_fields',
            field=djsonb.fields.JsonBField(null=True, blank=True),
        ),
        migrations.AddField(
            model_name='boundary',
            name='display_field',
            field=models.CharField(max_length=10, null=True, blank=True),
        ),
        # Link each polygon back to its parent boundary.
        migrations.AddField(
            model_name='boundarypolygon',
            name='boundary',
            field=models.ForeignKey(related_name='polygons', to='ashlar.Boundary', null=True),
        ),
    ]
| StarcoderdataPython |
3263110 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .greedy_alltoall import *
from .gather_scatter_alltoall import *
from .alltoall_subproblem import *
| StarcoderdataPython |
12824015 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
__copyright__ = "Copyright (C) 2009-15 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import six
from six.moves import input, intern
from pyopencl.version import VERSION, VERSION_STATUS, VERSION_TEXT # noqa
import logging
logger = logging.getLogger(__name__)
import os
os.environ["PYOPENCL_HOME"] = os.path.dirname(os.path.abspath(__file__))
try:
import pyopencl._cl as _cl
except ImportError:
import os
from os.path import dirname, join, realpath
if realpath(join(os.getcwd(), "pyopencl")) == realpath(dirname(__file__)):
from warnings import warn
warn("It looks like you are importing PyOpenCL from "
"its source directory. This likely won't work.")
raise
import numpy as np
import sys
# True when running under PyPy; gates wrappers PyPy cannot support yet.
_PYPY = '__pypy__' in sys.builtin_module_names
# True only on CPython 2 (not PyPy, Python < 3).
_CPY2 = not _PYPY and sys.version_info < (3,)
from pyopencl._cl import ( # noqa
get_cl_header_version,
program_kind,
status_code,
platform_info,
device_type,
device_info,
device_fp_config,
device_mem_cache_type,
device_local_mem_type,
device_exec_capabilities,
device_svm_capabilities,
command_queue_properties,
context_info,
gl_context_info,
context_properties,
command_queue_info,
queue_properties,
mem_flags,
svm_mem_flags,
channel_order,
channel_type,
mem_object_type,
mem_info,
image_info,
addressing_mode,
filter_mode,
sampler_info,
map_flags,
program_info,
program_build_info,
program_binary_type,
kernel_info,
kernel_arg_info,
kernel_arg_address_qualifier,
kernel_arg_access_qualifier,
kernel_arg_type_qualifier,
kernel_work_group_info,
event_info,
command_type,
command_execution_status,
profiling_info,
mem_migration_flags,
device_partition_property,
device_affinity_domain,
Error, MemoryError, LogicError, RuntimeError,
Platform,
get_platforms,
Device,
Context,
CommandQueue,
LocalMemory,
MemoryObjectHolder,
MemoryObject,
MemoryMap,
Buffer,
_Program,
Kernel,
Event,
wait_for_events,
NannyEvent,
enqueue_nd_range_kernel,
_enqueue_marker,
_enqueue_read_buffer,
_enqueue_write_buffer,
_enqueue_copy_buffer,
_enqueue_read_buffer_rect,
_enqueue_write_buffer_rect,
_enqueue_copy_buffer_rect,
_enqueue_read_image,
_enqueue_copy_image,
_enqueue_write_image,
_enqueue_copy_image_to_buffer,
_enqueue_copy_buffer_to_image,
have_gl,
ImageFormat,
get_supported_image_formats,
Image,
Sampler,
DeviceTopologyAmd,
)
if not _PYPY:
# FIXME: Add back to default set when pypy support catches up
from pyopencl._cl import ( # noqa
enqueue_map_buffer,
enqueue_map_image,
)
if get_cl_header_version() >= (1, 1):
from pyopencl._cl import ( # noqa
UserEvent,
)
if get_cl_header_version() >= (1, 2):
from pyopencl._cl import ( # noqa
_enqueue_marker_with_wait_list,
_enqueue_barrier_with_wait_list,
unload_platform_compiler,
enqueue_migrate_mem_objects,
_enqueue_fill_buffer,
enqueue_fill_image,
ImageDescriptor,
)
if get_cl_header_version() >= (2, 0):
from pyopencl._cl import ( # noqa
SVMAllocation,
SVM,
# FIXME
#enqueue_svm_migratemem,
)
if _cl.have_gl():
from pyopencl._cl import ( # noqa
gl_object_type,
gl_texture_info,
GLBuffer,
GLRenderBuffer,
GLTexture,
)
try:
from pyopencl._cl import get_apple_cgl_share_group # noqa
except ImportError:
pass
try:
from pyopencl._cl import ( # noqa
enqueue_acquire_gl_objects,
enqueue_release_gl_objects,
)
except ImportError:
pass
import inspect as _inspect
# All lowercase-named classes exported by the C module (the constant
# namespaces like mem_flags), excluding `zip`/`map`/`range` which can leak
# in via the six.moves imports above.
CONSTANT_CLASSES = tuple(
        getattr(_cl, name) for name in dir(_cl)
        if _inspect.isclass(getattr(_cl, name))
        and name[0].islower() and name not in ["zip", "map", "range"])
# {{{ diagnostics
class CompilerWarning(UserWarning):
    """Warning category for OpenCL compiler diagnostics."""


def compiler_output(text):
    """Surface non-empty compiler output as a :class:`CompilerWarning`.

    The full text is emitted only when the PYOPENCL_COMPILER_OUTPUT
    environment variable is set to a non-zero integer; otherwise a short
    hint telling the user how to enable it is emitted instead.
    """
    # (Removed a redundant function-local `import os`; os is already
    # imported at module level.)
    from warnings import warn
    if int(os.environ.get("PYOPENCL_COMPILER_OUTPUT", "0")):
        warn(text, CompilerWarning)
    else:
        warn("Non-empty compiler output encountered. Set the "
                "environment variable PYOPENCL_COMPILER_OUTPUT=1 "
                "to see more.", CompilerWarning)
# }}}
# {{{ find pyopencl shipped source code
def _find_pyopencl_include_path():
    """Return the path of PyOpenCL's bundled CL headers ("pyopencl/cl").

    The result is wrapped in double quotes if it contains a space (so it
    can be embedded verbatim in a compiler command line).
    """
    from pkg_resources import Requirement, resource_filename, DistributionNotFound
    try:
        # Try to find the resource with pkg_resources (the recommended
        # setuptools approach)
        include_path = resource_filename(
                Requirement.parse("pyopencl"), "pyopencl/cl")
    except DistributionNotFound:
        # If pkg_resources can't find it (e.g. if the module is part of a
        # frozen application), try to find the include path in the same
        # directory as this file
        from os.path import join, abspath, dirname, exists
        include_path = join(abspath(dirname(__file__)), "cl")
        # If that doesn't exist, just re-raise the exception caught from
        # resource_filename.
        if not exists(include_path):
            raise
    # Quote the path if it contains a space and is not quoted already.
    # See https://github.com/inducer/pyopencl/issues/250 for discussion.
    if ' ' in include_path and not include_path.startswith('"'):
        return '"' + include_path + '"'
    else:
        return include_path
# }}}
# {{{ build option munging
def _split_options_if_necessary(options):
    """Normalize build options: a single string is shlex-split into a list
    (after coercing to the str type shlex expects on this Python); a list
    passes through unchanged.
    """
    if isinstance(options, six.string_types):
        import shlex
        if six.PY2:
            # shlex.split takes bytes (py2 str) on py2
            if isinstance(options, six.text_type):
                options = options.encode("utf-8")
        else:
            # shlex.split takes unicode (py3 str) on py3
            if isinstance(options, six.binary_type):
                options = options.decode("utf-8")
        options = shlex.split(options)
    return options
def _find_include_path(options):
def unquote(path):
if path.startswith('"') and path.endswith('"'):
return path[1:-1]
else:
return path
include_path = ["."]
option_idx = 0
while option_idx < len(options):
option = options[option_idx].strip()
if option.startswith("-I") or option.startswith("/I"):
if len(option) == 2:
if option_idx+1 < len(options):
include_path.append(unquote(options[option_idx+1]))
option_idx += 2
else:
include_path.append(unquote(option[2:].lstrip()))
option_idx += 1
else:
option_idx += 1
# }}}
return include_path
def _options_to_bytestring(options):
    """Join compiler options into one space-separated byte string.

    Text entries are UTF-8 encoded; byte entries pass through untouched.
    """
    encoded = [
        opt.encode("utf-8") if isinstance(opt, six.text_type) else opt
        for opt in options
    ]
    return b" ".join(encoded)
# }}}
# {{{ Program (wrapper around _Program, adds caching support)
# Options appended to every program build (beyond what the caller passes).
_DEFAULT_BUILD_OPTIONS = []
# Always expose PyOpenCL's bundled CL headers to the compiler.
_DEFAULT_INCLUDE_OPTIONS = ["-I", _find_pyopencl_include_path()]
# map of platform.name to build options list
_PLAT_BUILD_OPTIONS = {
        "Oclgrind": ["-D", "PYOPENCL_USING_OCLGRIND"],
        }
def enable_debugging(platform_or_context):
    """Enables debugging for all code subsequently compiled by
    PyOpenCL on the passed *platform*. Alternatively, a context
    may be passed.
    """
    if isinstance(platform_or_context, Context):
        platform = platform_or_context.devices[0].platform
    else:
        platform = platform_or_context
    if "AMD Accelerated" in platform.name:
        # Add debug/no-optimization flags to every later build on this
        # platform (consumed via _PLAT_BUILD_OPTIONS in _process_build_options).
        _PLAT_BUILD_OPTIONS.setdefault(platform.name, []).extend(
                ["-g", "-O0"])
        import os
        os.environ["CPU_MAX_COMPUTE_UNITS"] = "1"
    else:
        # Only the AMD CPU runtime is known to support this; warn otherwise.
        from warnings import warn
        warn("do not know how to enable debugging on '%s'"
                % platform.name)
class Program(object):
def __init__(self, arg1, arg2=None, arg3=None):
if arg2 is None:
# 1-argument form: program
self._prg = arg1
elif arg3 is None:
# 2-argument form: context, source
context, source = arg1, arg2
from pyopencl.tools import is_spirv
if is_spirv(source):
# FIXME no caching in SPIR-V case
self._context = context
self._prg = _cl._create_program_with_il(context, source)
return
import sys
if isinstance(source, six.text_type) and sys.version_info < (3,):
from warnings import warn
warn("Received OpenCL source code in Unicode, "
"should be ASCII string. Attempting conversion.",
stacklevel=2)
source = source.encode()
self._context = context
self._source = source
self._prg = None
else:
context, device, binaries = arg1, arg2, arg3
self._context = context
self._prg = _cl._Program(context, device, binaries)
self._build_duration_info = None
def _get_prg(self):
if self._prg is not None:
return self._prg
else:
# "no program" can only happen in from-source case.
from warnings import warn
warn("Pre-build attribute access defeats compiler caching.",
stacklevel=3)
self._prg = _cl._Program(self._context, self._source)
del self._context
return self._prg
def get_info(self, arg):
return self._get_prg().get_info(arg)
def get_build_info(self, *args, **kwargs):
return self._get_prg().get_build_info(*args, **kwargs)
def all_kernels(self):
result = self._get_prg().all_kernels()
for knl in result:
knl._setup(self)
return result
def int_ptr(self):
return self._get_prg().int_ptr
int_ptr = property(int_ptr, doc=_cl._Program.int_ptr.__doc__)
def from_int_ptr(int_ptr_value, retain=True):
return Program(_cl._Program.from_int_ptr(int_ptr_value, retain))
from_int_ptr.__doc__ = _cl._Program.from_int_ptr.__doc__
from_int_ptr = staticmethod(from_int_ptr)
def __getattr__(self, attr):
try:
knl = Kernel(self, attr)
# Nvidia does not raise errors even for invalid names,
# but this will give an error if the kernel is invalid.
knl.num_args
knl._source = getattr(self, "_source", None)
if self._build_duration_info is not None:
build_descr, was_cached, duration = self._build_duration_info
if duration > 0.2:
logger.info("build program: kernel '%s' was part of a "
"lengthy %s (%.2f s)" % (attr, build_descr, duration))
return knl
except LogicError:
raise AttributeError("'%s' was not found as a program "
"info attribute or as a kernel name" % attr)
# {{{ build
@classmethod
def _process_build_options(cls, context, options, _add_include_path=False):
options = _split_options_if_necessary(options)
options = (options
+ _DEFAULT_BUILD_OPTIONS
+ _DEFAULT_INCLUDE_OPTIONS
+ _PLAT_BUILD_OPTIONS.get(
context.devices[0].platform.name, []))
import os
forced_options = os.environ.get("PYOPENCL_BUILD_OPTIONS")
if forced_options:
options = options + forced_options.split()
return (
_options_to_bytestring(options),
_find_include_path(options))
def build(self, options=[], devices=None, cache_dir=None):
options_bytes, include_path = self._process_build_options(
self._context, options)
if cache_dir is None:
cache_dir = getattr(self._context, 'cache_dir', None)
import os
build_descr = None
if os.environ.get("PYOPENCL_NO_CACHE") and self._prg is None:
build_descr = "uncached source build (cache disabled by user)"
self._prg = _cl._Program(self._context, self._source)
from time import time
start_time = time()
was_cached = False
if self._prg is not None:
# uncached
if build_descr is None:
build_descr = "uncached source build"
self._build_and_catch_errors(
lambda: self._prg.build(options_bytes, devices),
options_bytes=options_bytes)
else:
# cached
from pyopencl.cache import create_built_program_from_source_cached
self._prg, was_cached = self._build_and_catch_errors(
lambda: create_built_program_from_source_cached(
self._context, self._source, options_bytes, devices,
cache_dir=cache_dir, include_path=include_path),
options_bytes=options_bytes, source=self._source)
if was_cached:
build_descr = "cache retrieval"
else:
build_descr = "source build resulting from a binary cache miss"
del self._context
end_time = time()
self._build_duration_info = (build_descr, was_cached, end_time-start_time)
return self
def _build_and_catch_errors(self, build_func, options_bytes, source=None):
try:
return build_func()
except _cl.RuntimeError as e:
msg = str(e)
if options_bytes:
msg = msg + "\n(options: %s)" % options_bytes.decode("utf-8")
if source is not None:
from tempfile import NamedTemporaryFile
srcfile = NamedTemporaryFile(mode="wt", delete=False, suffix=".cl")
try:
srcfile.write(source)
finally:
srcfile.close()
msg = msg + "\n(source saved as %s)" % srcfile.name
code = e.code
routine = e.routine
err = _cl.RuntimeError(
_cl._ErrorRecord(
msg=msg,
code=code,
routine=routine))
# Python 3.2 outputs the whole list of currently active exceptions
# This serves to remove one (redundant) level from that nesting.
raise err
# }}}
def compile(self, options=[], devices=None, headers=[]):
options_bytes, _ = self._process_build_options(self._context, options)
self._get_prg().compile(options_bytes, devices, headers)
return self
    def __eq__(self, other):
        # Two Program wrappers are equal iff they wrap the same low-level
        # program object.
        return self._get_prg() == other._get_prg()
def __ne__(self, other):
return self._get_prg() == other._get_prg()
    def __hash__(self):
        # Consistent with __eq__: hash of the underlying low-level program.
        return hash(self._get_prg())
def create_program_with_built_in_kernels(context, devices, kernel_names):
    """Create a :class:`Program` from the devices' built-in kernels.

    :arg kernel_names: either a single string of the form ``"name1:name2"``
        or an iterable of kernel names, which is joined with ``":"``.
    """
    if isinstance(kernel_names, str):
        joined_names = kernel_names
    else:
        joined_names = ":".join(kernel_names)

    raw_prg = _Program.create_with_built_in_kernels(
            context, devices, joined_names)
    return Program(raw_prg)
def link_program(context, programs, options=None, devices=None):
    """Link the given compiled *programs* into a new :class:`Program`."""
    link_options = [] if options is None else options
    options_bytes = _options_to_bytestring(
            _split_options_if_necessary(link_options))

    raw_programs = [prg._get_prg() for prg in programs]
    return Program(
            _Program.link(context, raw_programs, options_bytes, devices))
# }}}
# {{{ monkeypatch C++ wrappers to add functionality
def _add_functionality():
    """Monkeypatch the C++-level wrapper classes with Pythonic conveniences
    (reprs, properties, version parsing, caching, get_info attributes)."""
    def generic_get_cl_version(self):
        # Parse "OpenCL <major>.<minor> <vendor-specific>" from self.version
        # into a (major, minor) tuple. Shared by Platform and Device.
        import re
        version_string = self.version
        match = re.match(r"^OpenCL ([0-9]+)\.([0-9]+) .*$", version_string)
        if match is None:
            raise RuntimeError("%s %s returned non-conformant "
                    "platform version string '%s'" %
                    (type(self).__name__, self, version_string))
        return int(match.group(1)), int(match.group(2))
    # {{{ Platform
    def platform_repr(self):
        # Show the platform name and the underlying cl_platform_id.
        return "<pyopencl.Platform '%s' at 0x%x>" % (self.name, self.int_ptr)
    Platform.__repr__ = platform_repr
    Platform._get_cl_version = generic_get_cl_version
    # }}}
    # {{{ Device
    def device_repr(self):
        return "<pyopencl.Device '%s' on '%s' at 0x%x>" % (
                self.name.strip(), self.platform.name.strip(), self.int_ptr)
    def device_persistent_unique_id(self):
        # A tuple that identifies this device across runs/processes (useful,
        # e.g., as part of cache keys).
        return (self.vendor, self.vendor_id, self.name, self.version)
    Device.__repr__ = device_repr
    # undocumented for now:
    Device._get_cl_version = generic_get_cl_version
    Device.persistent_unique_id = property(device_persistent_unique_id)
    # }}}
    # {{{ Context
    context_old_init = Context.__init__
    def context_init(self, devices, properties, dev_type, cache_dir=None):
        # Wrapper that accepts (and warns about) the deprecated 'cache_dir'
        # constructor argument before delegating to the original __init__.
        # NOTE(review): this wrapper is defined but never installed as
        # Context.__init__ anywhere below -- confirm whether the assignment
        # was dropped intentionally (installing it would also make the
        # positional parameters mandatory).
        if cache_dir is not None:
            from warnings import warn
            warn("The 'cache_dir' argument to the Context constructor "
                    "is deprecated and no longer has an effect. "
                    "It was removed because it only applied to the wrapper "
                    "object and not the context itself, leading to inconsistencies.",
                    DeprecationWarning, stacklevel=2)
        context_old_init(self, devices, properties, dev_type)
    def context_repr(self):
        return "<pyopencl.Context at 0x%x on %s>" % (self.int_ptr,
                ", ".join(repr(dev) for dev in self.devices))
    def context_get_cl_version(self):
        # A context's CL version is taken to be that of its first device's
        # platform.
        return self.devices[0].platform._get_cl_version()
    Context.__repr__ = context_repr
    from pytools import memoize_method
    Context._get_cl_version = memoize_method(context_get_cl_version)
    # }}}
    # {{{ CommandQueue
    def command_queue_enter(self):
        return self
    def command_queue_exit(self, exc_type, exc_val, exc_tb):
        # Ensure all enqueued work has completed when leaving a 'with' block.
        self.finish()
    def command_queue_get_cl_version(self):
        return self.context._get_cl_version()
    CommandQueue.__enter__ = command_queue_enter
    CommandQueue.__exit__ = command_queue_exit
    CommandQueue._get_cl_version = memoize_method(command_queue_get_cl_version)
    # }}}
    # {{{ _Program (the internal, non-caching version)
    def program_get_build_logs(self):
        # Collect (device, build log) pairs for all devices of the program.
        # Failures to retrieve a log are reported in-band, not raised.
        build_logs = []
        for dev in self.get_info(_cl.program_info.DEVICES):
            try:
                log = self.get_build_info(dev, program_build_info.LOG)
            except Exception:
                log = "<error retrieving log>"
            build_logs.append((dev, log))
        return build_logs
    def program_build(self, options_bytes, devices=None):
        # Build the low-level program. On failure, re-raise with all
        # per-device build logs attached; on success, surface any non-empty
        # logs through compiler_output().
        err = None
        try:
            self._build(options=options_bytes, devices=devices)
        except Error as e:
            msg = str(e) + "\n\n" + (75*"="+"\n").join(
                    "Build on %s:\n\n%s" % (dev, log)
                    for dev, log in self._get_build_logs())
            code = e.code
            routine = e.routine
            err = _cl.RuntimeError(
                    _cl._ErrorRecord(
                        msg=msg,
                        code=code,
                        routine=routine))
        if err is not None:
            # Python 3.2 outputs the whole list of currently active exceptions
            # This serves to remove one (redundant) level from that nesting.
            raise err
        message = (75*"="+"\n").join(
                "Build on %s succeeded, but said:\n\n%s" % (dev, log)
                for dev, log in self._get_build_logs()
                if log is not None and log.strip())
        if message:
            if self.kind() == program_kind.SOURCE:
                build_type = "From-source build"
            elif self.kind() == program_kind.BINARY:
                build_type = "From-binary build"
            elif self.kind() == program_kind.IL:
                build_type = "From-IL build"
            else:
                build_type = "Build"
            compiler_output("%s succeeded, but resulted in non-empty logs:\n%s"
                    % (build_type, message))
        return self
    _cl._Program._get_build_logs = program_get_build_logs
    _cl._Program.build = program_build
    # }}}
    # {{{ Event
    class ProfilingInfoGetter:
        # Maps attribute access such as evt.profile.end to
        # evt.get_profiling_info(profiling_info.END).
        def __init__(self, event):
            self.event = event
        def __getattr__(self, name):
            info_cls = _cl.profiling_info
            try:
                inf_attr = getattr(info_cls, name.upper())
            except AttributeError:
                raise AttributeError("%s has no attribute '%s'"
                        % (type(self), name))
            else:
                return self.event.get_profiling_info(inf_attr)
    _cl.Event.profile = property(ProfilingInfoGetter)
    # }}}
    # {{{ Kernel
    kernel_old_init = Kernel.__init__
    kernel_old_get_info = Kernel.get_info
    kernel_old_get_work_group_info = Kernel.get_work_group_info
    def kernel_init(self, prg, name):
        # Accept either a high-level Program or a low-level _cl._Program.
        if not isinstance(prg, _cl._Program):
            prg = prg._get_prg()
        kernel_old_init(self, prg, name)
        self._setup(prg)
    def kernel__setup(self, prg):
        self._source = getattr(prg, "_source", None)
        # Generate the argument-handling fast path. Without declared scalar
        # dtypes, the passed-argument count equals the kernel's argument
        # count and the arg-count-bug handling is disabled (None).
        from pyopencl.invoker import generate_enqueue_and_set_args
        self._enqueue, self._set_args = generate_enqueue_and_set_args(
                self.function_name, self.num_args, self.num_args,
                None,
                warn_about_arg_count_bug=None,
                work_around_arg_count_bug=None)
        self._wg_info_cache = {}
        return self
    def kernel_set_scalar_arg_dtypes(self, scalar_arg_dtypes):
        self._scalar_arg_dtypes = tuple(scalar_arg_dtypes)
        # {{{ arg counting bug handling
        # For example:
        # https://github.com/pocl/pocl/issues/197
        # (but Apple CPU has a similar bug)
        work_around_arg_count_bug = False
        warn_about_arg_count_bug = False
        from pyopencl.characterize import has_struct_arg_count_bug
        count_bug_per_dev = [
                has_struct_arg_count_bug(dev, self.context)
                for dev in self.context.devices]
        from pytools import single_valued
        if any(count_bug_per_dev):
            if all(count_bug_per_dev):
                # All devices agree on the workaround variant needed.
                work_around_arg_count_bug = single_valued(count_bug_per_dev)
            else:
                # Mixed devices: cannot work around consistently, only warn.
                warn_about_arg_count_bug = True
        # }}}
        # Regenerate the invokers, this time with dtype knowledge.
        from pyopencl.invoker import generate_enqueue_and_set_args
        self._enqueue, self._set_args = generate_enqueue_and_set_args(
                self.function_name,
                len(scalar_arg_dtypes), self.num_args,
                self._scalar_arg_dtypes,
                warn_about_arg_count_bug=warn_about_arg_count_bug,
                work_around_arg_count_bug=work_around_arg_count_bug)
    def kernel_get_work_group_info(self, param, device):
        # Cached: work-group info queries are frequent and invariant per
        # (param, device).
        try:
            return self._wg_info_cache[param, device]
        except KeyError:
            pass
        result = kernel_old_get_work_group_info(self, param, device)
        self._wg_info_cache[param, device] = result
        return result
    def kernel_set_args(self, *args, **kwargs):
        # Need to duplicate the 'self' argument for dynamically generated method
        return self._set_args(self, *args, **kwargs)
    def kernel_call(self, queue, global_size, local_size, *args, **kwargs):
        # __call__ can't be overridden directly, so we need this
        # trampoline hack.
        return self._enqueue(self, queue, global_size, local_size, *args, **kwargs)
    def kernel_capture_call(self, filename, queue, global_size, local_size,
            *args, **kwargs):
        from pyopencl.capture_call import capture_kernel_call
        capture_kernel_call(self, filename, queue, global_size, local_size,
                *args, **kwargs)
    def kernel_get_info(self, param_name):
        # Wrap raw _Program results in the caching Program wrapper.
        val = kernel_old_get_info(self, param_name)
        if isinstance(val, _Program):
            return Program(val)
        else:
            return val
    Kernel.__init__ = kernel_init
    Kernel._setup = kernel__setup
    Kernel.get_work_group_info = kernel_get_work_group_info
    Kernel.set_scalar_arg_dtypes = kernel_set_scalar_arg_dtypes
    Kernel.set_args = kernel_set_args
    Kernel.__call__ = kernel_call
    Kernel.capture_call = kernel_capture_call
    Kernel.get_info = kernel_get_info
    # }}}
    # {{{ ImageFormat
    def image_format_repr(self):
        return "ImageFormat(%s, %s)" % (
                channel_order.to_string(self.channel_order,
                    "<unknown channel order 0x%x>"),
                channel_type.to_string(self.channel_data_type,
                    "<unknown channel data type 0x%x>"))
    def image_format_eq(self, other):
        return (self.channel_order == other.channel_order
                and self.channel_data_type == other.channel_data_type)
    def image_format_ne(self, other):
        return not image_format_eq(self, other)
    def image_format_hash(self):
        # Must be consistent with image_format_eq above.
        return hash((type(self), self.channel_order, self.channel_data_type))
    ImageFormat.__repr__ = image_format_repr
    ImageFormat.__eq__ = image_format_eq
    ImageFormat.__ne__ = image_format_ne
    ImageFormat.__hash__ = image_format_hash
    # }}}
    # {{{ Image
    image_old_init = Image.__init__
    def image_init(self, context, flags, format, shape=None, pitches=None,
            hostbuf=None, is_array=False, buffer=None):
        # Validate the shape/hostbuf/pitches combination up front.
        if shape is None and hostbuf is None:
            raise Error("'shape' must be passed if 'hostbuf' is not given")
        if shape is None and hostbuf is not None:
            shape = hostbuf.shape
        if hostbuf is not None and not \
                (flags & (mem_flags.USE_HOST_PTR | mem_flags.COPY_HOST_PTR)):
            from warnings import warn
            warn("'hostbuf' was passed, but no memory flags to make use of it.")
        if hostbuf is None and pitches is not None:
            raise Error("'pitches' may only be given if 'hostbuf' is given")
        if context._get_cl_version() >= (1, 2) and get_cl_header_version() >= (1, 2):
            # CL 1.2+ path: derive the image type from the dimensionality of
            # 'shape' plus is_array/buffer, then use the descriptor-based
            # constructor.
            if buffer is not None and is_array:
                raise ValueError(
                        "'buffer' and 'is_array' are mutually exclusive")
            if len(shape) == 3:
                if buffer is not None:
                    raise TypeError(
                            "'buffer' argument is not supported for 3D arrays")
                elif is_array:
                    image_type = mem_object_type.IMAGE2D_ARRAY
                else:
                    image_type = mem_object_type.IMAGE3D
            elif len(shape) == 2:
                if buffer is not None:
                    raise TypeError(
                            "'buffer' argument is not supported for 2D arrays")
                elif is_array:
                    image_type = mem_object_type.IMAGE1D_ARRAY
                else:
                    image_type = mem_object_type.IMAGE2D
            elif len(shape) == 1:
                if buffer is not None:
                    image_type = mem_object_type.IMAGE1D_BUFFER
                elif is_array:
                    raise TypeError("array of zero-dimensional images not supported")
                else:
                    image_type = mem_object_type.IMAGE1D
            else:
                raise ValueError("images cannot have more than three dimensions")
            desc = ImageDescriptor()
            desc.image_type = image_type
            desc.shape = shape  # also sets desc.array_size
            if pitches is None:
                desc.pitches = (0, 0)
            else:
                desc.pitches = pitches
            desc.num_mip_levels = 0  # per CL 1.2 spec
            desc.num_samples = 0  # per CL 1.2 spec
            desc.buffer = buffer
            image_old_init(self, context, flags, format, desc, hostbuf)
        else:
            # legacy init for CL 1.1 and older
            if is_array:
                raise TypeError("'is_array=True' is not supported for CL < 1.2")
            # if num_mip_levels is not None:
            #     raise TypeError(
            #             "'num_mip_levels' argument is not supported for CL < 1.2")
            # if num_samples is not None:
            #     raise TypeError(
            #             "'num_samples' argument is not supported for CL < 1.2")
            if buffer is not None:
                raise TypeError("'buffer' argument is not supported for CL < 1.2")
            image_old_init(self, context, flags, format, shape,
                    pitches, hostbuf)
    class _ImageInfoGetter:
        # Deprecated accessor: maps img.image.attr to
        # img.get_image_info(image_info.ATTR).
        def __init__(self, event):
            # NOTE(review): the parameter is named 'event' but actually holds
            # the Image instance being queried -- confirm before renaming.
            from warnings import warn
            warn("Image.image.attr is deprecated. "
                    "Use Image.attr directly, instead.")
            self.event = event
        def __getattr__(self, name):
            try:
                inf_attr = getattr(_cl.image_info, name.upper())
            except AttributeError:
                raise AttributeError("%s has no attribute '%s'"
                        % (type(self), name))
            else:
                return self.event.get_image_info(inf_attr)
    def image_shape(self):
        # numpy-style shape; only 2D and 3D images have one.
        if self.type == mem_object_type.IMAGE2D:
            return (self.width, self.height)
        elif self.type == mem_object_type.IMAGE3D:
            return (self.width, self.height, self.depth)
        else:
            raise LogicError("only images have shapes")
    Image.__init__ = image_init
    Image.image = property(_ImageInfoGetter)
    Image.shape = property(image_shape)
    # }}}
# }}}
# {{{ Error
def error_str(self):
val = self.what
try:
val.routine
except AttributeError:
return str(val)
else:
result = ""
if val.code() != status_code.SUCCESS:
result = status_code.to_string(
val.code(), "<unknown error %d>")
routine = val.routine()
if routine:
result = "%s failed: %s" % (routine, result)
what = val.what()
if what:
if result:
result += " - "
result += what
return result
def error_code(self):
return self.args[0].code()
def error_routine(self):
return self.args[0].routine()
def error_what(self):
return self.args[0]
Error.__str__ = error_str
Error.code = property(error_code)
Error.routine = property(error_routine)
Error.what = property(error_what)
# }}}
    # {{{ MemoryMap
    def memory_map_enter(self):
        return self
    def memory_map_exit(self, exc_type, exc_val, exc_tb):
        # Unmap on exit from the 'with' block.
        self.release()
    MemoryMap.__doc__ = """
    This class may also be used as a context manager in a ``with`` statement.
    The memory corresponding to this object will be unmapped when
    this object is deleted or :meth:`release` is called.

    .. automethod:: release
    """
    MemoryMap.__enter__ = memory_map_enter
    MemoryMap.__exit__ = memory_map_exit
    # }}}
    # {{{ SVMAllocation
    if get_cl_header_version() >= (2, 0):
        SVMAllocation.__doc__ = """An object whose lifetime is tied to an allocation of shared virtual memory.

        .. note::

            Most likely, you will not want to use this directly, but rather
            :func:`svm_empty` and related functions which allow access to this
            functionality using a friendlier, more Pythonic interface.

        .. versionadded:: 2016.2

        .. automethod:: __init__(self, ctx, size, alignment, flags=None)
        .. automethod:: release
        .. automethod:: enqueue_release
        """
    if get_cl_header_version() >= (2, 0):
        svmallocation_old_init = SVMAllocation.__init__
    def svmallocation_init(self, ctx, size, alignment, flags, _interface=None):
        """
        :arg ctx: a :class:`Context`
        :arg flags: some of :class:`svm_mem_flags`.
        """
        svmallocation_old_init(self, ctx, size, alignment, flags)
        # Expose the allocation through the numpy __array_interface__
        # protocol; the buffer is writable only if WRITE_ONLY or READ_WRITE
        # was requested.
        read_write = (
                flags & mem_flags.WRITE_ONLY != 0
                or flags & mem_flags.READ_WRITE != 0)
        # NOTE(review): _interface defaults to None, so calling this without
        # an explicit _interface dict raises TypeError on the item assignment
        # below -- confirm all call sites pass it.
        _interface["data"] = (
                int(self._ptr_as_int()), not read_write)
        self.__array_interface__ = _interface
    if get_cl_header_version() >= (2, 0):
        SVMAllocation.__init__ = svmallocation_init
    # }}}
    # {{{ SVM
    if get_cl_header_version() >= (2, 0):
        SVM.__doc__ = """Tags an object exhibiting the Python buffer interface (such as a
        :class:`numpy.ndarray`) as referring to shared virtual memory.

        Depending on the features of the OpenCL implementation, the following
        types of objects may be passed to/wrapped in this type:

        * coarse-grain shared memory as returned by (e.g.) :func:`csvm_empty`
          for any implementation of OpenCL 2.0.

          This is how coarse-grain SVM may be used from both host and device::

              svm_ary = cl.SVM(
                  cl.csvm_empty(ctx, 1000, np.float32, alignment=64))
              assert isinstance(svm_ary.mem, np.ndarray)

              with svm_ary.map_rw(queue) as ary:
                  ary.fill(17)  # use from host

              prg.twice(queue, svm_ary.mem.shape, None, svm_ary)

        * fine-grain shared memory as returned by (e.g.) :func:`fsvm_empty`,
          if the implementation supports fine-grained shared virtual memory.
          This memory may directly be passed to a kernel::

              ary = cl.fsvm_empty(ctx, 1000, np.float32)
              assert isinstance(ary, np.ndarray)

              prg.twice(queue, ary.shape, None, cl.SVM(ary))
              queue.finish() # synchronize
              print(ary) # access from host

        * any :class:`numpy.ndarray` (or other Python object with a buffer
          interface) if the implementation supports fine-grained *system*
          shared virtual memory.

          This is how plain :mod:`numpy` arrays may directly be passed to a
          kernel::

              ary = np.zeros(1000, np.float32)
              prg.twice(queue, ary.shape, None, cl.SVM(ary))
              queue.finish() # synchronize
              print(ary) # access from host

        Objects of this type may be passed to kernel calls and
        :func:`enqueue_copy`. Coarse-grain shared-memory *must* be mapped
        into host address space using :meth:`map` before being accessed
        through the :mod:`numpy` interface.

        .. note::

            This object merely serves as a 'tag' that changes the behavior
            of functions to which it is passed. It has no special management
            relationship to the memory it tags. For example, it is permissible
            to grab a :mod:`numpy.array` out of :attr:`SVM.mem` of one
            :class:`SVM` instance and use the array to construct another.
            Neither of the tags need to be kept alive.

        .. versionadded:: 2016.2

        .. attribute:: mem

            The wrapped object.

        .. automethod:: __init__
        .. automethod:: map
        .. automethod:: map_ro
        .. automethod:: map_rw
        .. automethod:: as_buffer
        """
    if get_cl_header_version() >= (2, 0):
        svm_old_init = SVM.__init__
    def svm_init(self, mem):
        svm_old_init(self, mem)
        self.mem = mem
    def svm_map(self, queue, flags, is_blocking=True, wait_for=None):
        """
        :arg is_blocking: If *False*, subsequent code must wait on
            :attr:`SVMMap.event` in the returned object before accessing the
            mapped memory.
        :arg flags: a combination of :class:`pyopencl.map_flags`, defaults to
            read-write.
        :returns: an :class:`SVMMap` instance

        |std-enqueue-blurb|
        """
        return SVMMap(
                self,
                queue,
                _cl._enqueue_svm_map(queue, is_blocking, flags, self, wait_for))
    def svm_map_ro(self, queue, is_blocking=True, wait_for=None):
        """Like :meth:`map`, but with *flags* set for a read-only map."""
        return self.map(queue, map_flags.READ,
                is_blocking=is_blocking, wait_for=wait_for)
    def svm_map_rw(self, queue, is_blocking=True, wait_for=None):
        """Like :meth:`map`, but with *flags* set for a read-write map."""
        return self.map(queue, map_flags.READ | map_flags.WRITE,
                is_blocking=is_blocking, wait_for=wait_for)
    def svm__enqueue_unmap(self, queue, wait_for=None):
        return _cl._enqueue_svm_unmap(queue, self, wait_for)
    def svm_as_buffer(self, ctx, flags=None):
        """
        :arg ctx: a :class:`Context`
        :arg flags: a combination of :class:`pyopencl.map_flags`, defaults to
            read-write.
        :returns: a :class:`Buffer` corresponding to *self*.

        The memory referred to by this object must not be freed before
        the returned :class:`Buffer` is released.
        """
        if flags is None:
            flags = mem_flags.READ_WRITE
        return Buffer(ctx, flags, size=self.mem.nbytes, hostbuf=self.mem)
    if get_cl_header_version() >= (2, 0):
        SVM.__init__ = svm_init
        SVM.map = svm_map
        SVM.map_ro = svm_map_ro
        SVM.map_rw = svm_map_rw
        SVM._enqueue_unmap = svm__enqueue_unmap
        SVM.as_buffer = svm_as_buffer
    # }}}
    # ORDER DEPENDENCY: Some of the above may override get_info, the effect needs
    # to be visible through the attributes. So get_info attr creation needs to happen
    # after the overriding is complete.
    # Maps each wrapper class to (get_info method, info-constant class,
    # names of attributes whose values should be cached per instance).
    cls_to_info_cls = {
            _cl.Platform: (_cl.Platform.get_info, _cl.platform_info, []),
            _cl.Device: (_cl.Device.get_info, _cl.device_info,
                ["PLATFORM", "MAX_WORK_GROUP_SIZE", "MAX_COMPUTE_UNITS"]),
            _cl.Context: (_cl.Context.get_info, _cl.context_info, []),
            _cl.CommandQueue: (_cl.CommandQueue.get_info, _cl.command_queue_info,
                ["CONTEXT", "DEVICE"]),
            _cl.Event: (_cl.Event.get_info, _cl.event_info, []),
            _cl.MemoryObjectHolder:
            (MemoryObjectHolder.get_info, _cl.mem_info, []),
            Image: (_cl.Image.get_image_info, _cl.image_info, []),
            Program: (Program.get_info, _cl.program_info, []),
            Kernel: (Kernel.get_info, _cl.kernel_info, []),
            _cl.Sampler: (Sampler.get_info, _cl.sampler_info, []),
            }
    def to_string(cls, value, default_format=None):
        # Reverse lookup: find the name of the constant in *cls* that equals
        # *value*; fall back to default_format (if given) when not found.
        for name in dir(cls):
            if (not name.startswith("_") and getattr(cls, name) == value):
                return name
        if default_format is None:
            raise ValueError("a name for value %d was not found in %s"
                    % (value, cls.__name__))
        else:
            return default_format % value
    for cls in CONSTANT_CLASSES:
        cls.to_string = classmethod(to_string)
    # {{{ get_info attributes -------------------------------------------------
    def make_getinfo(info_method, info_name, info_attr):
        # Build a property that forwards to get_info(info_attr).
        def result(self):
            return info_method(self, info_attr)
        return property(result)
    def make_cacheable_getinfo(info_method, info_name, cache_attr, info_attr):
        # Like make_getinfo, but memoizes the result on the instance under
        # cache_attr.
        def result(self):
            try:
                return getattr(self, cache_attr)
            except AttributeError:
                pass
            result = info_method(self, info_attr)
            setattr(self, cache_attr, result)
            return result
        return property(result)
    # Attach one lowercase property per info constant to each wrapper class.
    # (six.iteritems/intern are kept for Python 2 compatibility.)
    for cls, (info_method, info_class, cacheable_attrs) \
            in six.iteritems(cls_to_info_cls):
        for info_name, info_value in six.iteritems(info_class.__dict__):
            if info_name == "to_string" or info_name.startswith("_"):
                continue
            info_lower = info_name.lower()
            info_constant = getattr(info_class, info_name)
            if info_name in cacheable_attrs:
                cache_attr = intern("_info_cache_"+info_lower)
                setattr(cls, info_lower, make_cacheable_getinfo(
                    info_method, info_lower, cache_attr, info_constant))
            else:
                setattr(cls, info_lower, make_getinfo(
                    info_method, info_name, info_constant))
    # }}}
    if _cl.have_gl():
        def gl_object_get_gl_object(self):
            # Second element of get_gl_object_info() is the GL object name.
            return self.get_gl_object_info()[1]
        GLBuffer.gl_object = property(gl_object_get_gl_object)
        GLTexture.gl_object = property(gl_object_get_gl_object)
# Apply all of the monkeypatches above at import time.
_add_functionality()
# }}}
# {{{ create_some_context
def create_some_context(interactive=None, answers=None):
    """Create a :class:`Context` 'somehow', prompting the user if needed.

    Choices may be pre-supplied via *answers* or the ``PYOPENCL_CTX``
    environment variable (colon-separated). ``PYOPENCL_TEST`` short-circuits
    to the first test device. When not interactive and no answers are given,
    the first platform/device is chosen.
    """
    import os
    if answers is None:
        if "PYOPENCL_CTX" in os.environ:
            ctx_spec = os.environ["PYOPENCL_CTX"]
            answers = ctx_spec.split(":")
        if "PYOPENCL_TEST" in os.environ:
            from pyopencl.tools import get_test_platforms_and_devices
            for plat, devs in get_test_platforms_and_devices():
                for dev in devs:
                    # Return a context on the first test device found.
                    return Context([dev])
    if answers is not None:
        # Keep the originals for the PYOPENCL_CTX hint; pop from a copy.
        pre_provided_answers = answers
        answers = answers[:]
    else:
        pre_provided_answers = None
    user_inputs = []
    if interactive is None:
        # Only prompt when attached to a terminal.
        interactive = True
        try:
            import sys
            if not sys.stdin.isatty():
                interactive = False
        except Exception:
            interactive = False
    def cc_print(s):
        if interactive:
            print(s)
    def get_input(prompt):
        # Consume pre-provided answers first; otherwise prompt (or default).
        if answers:
            return str(answers.pop(0))
        elif not interactive:
            return ''
        else:
            user_input = input(prompt)
            user_inputs.append(user_input)
            return user_input
    # {{{ pick a platform
    platforms = get_platforms()
    if not platforms:
        raise Error("no platforms found")
    else:
        if not answers:
            cc_print("Choose platform:")
            for i, pf in enumerate(platforms):
                cc_print("[%d] %s" % (i, pf))
        answer = get_input("Choice [0]:")
        if not answer:
            platform = platforms[0]
        else:
            platform = None
            # Accept either a numeric index or a name substring.
            try:
                int_choice = int(answer)
            except ValueError:
                pass
            else:
                if 0 <= int_choice < len(platforms):
                    platform = platforms[int_choice]
            if platform is None:
                answer = answer.lower()
                for i, pf in enumerate(platforms):
                    if answer in pf.name.lower():
                        platform = pf
                if platform is None:
                    raise RuntimeError("input did not match any platform")
    # }}}
    # {{{ pick a device
    devices = platform.get_devices()
    def parse_device(choice):
        # Accept either a numeric index or a name substring.
        try:
            int_choice = int(choice)
        except ValueError:
            pass
        else:
            if 0 <= int_choice < len(devices):
                return devices[int_choice]
        choice = choice.lower()
        for i, dev in enumerate(devices):
            if choice in dev.name.lower():
                return dev
        raise RuntimeError("input did not match any device")
    if not devices:
        raise Error("no devices found")
    elif len(devices) == 1:
        pass
    else:
        if not answers:
            cc_print("Choose device(s):")
            for i, dev in enumerate(devices):
                cc_print("[%d] %s" % (i, dev))
        answer = get_input("Choice, comma-separated [0]:")
        if not answer:
            devices = [devices[0]]
        else:
            devices = [parse_device(i) for i in answer.split(",")]
    # }}}
    if user_inputs:
        if pre_provided_answers is not None:
            user_inputs = pre_provided_answers + user_inputs
        cc_print("Set the environment variable PYOPENCL_CTX='%s' to "
                "avoid being asked again." % ":".join(user_inputs))
    if answers:
        raise RuntimeError("not all provided choices were used by "
                "create_some_context. (left over: '%s')" % ":".join(answers))
    return Context(devices)
# Short alias.
_csc = create_some_context
# }}}
# {{{ SVMMap
class SVMMap(object):
    """
    .. attribute:: event

    .. versionadded:: 2016.2

    .. automethod:: release

    This class may also be used as a context manager in a ``with`` statement.
    :meth:`release` will be called upon exit from the ``with`` region.
    The value returned to the ``as`` part of the context manager is the
    mapped Python object (e.g. a :mod:`numpy` array).
    """
    def __init__(self, svm, queue, event):
        self.svm = svm
        self.queue = queue
        self.event = event

    def __del__(self):
        # Unmap on garbage collection if the user did not release explicitly.
        if self.svm is not None:
            self.release()

    def __enter__(self):
        return self.svm.mem

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()

    def release(self, queue=None, wait_for=None):
        """
        :arg queue: a :class:`pyopencl.CommandQueue`. Defaults to the one
            with which the map was created, if not specified.
        :returns: a :class:`pyopencl.Event`

        |std-enqueue-blurb|
        """
        # Bug fix: *queue* and *wait_for* were previously accepted but
        # silently ignored; honor them as documented.
        evt = self.svm._enqueue_unmap(queue or self.queue, wait_for)
        # Mark as released so __del__ does not unmap a second time.
        self.svm = None

        return evt
# }}}
# {{{ enqueue_copy
def enqueue_copy(queue, dest, src, **kwargs):
    """Copy from :class:`Image`, :class:`Buffer` or the host to
    :class:`Image`, :class:`Buffer` or the host. (Note: host-to-host
    copies are unsupported.)

    The following keyword arguments are available:

    :arg wait_for: (optional, default empty)
    :arg is_blocking: Wait for completion. Defaults to *True*.
      (Available on any copy involving host memory)

    :return: A :class:`NannyEvent` if the transfer involved a
        host-side buffer, otherwise an :class:`Event`.

    .. note::

        Be aware that the deletion of the :class:`NannyEvent` that is
        returned by the function if the transfer involved a host-side buffer
        will block until the transfer is complete, so be sure to keep a
        reference to this :class:`Event` as long as necessary for the
        transfer to complete.

    .. note::

        Two types of 'buffer' occur in the arguments to this function,
        :class:`Buffer` and 'host-side buffers'. The latter are
        defined by Python and commonly called `buffer objects
        <https://docs.python.org/3.4/c-api/buffer.html>`_. :mod:`numpy`
        arrays are a very common example.
        Make sure to always be clear on whether a :class:`Buffer` or a
        Python buffer object is needed.

    .. ------------------------------------------------------------------------
    .. rubric :: Transfer :class:`Buffer` ↔ host
    .. ------------------------------------------------------------------------

    :arg device_offset: offset in bytes (optional)

    .. note::

        The size of the transfer is controlled by the size of the
        of the host-side buffer. If the host-side buffer
        is a :class:`numpy.ndarray`, you can control the transfer size by
        transfering into a smaller 'view' of the target array, like this::

            cl.enqueue_copy(queue, large_dest_numpy_array[:15], src_buffer)

    .. ------------------------------------------------------------------------
    .. rubric :: Transfer :class:`Buffer` ↔ :class:`Buffer`
    .. ------------------------------------------------------------------------

    :arg byte_count: (optional) If not specified, defaults to the
        size of the source in versions 2012.x and earlier,
        and to the minimum of the size of the source and target
        from 2013.1 on.
    :arg src_offset: (optional)
    :arg dest_offset: (optional)

    .. ------------------------------------------------------------------------
    .. rubric :: Rectangular :class:`Buffer` ↔ host transfers (CL 1.1 and newer)
    .. ------------------------------------------------------------------------

    :arg buffer_origin: :class:`tuple` of :class:`int` of length
        three or shorter. (mandatory)
    :arg host_origin: :class:`tuple` of :class:`int` of length
        three or shorter. (mandatory)
    :arg region: :class:`tuple` of :class:`int` of length
        three or shorter. (mandatory)
    :arg buffer_pitches: :class:`tuple` of :class:`int` of length
        two or shorter. (optional, "tightly-packed" if unspecified)
    :arg host_pitches: :class:`tuple` of :class:`int` of length
        two or shorter. (optional, "tightly-packed" if unspecified)

    .. ------------------------------------------------------------------------
    .. rubric :: Rectangular :class:`Buffer` ↔ :class:`Buffer`
        transfers (CL 1.1 and newer)
    .. ------------------------------------------------------------------------

    :arg src_origin: :class:`tuple` of :class:`int` of length
        three or shorter. (mandatory)
    :arg dst_origin: :class:`tuple` of :class:`int` of length
        three or shorter. (mandatory)
    :arg region: :class:`tuple` of :class:`int` of length
        three or shorter. (mandatory)
    :arg src_pitches: :class:`tuple` of :class:`int` of length
        two or shorter. (optional, "tightly-packed" if unspecified)
    :arg dst_pitches: :class:`tuple` of :class:`int` of length
        two or shorter. (optional, "tightly-packed" if unspecified)

    .. ------------------------------------------------------------------------
    .. rubric :: Transfer :class:`Image` ↔ host
    .. ------------------------------------------------------------------------

    :arg origin: :class:`tuple` of :class:`int` of length
        three or shorter. (mandatory)
    :arg region: :class:`tuple` of :class:`int` of length
        three or shorter. (mandatory)
    :arg pitches: :class:`tuple` of :class:`int` of length
        two or shorter. (optional)

    .. ------------------------------------------------------------------------
    .. rubric :: Transfer :class:`Buffer` ↔ :class:`Image`
    .. ------------------------------------------------------------------------

    :arg offset: offset in buffer (mandatory)
    :arg origin: :class:`tuple` of :class:`int` of length
        three or shorter. (mandatory)
    :arg region: :class:`tuple` of :class:`int` of length
        three or shorter. (mandatory)

    .. ------------------------------------------------------------------------
    .. rubric :: Transfer :class:`Image` ↔ :class:`Image`
    .. ------------------------------------------------------------------------

    :arg src_origin: :class:`tuple` of :class:`int` of length
        three or shorter. (mandatory)
    :arg dest_origin: :class:`tuple` of :class:`int` of length
        three or shorter. (mandatory)
    :arg region: :class:`tuple` of :class:`int` of length
        three or shorter. (mandatory)

    .. ------------------------------------------------------------------------
    .. rubric :: Transfer :class:`SVM`/host ↔ :class:`SVM`/host
    .. ------------------------------------------------------------------------

    :arg byte_count: (optional) If not specified, defaults to the
        size of the source in versions 2012.x and earlier,
        and to the minimum of the size of the source and target
        from 2013.1 on.

    |std-enqueue-blurb|

    .. versionadded:: 2011.1
    """
    # Dispatch on the types of dest/src: device memory objects, SVM tags,
    # or (by elimination) host-side buffer objects.
    if isinstance(dest, MemoryObjectHolder):
        if dest.type == mem_object_type.BUFFER:
            if isinstance(src, MemoryObjectHolder):
                if src.type == mem_object_type.BUFFER:
                    # Presence of src_origin selects the rectangular variant.
                    if "src_origin" in kwargs:
                        return _cl._enqueue_copy_buffer_rect(
                                queue, src, dest, **kwargs)
                    else:
                        kwargs["dst_offset"] = kwargs.pop("dest_offset", 0)
                        return _cl._enqueue_copy_buffer(queue, src, dest, **kwargs)
                elif src.type in [mem_object_type.IMAGE2D, mem_object_type.IMAGE3D]:
                    return _cl._enqueue_copy_image_to_buffer(
                            queue, src, dest, **kwargs)
                else:
                    raise ValueError("invalid src mem object type")
            else:
                # assume from-host
                if "buffer_origin" in kwargs:
                    return _cl._enqueue_write_buffer_rect(queue, dest, src, **kwargs)
                else:
                    return _cl._enqueue_write_buffer(queue, dest, src, **kwargs)
        elif dest.type in [mem_object_type.IMAGE2D, mem_object_type.IMAGE3D]:
            if isinstance(src, MemoryObjectHolder):
                if src.type == mem_object_type.BUFFER:
                    return _cl._enqueue_copy_buffer_to_image(
                            queue, src, dest, **kwargs)
                elif src.type in [mem_object_type.IMAGE2D, mem_object_type.IMAGE3D]:
                    return _cl._enqueue_copy_image(queue, src, dest, **kwargs)
                else:
                    raise ValueError("invalid src mem object type")
            else:
                # assume from-host
                origin = kwargs.pop("origin")
                region = kwargs.pop("region")
                pitches = kwargs.pop("pitches", (0, 0))
                if len(pitches) == 1:
                    kwargs["row_pitch"], = pitches
                else:
                    kwargs["row_pitch"], kwargs["slice_pitch"] = pitches
                return _cl._enqueue_write_image(
                        queue, dest, origin, region, src, **kwargs)
        else:
            raise ValueError("invalid dest mem object type")
    elif get_cl_header_version() >= (2, 0) and isinstance(dest, SVM):
        # to SVM
        if not isinstance(src, SVM):
            src = SVM(src)
        is_blocking = kwargs.pop("is_blocking", True)
        return _cl._enqueue_svm_memcpy(queue, is_blocking, dest, src, **kwargs)
    else:
        # assume to-host
        if isinstance(src, MemoryObjectHolder):
            if src.type == mem_object_type.BUFFER:
                if "buffer_origin" in kwargs:
                    return _cl._enqueue_read_buffer_rect(queue, src, dest, **kwargs)
                else:
                    return _cl._enqueue_read_buffer(queue, src, dest, **kwargs)
            elif src.type in [mem_object_type.IMAGE2D, mem_object_type.IMAGE3D]:
                origin = kwargs.pop("origin")
                region = kwargs.pop("region")
                pitches = kwargs.pop("pitches", (0, 0))
                if len(pitches) == 1:
                    kwargs["row_pitch"], = pitches
                else:
                    kwargs["row_pitch"], kwargs["slice_pitch"] = pitches
                return _cl._enqueue_read_image(
                        queue, src, origin, region, dest, **kwargs)
            else:
                raise ValueError("invalid src mem object type")
        elif isinstance(src, SVM):
            # from svm
            # dest is not a SVM instance, otherwise we'd be in the branch above
            is_blocking = kwargs.pop("is_blocking", True)
            return _cl._enqueue_svm_memcpy(
                    queue, is_blocking, SVM(dest), src, **kwargs)
        else:
            # assume from-host
            raise TypeError("enqueue_copy cannot perform host-to-host transfers")
# }}}
# {{{ image creation
# Maps numpy dtypes to the corresponding (unnormalized) image channel types.
DTYPE_TO_CHANNEL_TYPE = {
        np.dtype(np.float32): channel_type.FLOAT,
        np.dtype(np.int16): channel_type.SIGNED_INT16,
        np.dtype(np.int32): channel_type.SIGNED_INT32,
        np.dtype(np.int8): channel_type.SIGNED_INT8,
        np.dtype(np.uint16): channel_type.UNSIGNED_INT16,
        np.dtype(np.uint32): channel_type.UNSIGNED_INT32,
        np.dtype(np.uint8): channel_type.UNSIGNED_INT8,
        }
# float16 support is optional in numpy builds; add it only if available.
try:
    np.float16
except Exception:
    pass
else:
    DTYPE_TO_CHANNEL_TYPE[np.dtype(np.float16)] = channel_type.HALF_FLOAT
# Maps numpy dtypes to normalized (int-to-[0,1]/[-1,1]) channel types.
DTYPE_TO_CHANNEL_TYPE_NORM = {
        np.dtype(np.int16): channel_type.SNORM_INT16,
        np.dtype(np.int8): channel_type.SNORM_INT8,
        np.dtype(np.uint16): channel_type.UNORM_INT16,
        np.dtype(np.uint8): channel_type.UNORM_INT8,
        }
def image_from_array(ctx, ary, num_channels=None, mode="r", norm_int=False):
    """Create an :class:`Image` from the C-contiguous :class:`numpy.ndarray` *ary*.

    :arg ctx: a :class:`Context`
    :arg num_channels: number of channels per pixel. If *None*, it is deduced
        from *ary*'s (possibly vector) dtype; otherwise the last axis of *ary*
        must have length *num_channels*.
    :arg mode: ``"r"`` (read-only) or ``"w"`` (write-only).
    :arg norm_int: if *True*, use normalized-integer channel types.
    :returns: an :class:`Image` initialized with a copy of *ary*'s data.
    :raises ValueError: if *ary* is not C-contiguous or *mode* is invalid.
    :raises RuntimeError: if the last axis does not match *num_channels*.
    """
    if not ary.flags.c_contiguous:
        raise ValueError("array must be C-contiguous")

    dtype = ary.dtype
    if num_channels is None:
        import pyopencl.cltypes
        try:
            # A vector dtype (e.g. float4) encodes both scalar type and count.
            dtype, num_channels = \
                    pyopencl.cltypes.vec_type_to_scalar_and_count[dtype]
        except KeyError:
            # It must be a scalar type then.
            num_channels = 1
        shape = ary.shape
        strides = ary.strides
    elif num_channels == 1:
        shape = ary.shape
        strides = ary.strides
    else:
        if ary.shape[-1] != num_channels:
            raise RuntimeError("last dimension must be equal to number of channels")
        # The channel axis is folded into the pixel, not the image shape.
        shape = ary.shape[:-1]
        strides = ary.strides[:-1]

    if mode == "r":
        mode_flags = mem_flags.READ_ONLY
    elif mode == "w":
        mode_flags = mem_flags.WRITE_ONLY
    else:
        raise ValueError("invalid value '%s' for 'mode'" % mode)

    img_format = {
        1: channel_order.R,
        2: channel_order.RG,
        3: channel_order.RGB,
        4: channel_order.RGBA,
        }[num_channels]

    assert ary.strides[-1] == ary.dtype.itemsize

    # Use a distinct local name so the module-level ``channel_type`` enum
    # is not shadowed.
    if norm_int:
        img_channel_type = DTYPE_TO_CHANNEL_TYPE_NORM[dtype]
    else:
        img_channel_type = DTYPE_TO_CHANNEL_TYPE[dtype]

    # Image expects (width, height[, depth]), i.e. reversed numpy order;
    # likewise the pitches are the reversed strides minus the innermost one.
    return Image(ctx, mode_flags | mem_flags.COPY_HOST_PTR,
            ImageFormat(img_format, img_channel_type),
            shape=shape[::-1], pitches=strides[::-1][1:],
            hostbuf=ary)
# }}}
# {{{ enqueue_* compatibility shims
def enqueue_marker(queue, wait_for=None):
    """Enqueue a marker, using the CL 1.2 wait-list form when both the queue
    and the installed header support it."""
    have_cl12 = (queue._get_cl_version() >= (1, 2)
            and get_cl_header_version() >= (1, 2))
    if have_cl12:
        return _cl._enqueue_marker_with_wait_list(queue, wait_for)
    # Pre-1.2 emulation: explicitly wait, then enqueue a plain marker.
    if wait_for:
        _cl._enqueue_wait_for_events(queue, wait_for)
    return _cl._enqueue_marker(queue)
def enqueue_barrier(queue, wait_for=None):
    """Enqueue a barrier, using the CL 1.2 wait-list form when both the queue
    and the installed header support it."""
    have_cl12 = (queue._get_cl_version() >= (1, 2)
            and get_cl_header_version() >= (1, 2))
    if have_cl12:
        return _cl._enqueue_barrier_with_wait_list(queue, wait_for)
    # Pre-1.2 emulation: barrier, explicit wait, and a marker as the
    # returned event.
    _cl._enqueue_barrier(queue)
    if wait_for:
        _cl._enqueue_wait_for_events(queue, wait_for)
    return _cl._enqueue_marker(queue)
def enqueue_fill_buffer(queue, mem, pattern, offset, size, wait_for=None):
    """Fill *size* bytes of *mem* starting at *offset* with *pattern*
    (a CL 1.2 feature; warns if the queue does not declare 1.2 support)."""
    have_cl12 = (queue._get_cl_version() >= (1, 2)
            and get_cl_header_version() >= (1, 2))
    if not have_cl12:
        from warnings import warn
        warn("The context for this queue does not declare OpenCL 1.2 support, so "
                "the next thing you might see is a crash")

    # PyPy cannot hand numpy scalars to the C layer directly.
    if _PYPY and isinstance(pattern, np.generic):
        pattern = np.asarray(pattern)

    return _cl._enqueue_fill_buffer(queue, mem, pattern, offset, size, wait_for)
# }}}
# {{{ numpy-like svm allocation
def enqueue_svm_memfill(queue, dest, pattern, byte_count=None, wait_for=None):
    """Fill shared virtual memory with a pattern.

    :arg dest: a Python buffer object, optionally wrapped in an :class:`SVM` object
    :arg pattern: a Python buffer object (e.g. a :class:`numpy.ndarray`) with the
        fill pattern to be used.
    :arg byte_count: The size of the memory to be filled. Defaults to the
        entirety of *dest*.

    |std-enqueue-blurb|

    .. versionadded:: 2016.2
    """
    if not isinstance(dest, SVM):
        dest = SVM(dest)

    # Forward the caller's arguments; the original passed literal None for
    # both, silently ignoring byte_count and wait_for.
    return _cl._enqueue_svm_memfill(
        queue, dest, pattern, byte_count=byte_count, wait_for=wait_for)
def enqueue_svm_migratemem(queue, svms, flags, wait_for=None):
    """
    :arg svms: a collection of Python buffer objects (e.g. :mod:`numpy`
        arrrays), optionally wrapped in :class:`SVM` objects.
    :arg flags: a combination of :class:`mem_migration_flags`

    |std-enqueue-blurb|

    .. versionadded:: 2016.2

    This function requires OpenCL 2.1.
    """
    # Unwrap SVM tags down to the raw buffers the C layer expects.
    raw_buffers = [entry.mem if isinstance(entry, SVM) else entry
            for entry in svms]
    return _cl._enqueue_svm_migratemem(queue, raw_buffers, flags, wait_for)
def svm_empty(ctx, flags, shape, dtype, order="C", alignment=None):
    """Allocate an uninitialized :class:`numpy.ndarray` of the given *shape*,
    *dtype* and *order* (see :func:`numpy.empty` for the meaning of these
    arguments), backed by shared virtual memory belonging to *ctx*.

    :arg ctx: a :class:`Context`
    :arg flags: a combination of flags from :class:`svm_mem_flags`.
    :arg alignment: the number of bytes to which the beginning of the memory
        is aligned. Defaults to the :attr:`numpy.dtype.itemsize` of *dtype*.

    :returns: a :class:`numpy.ndarray` whose :attr:`numpy.ndarray.base` attribute
        is a :class:`SVMAllocation`.

    To pass the resulting array to an OpenCL kernel or :func:`enqueue_copy`, you
    will likely want to wrap the returned array in an :class:`SVM` tag.

    .. versionadded:: 2016.2
    """
    dtype = np.dtype(dtype)

    # Total element count; a non-iterable *shape* is treated as a single
    # integer extent.
    try:
        num_elements = 1
        for extent in shape:
            num_elements *= extent
    except TypeError:
        import sys
        if sys.version_info >= (3,):
            admissible_types = (int, np.integer)
        else:
            admissible_types = (np.integer,) + six.integer_types
        if not isinstance(shape, admissible_types):
            raise TypeError("shape must either be iterable or "
                    "castable to an integer")
        num_elements = shape
        shape = (num_elements,)

    itemsize = dtype.itemsize
    nbytes = num_elements * itemsize

    from pyopencl.compyte.array import c_contiguous_strides, f_contiguous_strides
    if order in "fF":
        strides = f_contiguous_strides(itemsize, shape)
    elif order in "cC":
        strides = c_contiguous_strides(itemsize, shape)
    else:
        raise ValueError("order not recognized: %s" % order)

    # Build a __array_interface__ description for the allocation.
    descr = dtype.descr
    interface = {
        "version": 3,
        "shape": shape,
        "strides": strides,
        }
    if len(descr) == 1:
        interface["typestr"] = descr[0][1]
    else:
        # Structured dtype: expose it as raw void bytes plus the field descr.
        interface["typestr"] = "V%d" % itemsize
        interface["descr"] = descr

    if alignment is None:
        alignment = itemsize

    svm_alloc = SVMAllocation(ctx, nbytes, alignment, flags, _interface=interface)
    return np.asarray(svm_alloc)
def svm_empty_like(ctx, flags, ary, alignment=None):
    """Allocate an empty :class:`numpy.ndarray` like the existing
    :class:`numpy.ndarray` *ary*. The array will be allocated in shared
    virtual memory belonging to *ctx*.

    :arg ctx: a :class:`Context`
    :arg flags: a combination of flags from :class:`svm_mem_flags`.
    :arg alignment: the number of bytes to which the beginning of the memory
        is aligned. Defaults to the :attr:`numpy.dtype.itemsize` of *dtype*.

    :returns: a :class:`numpy.ndarray` whose :attr:`numpy.ndarray.base` attribute
        is a :class:`SVMAllocation`.

    To pass the resulting array to an OpenCL kernel or :func:`enqueue_copy`, you
    will likely want to wrap the returned array in an :class:`SVM` tag.

    .. versionadded:: 2016.2
    """
    # Preserve the source array's memory layout.
    if ary.flags.c_contiguous:
        contiguity = "C"
    elif ary.flags.f_contiguous:
        contiguity = "F"
    else:
        raise ValueError("array is neither C- nor Fortran-contiguous")
    return svm_empty(
            ctx, flags, ary.shape, ary.dtype, contiguity, alignment=alignment)
def csvm_empty(ctx, shape, dtype, order="C", alignment=None):
    """
    Like :func:`svm_empty`, but with *flags* set for a coarse-grain read-write
    buffer.

    .. versionadded:: 2016.2
    """
    return svm_empty(
            ctx, svm_mem_flags.READ_WRITE, shape, dtype, order, alignment)
def csvm_empty_like(ctx, ary, alignment=None):
    """
    Like :func:`svm_empty_like`, but with *flags* set for a coarse-grain
    read-write buffer.

    :arg alignment: forwarded to :func:`svm_empty_like` (the original
        silently dropped it).

    .. versionadded:: 2016.2
    """
    return svm_empty_like(ctx, svm_mem_flags.READ_WRITE, ary,
            alignment=alignment)
def fsvm_empty(ctx, shape, dtype, order="C", alignment=None):
    """
    Like :func:`svm_empty`, but with *flags* set for a fine-grain read-write
    buffer.

    .. versionadded:: 2016.2
    """
    fine_grain_rw = (
            svm_mem_flags.READ_WRITE | svm_mem_flags.SVM_FINE_GRAIN_BUFFER)
    return svm_empty(ctx, fine_grain_rw, shape, dtype, order, alignment)
def fsvm_empty_like(ctx, ary, alignment=None):
    """
    Like :func:`svm_empty_like`, but with *flags* set for a fine-grain
    read-write buffer.

    :arg alignment: forwarded to :func:`svm_empty_like` (the original
        silently dropped it).

    .. versionadded:: 2016.2
    """
    return svm_empty_like(
        ctx,
        svm_mem_flags.READ_WRITE | svm_mem_flags.SVM_FINE_GRAIN_BUFFER,
        ary,
        alignment=alignment)
# }}}
# Classes accepted directly as kernel arguments; SVM joins the tuple when
# the installed CL header declares OpenCL 2.0 support.
_KERNEL_ARG_CLASSES = (MemoryObjectHolder, Sampler, CommandQueue, LocalMemory)
if get_cl_header_version() >= (2, 0):
    _KERNEL_ARG_CLASSES += (SVM,)
# vim: foldmethod=marker
| StarcoderdataPython |
11238085 | from datasets.basic_dataset_scaffold import BaseDataset
import os, numpy as np, pandas as pd
def Give(opt, datapath):
    """Build train/query/gallery `BaseDataset`s for the In-Shop Clothes benchmark.

    Reads `Eval/list_eval_partition.txt`, remaps item ids to contiguous integer
    class labels (train is remapped independently of query+gallery), groups
    image paths per class, and wraps the groupings in `BaseDataset` objects.

    Args:
        opt: options namespace; `opt.train_val_split` (float in (0, 1]) controls
            whether a validation split is carved out of the query classes.
        datapath: root directory of the In-Shop dataset.

    Returns:
        Dict of named datasets: 'training', 'testing_query', 'testing_gallery',
        'evaluation', 'super_evaluation', and (possibly None) validation splits.
    """
    # Skip the header row; columns are (image_path, item_id, partition).
    data_info = np.array(pd.read_table(datapath+'/Eval/list_eval_partition.txt', header=1, delim_whitespace=True))[1:,:]
    train, query, gallery = data_info[data_info[:,2]=='train'][:,:2], data_info[data_info[:,2]=='query'][:,:2], data_info[data_info[:,2]=='gallery'][:,:2]
    # Remap raw item ids (suffix of 'id_XXXXXXXX') to contiguous 0..N-1 labels;
    # train uses its own mapping, query+gallery share a second mapping.
    lab_conv = {x:i for i,x in enumerate(np.unique(np.array([int(x.split('_')[-1]) for x in train[:,1]])))}
    train[:,1] = np.array([lab_conv[int(x.split('_')[-1])] for x in train[:,1]])
    lab_conv = {x:i for i,x in enumerate(np.unique(np.array([int(x.split('_')[-1]) for x in np.concatenate([query[:,1], gallery[:,1]])])))}
    query[:,1] = np.array([lab_conv[int(x.split('_')[-1])] for x in query[:,1]])
    gallery[:,1] = np.array([lab_conv[int(x.split('_')[-1])] for x in gallery[:,1]])
    # Group absolute image paths by class label for each partition.
    train_image_dict = {}
    for img_path, key in train:
        if not key in train_image_dict.keys():
            train_image_dict[key] = []
        train_image_dict[key].append(datapath+'/'+img_path)
    query_image_dict = {}
    for img_path, key in query:
        if not key in query_image_dict.keys():
            query_image_dict[key] = []
        query_image_dict[key].append(datapath+'/'+img_path)
    gallery_image_dict = {}
    for img_path, key in gallery:
        if not key in gallery_image_dict.keys():
            gallery_image_dict[key] = []
        gallery_image_dict[key].append(datapath+'/'+img_path)
    # "Super" labels: group train images by product category (path components
    # 1:3), assigning a fresh integer per unseen category.
    super_train_image_dict, counter, super_assign = {},0,{}
    for img_path, _ in train:
        key = '_'.join(img_path.split('/')[1:3])
        if key not in super_assign.keys():
            super_assign[key] = counter
            counter += 1
        key = super_assign[key]
        if not key in super_train_image_dict.keys():
            super_train_image_dict[key] = []
        super_train_image_dict[key].append(datapath+'/'+img_path)
    query_keys = list(query_image_dict.keys())
    gallery_keys = list(gallery_image_dict.keys())
    if opt.train_val_split!=1:
        #NOTE: In In-Shop, training-validation split by class is generally disallowed due to classes having very low membernumbers!
        train_val_split = int(len(query_keys)*opt.train_val_split)
        train, val = query_keys[:train_val_split], query_keys[train_val_split:]
        query_train, gallery_train = train[:len(train)//2], train[len(train)//2:]
        query_val, gallery_val = val[:len(val)//2], val[len(val)//2:]
        # NOTE(review): these comprehensions index `train_image_dict` with keys
        # taken from the *query* class list, even though train and query/gallery
        # labels were remapped independently — presumably a KeyError risk;
        # verify whether `query_image_dict`/`gallery_image_dict` were intended.
        query_image_dict_train, query_image_dict_val = {key:train_image_dict[key] for key in query_train},{key:train_image_dict[key] for key in query_val}
        gallery_image_dict_train, gallery_image_dict_val = {key:train_image_dict[key] for key in gallery_train},{key:train_image_dict[key] for key in gallery_val}
        query_dataset_val = BaseDataset(query_image_dict_val, opt, is_validation=True)
        gallery_dataset_val = BaseDataset(gallery_image_dict_val, opt, is_validation=True)
    else:
        query_image_dict_train, gallery_image_dict_train = query_image_dict, gallery_image_dict
        query_dataset_val, gallery_dataset_val = None, None
    train_dataset = BaseDataset(train_image_dict, opt)
    super_train_dataset = BaseDataset(super_train_image_dict, opt, is_validation=True)
    eval_dataset = BaseDataset(train_image_dict, opt, is_validation=True)
    # NOTE(review): the *_train dicts computed above are unused here — the test
    # datasets are built from the full query/gallery dicts; confirm intent.
    query_dataset_train = BaseDataset(query_image_dict, opt, is_validation=True)
    gallery_dataset_train = BaseDataset(gallery_image_dict, opt, is_validation=True)
    return {'training':train_dataset, 'testing_query':query_dataset_train, 'evaluation':eval_dataset,
            'validation_query':query_dataset_val, 'validation_gallery':gallery_dataset_val,
            'testing_gallery':gallery_dataset_train, 'super_evaluation':super_train_dataset}
6554803 | import os
import pickle
from slu import constants as const
def save_label_encoder(model_dir, encoder):
    """Pickle the intent label *encoder* into *model_dir*."""
    target_path = os.path.join(model_dir, const.S_INTNET_LABEL_ENCODER)
    with open(target_path, "wb") as handle:
        pickle.dump(encoder, handle)
def read_label_encoder(model_dir):
    """Load and return the pickled intent label encoder from *model_dir*."""
    source_path = os.path.join(model_dir, const.S_INTNET_LABEL_ENCODER)
    with open(source_path, "rb") as handle:
        return pickle.load(handle)
def save_intent_labels(model_dir, labels):
    """Pickle the intent *labels* into *model_dir*."""
    target_path = os.path.join(model_dir, const.S_ENTITY_LABELS)
    with open(target_path, "wb") as f:
        pickle.dump(labels, f)
def read_intent_labels(model_dir):
    """Load and return the pickled intent labels from *model_dir*.

    The original version was missing the ``return`` and always yielded None.
    """
    with open(os.path.join(model_dir, const.S_ENTITY_LABELS), "rb") as f:
        return pickle.load(f)
| StarcoderdataPython |
11203015 | <reponame>trainorpj/probability
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for the AutoGraph frontend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
from absl import logging
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.internal.auto_batching import frontend
from tensorflow_probability.python.internal.auto_batching import instructions
from tensorflow_probability.python.internal.auto_batching import numpy_backend
from tensorflow_probability.python.internal.auto_batching import tf_backend
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
# Backend singletons shared by all the tests in this file.
TF_BACKEND = tf_backend.TensorFlowBackend()
NP_BACKEND = numpy_backend.NumpyBackend()
# Eensy weensy test function
def fibonacci(n):
  """Return the n-th Fibonacci number, with fibonacci(0) == fibonacci(1) == 1."""
  return 1 if n <= 1 else fibonacci(n - 2) + fibonacci(n - 1)
@test_util.run_all_in_graph_and_eager_modes
class AutoGraphFrontendTest(tf.test.TestCase):
  """End-to-end tests of the auto-batching AutoGraph frontend.

  Each test batches a small Python program via `frontend.Context` and checks
  the batched results against the unbatched semantics, on the numpy and/or
  TensorFlow backends.
  """
  def testFibonacci(self):
    self.assertEqual(1, fibonacci(0))
    self.assertEqual(1, fibonacci(1))
    self.assertEqual(2, fibonacci(2))
    self.assertEqual(3, fibonacci(3))
    self.assertEqual(5, fibonacci(4))
    self.assertEqual(8, fibonacci(5))
    self.assertEqual(13, fibonacci(6))
    self.assertEqual(21, fibonacci(7))
    self.assertEqual(34, fibonacci(8))
    self.assertEqual(55, fibonacci(9))
  def testFibonacciNumpy(self):
    batch_fibo = frontend.Context().batch_uncurried(
        fibonacci,
        lambda *args: instructions.TensorType(np.int64, ()))
    self.assertEqual(
        [13, 21, 34, 55],
        list(batch_fibo(np.array([6, 7, 8, 9], dtype=np.int64),
                        max_stack_depth=15, backend=NP_BACKEND)))
  def testFibonacciNumpyStackless(self):
    # Stackless execution is only available in eager mode.
    if not tf.executing_eagerly():
      return
    batch_fibo = frontend.Context().batch_uncurried(
        fibonacci,
        lambda *args: instructions.TensorType(np.int64, ()))
    self.assertEqual(
        [3, 21, 5, 8],
        list(batch_fibo(np.array([3, 7, 4, 5], dtype=np.int64),
                        max_stack_depth=15, backend=NP_BACKEND,
                        stackless=True)))
  def testEvenOddWithContext(self):
    # Mutually recursive batched functions registered on one Context.
    def pred_type(_):
      return instructions.TensorType(np.int32, ())
    ab = frontend.Context()
    @ab.batch(type_inference=pred_type)
    def even(n):
      if n <= 0:
        return True
      else:
        return odd(n - 1)
    @ab.batch(type_inference=pred_type)
    def odd(n):
      if n <= 0:
        return False
      else:
        return even(n - 1)
    inputs = np.array([5, 6, 8, 9], dtype=np.int32)
    # NOTE(review): np.bool is deprecated/removed in newer numpy; np.bool_
    # may be needed here.
    outputs = np.array([False, True, True, False], dtype=np.bool)
    # pylint: disable=unexpected-keyword-arg
    # The `max_stack_depth` and `backend` keyword arguments to `even`
    # are introduced by the `@ab.batch` decorator, confusing pylint.
    self.assertAllEqual(
        outputs,
        self.evaluate(even(inputs, max_stack_depth=15, backend=TF_BACKEND)))
  def testSyntheticMultipleValueReturns(self):
    def id_type(args):
      return args[0]
    def function(x):
      a, b, c = [1, 2, 3]
      return x + a + b + c
    batched = frontend.Context().batch_uncurried(function, id_type)
    self.assertEqual(
        [12, 13, 14],
        list(batched(np.array([6, 7, 8], dtype=np.int64),
                     max_stack_depth=15, backend=NP_BACKEND)))
  def testNestedMultipleValueReturns(self):
    def my_primop():
      return [([1, 2], 3), 4]
    def id_type(args):
      return args[0]
    def function(x):
      [(a, b), c], d = my_primop()
      return x + a + b + c + d
    batched = frontend.Context().batch_uncurried(function, id_type)
    self.assertEqual(
        [16, 17, 18],
        list(batched(np.array([6, 7, 8], dtype=np.int64),
                     max_stack_depth=15, backend=NP_BACKEND)))
  def testNamedTuples(self):
    # Should be destructurable, and should be conserved
    # - on input to the auto-batched program
    # - on input to primops or functions
    # - on output from primops or functions, and
    # - on output from the auto-batched program.
    my_tuple = collections.namedtuple('my_tuple', ['x', 'y'])
    def my_primop(thing):
      return my_tuple(thing.x + 1, thing.y + 2)
    def id_type(args):
      return args[0]
    def function(x):
      thing1 = my_primop(x)
      thing2 = my_primop(thing1)
      return thing2
    def caller(x):
      thing1 = function(x)
      thing2 = function(thing1)
      return thing2
    ctx = frontend.Context()
    ctx.batch_uncurried(function, id_type)
    batched = ctx.batch_uncurried(caller, id_type)
    input_ = my_tuple(np.array([6, 7, 8], dtype=np.int64),
                      np.array([60, 70, 80], dtype=np.int64))
    output = my_tuple(np.array([10, 11, 12], dtype=np.int64),
                      np.array([68, 78, 88], dtype=np.int64))
    result = batched(input_, max_stack_depth=15, backend=NP_BACKEND)
    self.assertEqual(type(output), type(result))
    self.assertAllEqual(output, result)
  def testDisablingAutoBatching(self):
    execution_counter_box = [0]
    def count_executions(x):
      # The autobatching system will unconditionally run this exactly thrice per
      # occurrence in the source: once to infer the type, once to prove that
      # type inference stabilized, and once during graph construction.
      execution_counter_box[0] += 1
      return x
    def id_type(args):
      return args[0]
    def function(x):
      true = True
      if true:
        return count_executions(x)
      else:
        return count_executions(x)
    batched = frontend.Context().batch_uncurried(function, id_type)
    # Running the original function should increment the box once
    function(np.array([4]))
    self.assertEqual(execution_counter_box[0], 1)
    execution_counter_box[0] = 0
    if tf.executing_eagerly():
      # Running the batched version in eager mode should increment the box five
      # times (twice per static occurrence and once for the time it actually
      # executes)
      expected_execution_count = 5
    else:
      # Running the batched version in graph mode should increment the box six
      # times (thrice per occurrence)
      expected_execution_count = 6
    batched(np.array([4, 5, 6, 7, 8]))
    self.assertEqual(execution_counter_box[0], expected_execution_count)
    # Running the batched version in dry-run mode should increment the box once,
    # because that should mimic the original function.
    execution_counter_box[0] = 0
    batched(np.array([4]), dry_run=True)
    self.assertEqual(execution_counter_box[0], 1)
  def testDisablingAutoBatchingNested(self):
    execution_counter_box = [0]
    def count_executions(x):
      # The autobatching system will unconditionally run this exactly thrice per
      # occurrence in the source: once to infer the type, once to prove that
      # type inference stabilized, and once during graph construction.
      execution_counter_box[0] += 1
      return x
    def id_type(args):
      return args[0]
    ctx = frontend.Context()
    @ctx.batch(type_inference=id_type)
    def function(x):
      true = True
      if true:
        return count_executions(x)
      else:
        return count_executions(x)
    @ctx.batch(type_inference=id_type)
    def caller(x):
      return function(x)
    if tf.executing_eagerly():
      # Running the batched version in eager mode should increment the box five
      # times (twice per static occurrence and once for the time it actually
      # executes)
      expected_execution_count = 5
    else:
      # Running the batched version in graph mode should increment the box six
      # times (thrice per occurrence)
      expected_execution_count = 6
    caller(np.array([4, 5, 6, 7, 8]))
    self.assertEqual(execution_counter_box[0], expected_execution_count)
    # Running the batched version in dry-run mode should increment the box once,
    # because that should mimic the original function.
    execution_counter_box[0] = 0
    # pylint: disable=unexpected-keyword-arg
    # The `dry_run` keyword argument to `caller` is introduced by the
    # `@ctx.batch` decorator, confusing pylint.
    caller(np.array([4]), dry_run=True)
    self.assertEqual(execution_counter_box[0], 1)
  def testDryRunIf(self):
    def id_type(args):
      return args[0]
    truthy = frontend.truthy
    def batch_abs(x):
      if truthy(x > 0):
        return x
      else:
        return -x
    batched = frontend.Context().batch_uncurried(batch_abs, id_type)
    inputs = np.array([12, -13, 14], dtype=np.int64)
    self.assertAllEqual(
        [12, 13, 14],
        self.evaluate(batched(inputs, max_stack_depth=15, backend=TF_BACKEND)))
    # Note: trying to dry-run control flow will only work in Eager mode, because
    # Graph-mode Tensors cannot be used as `if` conditions at all.
    if tf.executing_eagerly():
      self.assertEqual([12], self.evaluate(
          batched(tf.constant([12]), dry_run=True)))
      self.assertEqual([13], self.evaluate(
          batched(tf.constant([-13]), dry_run=True)))
  def testConsumeEmitMultipleValues(self):
    def dup_type(args):
      return args[0]
    def function(inp):
      x, y = inp
      return x + 1, y + 2
    batched = frontend.Context().batch_uncurried(function, dup_type)
    inputs = (np.array([12, -13, 14], dtype=np.int32),
              np.array([[4, 3], [3, 2], [2, 1]], dtype=np.int32))
    expected_outputs = ([13, -12, 15], [[6, 5], [5, 4], [4, 3]])
    got_outputs = self.evaluate(
        batched(inputs, max_stack_depth=15, backend=TF_BACKEND))
    self.assertEqual(len(expected_outputs), len(got_outputs))
    for exp, got in zip(expected_outputs, got_outputs):
      self.assertAllEqual(exp, got)
  def testConsumeEmitMultipleValuesNested(self):
    def dup_type(args):
      return args[0]
    ctx = frontend.Context()
    @ctx.batch(type_inference=dup_type)
    def function(inp):
      x, y = inp
      return x + 1, y + 2
    @ctx.batch(type_inference=dup_type)
    def caller(inp):
      ans1, ans2 = function(inp)
      return ans1, ans2
    inputs = (np.array([12, -13, 14], dtype=np.int32),
              np.array([[4, 8], [3, 6], [2, 4]], dtype=np.int32))
    expected_outputs = ([13, -12, 15], [[6, 10], [5, 8], [4, 6]])
    # pylint: disable=unexpected-keyword-arg
    # The `max_stack_depth` and `backend` keyword arguments to `caller`
    # are introduced by the `@ctx.batch` decorator, confusing pylint.
    got_outputs = self.evaluate(
        caller(inputs, max_stack_depth=15, backend=TF_BACKEND))
    self.assertEqual(len(expected_outputs), len(got_outputs))
    for exp, got in zip(expected_outputs, got_outputs):
      self.assertAllEqual(exp, got)
  def testRestructureOnFunctionReturn(self):
    def quad_type(args):
      return ((args[0], args[0]), (args[0], args[0]))
    ctx = frontend.Context()
    @ctx.batch(type_inference=quad_type)
    def function(x):
      left = x + 1, x + 2
      right_1 = x + 3
      right_2 = x + 4
      return left, (right_1, right_2)
    def id_type(args):
      return args[0]
    @ctx.batch(type_inference=id_type)
    def caller(x):
      (left_1, left_2), right = function(x)
      right_1, right_2 = right
      return left_1 + left_2 + right_1 + right_2
    inputs = np.array([12, -13, 14], dtype=np.int32)
    # pylint: disable=unexpected-keyword-arg
    # The `max_stack_depth` and `backend` keyword arguments to `caller`
    # are introduced by the `@ctx.batch` decorator, confusing pylint.
    self.assertAllEqual(
        [58, -42, 66],
        self.evaluate(caller(inputs, max_stack_depth=5, backend=TF_BACKEND)))
  def testBatchDimensionSensitivePrimop(self):
    def batchwise_reduce_sum(x):
      return tf.reduce_sum(input_tensor=x, axis=tf.range(1, tf.rank(x)))
    def my_type(args):
      return instructions.TensorType(args[0].dtype, ())
    def function(x):
      y = batchwise_reduce_sum(x)
      return y + y
    batched = frontend.Context().batch_uncurried(function, my_type)
    inputs = np.array([[12, 13], [-13, 10], [14, 1]], dtype=np.int32)
    self.assertAllEqual(
        [50, -6, 30],
        self.evaluate(batched(inputs, max_stack_depth=5, backend=TF_BACKEND)))
  def testUseDummyVariableForPrimop(self):
    def my_type(args):
      return args[0], args[0]
    def callee(x):
      return x, x + 1
    def function(x):
      _, y = callee(x)
      return y, y + 1
    ctx = frontend.Context()
    batched = ctx.batch_uncurried(function, my_type)
    inputs = np.array([12, 13, 14], dtype=np.int32)
    self.assertAllEqual(
        [[13, 14, 15], [14, 15, 16]],
        self.evaluate(batched(inputs, max_stack_depth=5, backend=TF_BACKEND)))
  def testUseDummyVariableForCall(self):
    def my_type(args):
      return args[0], args[0]
    def callee(x):
      return x, x + 1
    def function(x):
      _, y = callee(x)
      return y, y + 1
    ctx = frontend.Context()
    ctx.batch_uncurried(callee, my_type)
    batched = ctx.batch_uncurried(function, my_type)
    inputs = np.array([12, 13, 14], dtype=np.int32)
    self.assertAllEqual(
        [[13, 14, 15], [14, 15, 16]],
        self.evaluate(batched(inputs, max_stack_depth=5, backend=TF_BACKEND)))
  def testBroadcastInputsToBatchSize(self):
    # The desired batch size is inferred from the inputs. This test is checking
    # that the system supports broadcasting (other) inputs across the batch
    # dimension, which requires not accidentally inferring a batch size of 1.
    def my_type(args):
      return args[0]
    def function(a, b, c, d, e, f, g):
      return a + b + c + d + e + f + g
    a_in = np.array([12, 13, 14], dtype=np.int32)
    b_in = np.array([2], dtype=np.int32)
    c_in = np.array([3], dtype=np.int32)
    d_in = np.array([4], dtype=np.int32)
    ctx = frontend.Context()
    batched = ctx.batch_uncurried(function, my_type)
    # Repeat several times, because the buggy behavior this is catching
    # depended on Python dict order traversal.
    for _ in range(10):
      self.assertAllEqual(
          [39, 40, 41],
          batched(a_in, b_in, c_in, d_in, 5, 6, 7,
                  max_stack_depth=5, backend=NP_BACKEND))
  def testCompileCache(self):
    # Compilation is memoized on (name, signature, backend).
    ctx = frontend.Context()
    def my_type(args):
      return args[0]
    def function(x):
      return x + x
    ctx.batch_uncurried(function, my_type)
    sig = [instructions.TensorType(np.int64, ())]
    prog1 = ctx.program_compiled('function', sig, NP_BACKEND)
    prog2 = ctx.program_compiled('function', sig, NP_BACKEND)
    self.assertEqual(prog1, prog2)
    sig2 = [instructions.TensorType(np.int32, ())]
    prog3 = ctx.program_compiled('function', sig2, NP_BACKEND)
    self.assertNotEqual(prog1, prog3)
  def testLoweringCache(self):
    # Lowering is likewise memoized on (name, signature, backend).
    ctx = frontend.Context()
    def my_type(args):
      return args[0]
    def function(x):
      return x + x
    ctx.batch_uncurried(function, my_type)
    sig = [instructions.TensorType(np.int64, ())]
    prog1 = ctx.program_lowered('function', sig, NP_BACKEND)
    prog2 = ctx.program_lowered('function', sig, NP_BACKEND)
    self.assertEqual(prog1, prog2)
    sig2 = [instructions.TensorType(np.int32, ())]
    prog3 = ctx.program_lowered('function', sig2, NP_BACKEND)
    self.assertNotEqual(prog1, prog3)
  def testSelfTailCallOptimization(self):
    def my_type(args):
      return args[0]
    ab = frontend.Context()
    @ab.batch(type_inference=my_type)
    def iota_sum(n, acc):
      if n <= 0:
        return acc
      else:
        return iota_sum(n - 1, acc + n)
    inputs = [np.array([12, -13, 100], dtype=np.int32),
              np.array([0, 0, 0], dtype=np.int32)]
    result = ab.lowered_for_args('iota_sum', inputs, backend=TF_BACKEND)
    # Check no pushes
    for i in range(result.graph.exit_index()):
      block = result.graph.block(i)
      if isinstance(block.terminator, instructions.PushGotoOp):
        assert False, 'iota_sum is tail-recursive: should not push the PC'
    for var, alloc in result.var_alloc.items():
      if alloc == instructions.VariableAllocation.FULL:
        if var != instructions.pc_var:
          assert False, 'iota_sum should not push any data'
    # pylint: disable=unexpected-keyword-arg
    # The `max_stack_depth` and `backend` keyword arguments to `iota_sum`
    # are introduced by the `@ctx.batch` decorator, confusing pylint.
    self.assertAllEqual(
        [78, 0, 5050],
        self.evaluate(iota_sum(*inputs, max_stack_depth=3, backend=TF_BACKEND)))
  def testParameterSwapping(self):
    # The thing that's interesting about this test program is that it contains a
    # self-call that writes variables to themselves, but misaligned.  To
    # implement this correctly, it is necessary to introduce a temporary
    # variable so that `a` and `b` can be swapped (b/135275883).
    def trace(x):
      logging.info(x)
      return x
    ab = frontend.Context()
    def gcd_type(args):
      return args[0]
    @ab.batch(type_inference=gcd_type)
    def gcd(a, b):
      if trace(a) == 0:
        return b
      elif a <= b:
        return gcd(a, b - a)
      else:
        # TODO(b/135275883): Remove these temporaries and check that this
        # program still works.
        a_tmp = a
        b_tmp = b
        return gcd(b_tmp, a_tmp)
    input_a = np.array([7, 12, 49], dtype=np.int32)
    input_b = np.array([9, 9, 56], dtype=np.int32)
    # pylint: disable=unexpected-keyword-arg
    # The `max_stack_depth` and `backend` keyword arguments to `gcd`
    # are introduced by the `@ab.batch` decorator, confusing pylint.
    self.assertAllEqual(
        [1, 3, 7],
        gcd(input_a, input_b, max_stack_depth=3, backend=NP_BACKEND))
class _TestHidingTFBatchSize(object):
  """Mixin exercising the frontend with static vs. dynamic TF batch sizes.

  Concrete subclasses set `use_static_batch_size` to control whether the
  batch dimension of input placeholders is statically known.
  """
  def _build_tensor(self, ndarray):
    # Hide the batch dimension behind a placeholder when the subclass asks
    # for a dynamic batch size.
    if self.use_static_batch_size:
      shape = ndarray.shape
    else:
      shape = [None] + list(ndarray.shape[1:])
    return tf.compat.v1.placeholder_with_default(input=ndarray, shape=shape)
  def _check_batch_size(self, tensor, expected):
    # In eager mode shapes are always concrete, regardless of the setting.
    if self.use_static_batch_size or tf.executing_eagerly():
      self.assertEqual(expected, tensor.shape[0])
    else:
      self.assertEqual(None, tensor.shape[0].value)
  def testFibonacciTF(self):
    batch_fibo = frontend.Context().batch_uncurried(
        fibonacci,
        lambda *args: instructions.TensorType(np.int64, ()))
    input_2 = self._build_tensor(np.array([6, 7, 8], dtype=np.int64))
    answer = batch_fibo(input_2, max_stack_depth=15, backend=TF_BACKEND)
    self._check_batch_size(answer, 3)
    self.assertAllEqual([13, 21, 34], self.evaluate(answer))
  def testOneArmedAndNestedIf(self):
    def int_type(_):
      return instructions.TensorType(np.int32, ())
    ctx = frontend.Context()
    @ctx.batch(type_inference=int_type)
    def function(x):
      ans = 1
      if x > 4:
        if x > 10:
          ans = 5
        else:
          ans = 3
      return ans
    inputs = self._build_tensor(np.array([1, 5, 60, 3, 7], dtype=np.int32))
    outputs = np.array([1, 3, 5, 1, 3], dtype=np.int32)
    # pylint: disable=unexpected-keyword-arg
    # The `max_stack_depth` and `backend` keyword arguments to `function`
    # are introduced by the `@ctx.batch` decorator, confusing pylint.
    answer = function(inputs, max_stack_depth=15, backend=TF_BACKEND)
    self._check_batch_size(answer, 5)
    self.assertAllEqual(outputs, self.evaluate(answer))
  def testCallingUntaggedFunctions(self):
    def id_type(args):
      return args[0]
    def subroutine(batched_x):
      return tf.reduce_sum(input_tensor=batched_x, axis=-1, keepdims=True)
    ctx = frontend.Context()
    @ctx.batch(type_inference=id_type)
    def function(x):
      y = subroutine(x)
      return x + y
    inputs = self._build_tensor(np.array([[1, 2],
                                          [5, 6],
                                          [60, 61]], dtype=np.int32))
    outputs = np.array([[4, 5],
                        [16, 17],
                        [181, 182]], dtype=np.int32)
    # pylint: disable=unexpected-keyword-arg
    # The `max_stack_depth` and `backend` keyword arguments to `function`
    # are introduced by the `@ctx.batch` decorator, confusing pylint.
    answer = function(inputs, max_stack_depth=15, backend=TF_BACKEND)
    self._check_batch_size(answer, 3)
    self.assertAllEqual(outputs, self.evaluate(answer))
  def testReferToEnclosingScope(self):
    an_object = 'four'
    def an_op_on_objects(obj):
      return len(obj)
    def id_type(args):
      return args[0]
    def an_autobatch_function(x):
      # Expect the object to be pulled in from the enclosing scope, not
      # converted to an auto-batch variable.
      return x + an_op_on_objects(an_object)
    batched = frontend.Context().batch_uncurried(an_autobatch_function, id_type)
    inputs = self._build_tensor(np.array([12, -13, 14], dtype=np.int32))
    answer = batched(inputs, max_stack_depth=15, backend=TF_BACKEND)
    self._check_batch_size(answer, 3)
    self.assertAllEqual([16, -9, 18], self.evaluate(answer))
@test_util.run_all_in_graph_and_eager_modes
class TestTFStaticBatchSize(tf.test.TestCase, _TestHidingTFBatchSize):
  """Runs the _TestHidingTFBatchSize cases with a statically known batch size."""
  use_static_batch_size = True
@test_util.run_all_in_graph_and_eager_modes
class TestTFDynamicBatchSize(tf.test.TestCase, _TestHidingTFBatchSize):
  """Runs the _TestHidingTFBatchSize cases with a dynamic (unknown) batch size."""
  use_static_batch_size = False
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| StarcoderdataPython |
6618850 | import logging
from tqdm import tqdm
import hail as hl
from gnomad.resources.resource_utils import DataException
from gnomad.utils.file_utils import parallel_file_exists
from tgg.batch.batch_utils import (
check_storage_bucket_region,
HG38_REF_PATHS,
localize_file,
init_arg_parser,
init_job,
run_batch,
set_gcloud_project,
)
# Configure root logging once at import time; this module logs under the
# "run_haplotypecaller" name at INFO level.
logging.basicConfig(
    format="%(asctime)s (%(name)s %(lineno)s): %(message)s",
    datefmt="%m/%d/%Y %I:%M:%S %p",
)
logger = logging.getLogger("run_haplotypecaller")
logger.setLevel(logging.INFO)
EXCLUDE_INTERVALS = (
    "gs://gnomad-bw2/exclude_intervals_with_non_ACGT_bases_in_GRCh38__150bp_window.bed"
)
"""
Variants to exclude when running HaplotypeCaller.
"""
PADDING_AROUND_VARIANT = 200
"""
Amount of padding to add around each variant when running HaplotypeCaller.
"""
def parse_args():
    """Parse command line args.

    Returns
    -------
    tuple
        ``(parser, args)`` — the configured ``argparse.ArgumentParser``
        and the parsed ``argparse.Namespace``.
    """
    p = init_arg_parser(default_cpu=1, default_billing_project="gnomad-production")
    p.add_argument(
        "--gcloud-project",
        help="Google cloud project. Default is 'broad-mpg-gnomad'.",
        default="broad-mpg-gnomad",
    )
    p.add_argument(
        "-p",
        "--output-dir",
        help="Where to write haplotype caller output.",
        default="gs://gnomad-bw2/gnomad_v3_1_readviz_bamout",
    )
    p.add_argument(
        "--docker-image",
        help="Docker image to use.",
        default="gcr.io/broad-mpg-gnomad/gnomad-readviz@sha256:7013fc57e3471617a314b08e2bcefe4711d401f83500c5c57e9a3e79ee8efebd",
    )
    # NOTE: the mixed hyphen/underscore spelling of this flag is kept for CLI
    # backward compatibility; argparse maps it to
    # `args.cram_and_tsv_paths_table` either way.
    p.add_argument(
        "--cram-and-tsv_paths-table",
        # Help now lists every column main() actually validates.
        help="A text file containing at least these columns: sample_id, cram_path, crai_path, variants_tsv_bgz",
        # Was an f-string with no placeholders; a plain literal is equivalent.
        default="step4_output_cram_and_tsv_paths_table.tsv",
    )
    args = p.parse_args()
    return p, args
def main():
    """
    Run HaplotypeCaller to generate bamouts.

    Step 5 of readviz pipeline.
    """
    _, args = parse_args()
    hl.init(log="/dev/null", quiet=True)

    project = args.gcloud_project
    docker_image = args.docker_image
    set_gcloud_project(project)

    logger.info("Making sure input cram_and_tsv_paths_table arg is valid...")
    bams = {}
    samples = {}
    with hl.hadoop_open(args.cram_and_tsv_paths_table) as c:
        # Confirm header has all required columns
        header = c.readline().strip().split("\t")
        if {"sample_id", "cram_path", "crai_path", "variants_tsv_bgz"} - set(header):
            # BUGFIX: the original message contained a bare '%s' that was
            # never interpolated (and a missing quote); include the actual
            # file path instead.
            raise DataException(
                f"{args.cram_and_tsv_paths_table} must contain 'sample_id', 'cram_path', 'crai_path', 'variants_tsv_bgz' columns!"
            )

        # Log once, not once per sample row.
        logger.info(
            "Checking that all input crams are 'US-CENTRAL1' or multi-regional buckets..."
        )
        for line in c:
            sample, cram, crai, variants_tsv_bgz = line.strip().split("\t")

            # Store output BAM path.
            # BUGFIX: parse_args defines `--output-dir` (args.output_dir);
            # the previous code read the non-existent `args.output_prefix`,
            # which raised AttributeError on the first data row.
            bam = f"{args.output_dir}/{sample}.bamout.bam"
            bai = f"{args.output_dir}/{sample}.bamout.bai"
            bams[sample] = bam

            # Store sample information
            samples[sample] = [cram, crai, variants_tsv_bgz, bam, bai]

            # Check that all buckets are in "US-CENTRAL1" or are
            # multi-regional to avoid egress charges to the Batch cluster
            check_storage_bucket_region(cram)

    logger.info("Checking if any output bams already exist...")
    bam_files_exist = parallel_file_exists(list(bams.values()))
    samples_without_bams = []
    for sample in bams:
        if not bam_files_exist[bams[sample]]:
            samples_without_bams.append(sample)

    # Process only samples whose bamout is not already present.
    with run_batch(args, batch_name="HaplotypeCaller -bamout") as batch:
        for sample in tqdm(samples_without_bams, unit="samples"):
            cram, crai, variants_tsv_bgz, bam, bai = samples[sample]
            j = init_job(
                batch, f"readviz: {sample}", docker_image, args.cpu, args.memory,
            )
            j.command(
                f"""gcloud -q auth activate-service-account --key-file=/gsa-key/key.json"""
            )
            # Localize reference + per-sample inputs into the job container.
            local_exclude_intervals = localize_file(j, EXCLUDE_INTERVALS)
            local_fasta = localize_file(j, HG38_REF_PATHS.fasta, use_gcsfuse=True)
            local_fasta_fai = localize_file(j, HG38_REF_PATHS.fai, use_gcsfuse=True)
            localize_file(j, HG38_REF_PATHS.dict, use_gcsfuse=True)
            local_tsv_bgz = localize_file(j, variants_tsv_bgz)
            local_cram_path = localize_file(j, cram)
            j.command(
                f"""echo --------------
echo "Start - time: $(date)"
df -kh
# 1) Convert variants_tsv_bgz to sorted interval list
gunzip -c "{local_tsv_bgz}" | awk '{{ OFS="\t" }} {{ print( "chr"$1, $2, $2 ) }}' | bedtools slop -b {PADDING_AROUND_VARIANT} -g {local_fasta_fai} > variant_windows.bed
# Sort the .bed file so that chromosomes are in the same order as in the input_cram file.
# Without this, if the input_cram has a different chromosome ordering (eg. chr1, chr10, .. vs. chr1, chr2, ..)
# than the interval list passed to GATK tools' -L arg, then GATK may silently skip some of regions in the -L intervals.
# The sort is done by first retrieving the input_cram header and passing it to GATK BedToIntervalList.
java -Xms2g -jar /gatk/gatk.jar PrintReadsHeader \
--gcs-project-for-requester-pays {project} \
-R {local_fasta} \
-I "{local_cram_path}" \
-O header.bam
java -Xms2g -jar /gatk/gatk.jar BedToIntervalList \
--SORT true \
--SEQUENCE_DICTIONARY header.bam \
--INPUT variant_windows.bed \
--OUTPUT variant_windows.interval_list
# 2) Get reads from the input_cram for the intervals in variant_windows.interval_list
time java -XX:GCTimeLimit=50 -XX:GCHeapFreeLimit=10 -XX:+DisableAttachMechanism -XX:MaxHeapSize=2000m -Xmx30000m \
-jar /gatk/GATK35.jar \
-T HaplotypeCaller \
-R {local_fasta} \
-I "{local_cram_path}" \
-L variant_windows.interval_list \
-XL {local_exclude_intervals} \
--disable_auto_index_creation_and_locking_when_reading_rods \
-ERC GVCF \
--max_alternate_alleles 3 \
-variant_index_parameter 128000 \
-variant_index_type LINEAR \
--read_filter OverclippedRead \
-bamout "{sample}.bamout.bam" \
-o "{sample}.gvcf" |& grep -v "^DEBUG"
bgzip "{sample}.gvcf"
tabix "{sample}.gvcf.gz"
gsutil -m cp "{sample}.bamout.bam" {args.output_dir}
gsutil -m cp "{sample}.bamout.bai" {args.output_dir}
gsutil -m cp "{sample}.gvcf.gz" {args.output_dir}
gsutil -m cp "{sample}.gvcf.gz.tbi" {args.output_dir}
ls -lh
echo --------------; free -h; df -kh; uptime; set +xe; echo "Done - time: $(date)"; echo --------------
"""
            )


if __name__ == "__main__":
    main()
| StarcoderdataPython |
import inspect
from contextlib import ExitStack, contextmanager
from typing import Iterator
import numpy as np
from gustavgrad.tensor import Tensor
class Parameter(Tensor):
    """A Tensor that is registered as a trainable part of a Module."""

    def __init__(self, *shape: int) -> None:
        # TODO: Add possibility to use custom initialization schemes
        # Standard-normal initialization; gradients are always tracked.
        initial_data = np.random.randn(*shape)
        super().__init__(initial_data, requires_grad=True)
class Module:
    """Groups several Parameters into a single computational block."""

    def parameters(self) -> Iterator[Parameter]:
        """Yield every Parameter owned by this module, recursing into
        sub-modules."""
        for _, member in inspect.getmembers(self):
            if isinstance(member, Parameter):
                yield member
            elif isinstance(member, Module):
                yield from member.parameters()

    def zero_grad(self) -> None:
        """Reset the gradient of every registered parameter."""
        for param in self.parameters():
            param.zero_grad()

    @contextmanager
    def no_grad(self) -> Iterator[None]:
        """Context manager that disables gradient tracking for all
        parameters of this module at once."""
        with ExitStack() as stack:
            for param in self.parameters():
                stack.enter_context(param.no_grad())
            yield
| StarcoderdataPython |
"""Generate BFV keys and encrypt gallery feature vectors with SEAL.

Saves the public/secret/relin/galois keys and one encrypted ciphertext per
gallery vector under ``./data/``.
"""
# NOTE: a stray dataset artifact line (`<gh_stars>0`) preceded the imports in
# the original and made the file syntactically invalid; it has been removed.
import os
import sys

import numpy as np
from seal import *

PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(),
             os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from utils import seal_helper

precision = float(125)  # precision of 1/125 = 0.004
poly_modulus_degree = 4096

# BFV encryption parameters.
parms = EncryptionParameters(scheme_type.bfv)
parms.set_poly_modulus_degree(poly_modulus_degree)
parms.set_coeff_modulus(CoeffModulus.BFVDefault(poly_modulus_degree))
# Seems like 16 also works
parms.set_plain_modulus(PlainModulus.Batching(poly_modulus_degree, 20))
context = SEALContext(parms)
print("Set encryption parameters and print")
seal_helper.print_parameters(context)

keygen = KeyGenerator(context)
gal_key = keygen.create_galois_keys()
relin_key = keygen.create_relin_keys()
public_key = keygen.create_public_key()
secret_key = keygen.secret_key()

evaluator = Evaluator(context)
batch_encoder = BatchEncoder(context)
encryptor = Encryptor(context, public_key)
decryptor = Decryptor(context, secret_key)

# Save the keys (public, secret, relin and galois).
data_path = './data/'
for key_obj, file_stem, label in (
    (public_key, 'public_key', 'Public Key'),
    (secret_key, 'secret_key', 'Secret Key'),
    (relin_key, 'relin_key', 'Relin Keys'),
    (gal_key, 'galois_key', 'Galois Keys'),
):
    name = data_path + file_stem + '_bfv_1_to_1_py.bin'
    key_obj.save(name)
    print("Saving " + label + ": " + name)

slot_count = int(batch_encoder.slot_count())
# In BFV batching the slots form a 2 x (slot_count/2) matrix; only one row
# is used per gallery here.  (Two gallery instances could share one vector
# for a 2x speed/storage gain, depending on implementation.)
row_size = slot_count // 2

# File layout: int64 num_gallery, int64 dim_gallery, then num_gallery
# float32 vectors of length dim_gallery.
with open(data_path + "gallery-1-to-1.bin", "rb") as f:
    num_gallery = int(np.fromfile(f, dtype=int, count=1))
    dim_gallery = int(np.fromfile(f, dtype=int, count=1))
    for i in range(num_gallery):
        # Load gallery from file
        gallery = np.fromfile(f, dtype=np.float32, count=dim_gallery)
        # Fixed-point encode the vector into the first dim_gallery slots,
        # zero-padding the rest of the row.
        pod_matrix = [
            np.int64(round(precision * gallery[j])) if j < dim_gallery
            else np.int64(0)
            for j in range(row_size)
        ]
        # Encrypt entire vector of gallery
        plain_matrix = batch_encoder.encode(pod_matrix)
        print("Encrypting Gallery: " + str(i))
        encrypted_matrix = encryptor.encrypt(plain_matrix)
        # Save encrypted feature vector to disk.
        name = data_path + 'encrypted_gallery_bfv_1_to_1_' + str(i) + \
            '_py.bin'
        encrypted_matrix.save(name)
print("Done")
| StarcoderdataPython |
from __future__ import annotations
import math
import re
from decimal import Decimal, ROUND_HALF_UP
from dateutil.parser._parser import ParserError
from typing import Dict, Hashable, Union
import json
import numpy
import pandas
from pandas import Series
from .utils import to_utf8_bytes
from .errors import InvalidRedshiftType
# A dtype specification: either a Redshift type string (e.g. "NUMERIC(10,2)")
# or a RedshiftType instance.
Dtype = Union[str, "RedshiftType"]
# A single Dtype, or a per-column mapping of column label -> Dtype.
DtypeArg = Union[Dtype, Dict[Hashable, Dtype]]
# Splits "NAME" / "NAME(arg, arg)" declarations: group 1 is the type name,
# group 3 the optional comma-separated argument list.
_TYPE_REGEX = re.compile(r"^([a-zA-Z0-9 ]*)(\(([0-9, ]*?)\))?$")
def get_redshift_type(type_str):
    """Resolve a Redshift type declaration string (e.g. ``"NUMERIC(10,2)"``)
    to the matching RedshiftType instance.

    Raises ``InvalidRedshiftType`` when the string does not parse or names
    an unknown type.
    """
    matched = _TYPE_REGEX.match(type_str)
    if not matched:
        raise InvalidRedshiftType(
            "Redshift type not found for '{}'".format(type_str)
        )
    # Group 1 is the (case-insensitive) type name, group 3 the optional
    # comma-separated argument list such as precision/scale.
    name = matched.group(1).upper().strip()
    raw_args = matched.group(3)
    type_dict = {
        "SMALLINT": SmallInt,
        "INT2": SmallInt,
        "INTEGER": Integer,
        "INT": Integer,
        "INT4": Integer,
        "BIGINT": BigInt,
        "INT8": BigInt,
        "DECIMAL": Numeric,
        "NUMERIC": Numeric,
        "REAL": Real,
        "FLOAT4": Real,
        "DOUBLE PRECISION": DoublePrecision,
        "FLOAT8": DoublePrecision,
        "FLOAT": DoublePrecision,
        "BOOLEAN": Boolean,
        "BOOL": Boolean,
        "CHAR": Char,
        "CHARACTER": Char,
        "NCHAR": Char,
        "BPCHAR": BPChar,
        "VARCHAR": VarChar,
        "CHARACTER VARYING": VarChar,
        "NVARCHAR": VarChar,
        "TEXT": Text,
        "DATE": Date,
        "TIMESTAMP": TimeStamp,
        "TIMESTAMP WITHOUT TIME ZONE": TimeStamp,
        "TIMESTAMPTZ": TimeStampTz,
        "TIMESTAMP WITH TIME ZONE": TimeStampTz,
        "TIME": Time,
        "TIME WITHOUT TIME ZONE": Time,
        "TIMETZ": TimeTz,
        "TIME WITH TIME ZONE": TimeTz,
        "GEOMETRY": Geometry,
        "SUPER": Super,
    }
    if name not in type_dict:
        raise InvalidRedshiftType(
            "Redshift type not found for '{}'".format(type_str)
        )
    redshift_cls = type_dict[name]
    if raw_args:
        ctor_args = [int(piece.strip()) for piece in raw_args.split(",")]
    else:
        ctor_args = []
    return redshift_cls(*ctor_args)
class RedshiftType(object):
    """An abstract type for Redshift types.
    Each type has encoder and decoder.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the proper
        type, for ease of dealing.
    """

    # (old, new) replacement pairs applied in order when quoting a literal.
    # The backslash pair must stay first: otherwise the backslashes that the
    # later replacements introduce would themselves be escaped again.
    _ESCAPES = [
        ("\\", "\\\\"),
        ("'", "\\'"),
        ("\n", "\\n"),
        ("\t", "\\t"),
        ("\b", "\\b"),
        ("\f", "\\f"),
    ]

    def _check(self, text, ubytes):
        # Validation hook for subclasses (e.g. Char enforces byte length and
        # rejects multibyte characters); the base type accepts anything.
        pass

    def _encode_text(self, text):
        # NULL-ish values become the unquoted SQL keyword NULL.
        if pandas.isnull(text) or pandas.isna(text):
            return "NULL"
        # Re-encode as UTF-8, the multibyte charset Redshift supports.
        # NOTE(review): to_utf8_bytes is a project helper; presumably it also
        # sanitizes characters Redshift cannot store -- verify in .utils.
        ubytes = to_utf8_bytes(str(text))
        encoded_text = ubytes.decode("utf-8")
        self._check(encoded_text, ubytes)
        # Normalize line terminators before escaping.
        encoded_text = "\n".join(encoded_text.splitlines())
        for old, new in self._ESCAPES:
            encoded_text = encoded_text.replace(old, new)
        # Single-quote the result so it is a valid SQL string literal.
        return "'{}'".format(encoded_text)

    def encode(self, col: Series) -> Series:
        """Encode objects stored in a column for pandas.DataFrame to
        ``str``-typed Redshift notations, which are used in DMLs.
        First, values are casted to string. Next, character encoding is
        changed to ``utf-8``, which Redshift supports as a multibyte
        character set. Next, strings are checked in terms of length or
        multibyte characters to avoid errors when running ``INSERT``
        statements. Then, escapes are replaced. Finally, the string is quoted.
        Parameters
        ----------
        col : pandas.Series
            The column storing original objects in pandas.DataFrame.
        Returns
        -------
        encoded_col : pandas.Series
            Column storing Redshift notations.
        """
        encoded_col = col.fillna(numpy.nan)
        encoded_col = encoded_col.map(self._encode_text)
        return encoded_col

    def decode(self, col: Series) -> Series:
        """Decode response from Redshift data api to Python ojects. See
        comments on each Redshift type class to confirm what type or class
        is used.
        Parameters
        ----------
        col :
            Column storing raw values in response from Redshift Data API.
        Returns
        -------
        col :
            Column storing Python objects.
        """
        # Base implementation is the identity; subclasses override.
        return col

    def __str__(self):
        # NOTE(review): the base class itself does not define
        # __redshift_name__; every concrete subclass must set it, otherwise
        # str() raises AttributeError.
        return self.__redshift_name__
class DoublePrecision(RedshiftType):
    """A type for Redshift ``DOUBLE PRECISION`` type.
    Decoded to numpy ``float64``; the encoder accepts anything castable
    to numpy ``float64``.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the proper
        numpy float type, for ease of dealing.
    """

    __np_type__ = "float64"
    __redshift_name__ = "DOUBLE PRECISION"
    __min_abs__ = 2.22507385850721e-308
    __max_abs__ = 1.79769313486231e308
    __to_be_checked__ = True

    def _check_range(self, value):
        """Raise ``TypeError`` when *value* would under/overflow this type."""
        # NULL and exact zero are always representable.
        if pandas.isna(value) or value == 0.0:
            return value
        magnitude = abs(value)
        if not (self.__min_abs__ <= magnitude <= self.__max_abs__):
            raise TypeError(
                "'{}' is out of range for type '{}'".format(value, str(self))
            )
        return value

    def encode(self, col: Series) -> Series:
        """Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
        Values are cast to the numpy float type, range-checked to avoid
        overflow/underflow errors in ``INSERT`` statements, then cast to
        str (with NaN becoming the keyword ``NULL``).
        Parameters
        ----------
        col : pandas.Series
            The column storing original objects in pandas.DataFrame.
        Returns
        -------
        encoded_col : pandas.Series
            Column storing Redshift notations.
        """
        as_floats = col.astype(self.__np_type__).fillna(numpy.nan)
        if self.__to_be_checked__:
            # map() is used purely for its raising side effect; the mapped
            # result is discarded.
            as_floats.map(self._check_range)
        return as_floats.replace([numpy.nan], ["NULL"]).astype(str)

    def decode(self, col: Series) -> Series:
        """Map raw str/float values in a Redshift Data API response to
        the proper numpy float type, for ease of dealing.
        Parameters
        ----------
        col :
            Column storing raw values in response from Redshift Data API.
        Returns
        -------
        col :
            Column storing numpy float values.
        """
        return col.astype(self.__np_type__)
class Real(DoublePrecision):
    """A type for Redshift ``REAL`` type.
    This type is decoded to numpy ``float64`` type since decimal
    inaccuracy is observed in case of using numpy ``float32``.
    The encoder for this type accepts any values which are able to
    casted to numpy ``float64`` type and do not cause overflow or
    underflow for Redshift ``REAL`` type.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the proper
        numpy float type, for ease of dealing.
    """

    __redshift_name__ = "REAL"
    # Single-precision range limits enforced by the inherited encoder.
    __min_abs__ = 1.1755e-38
    __max_abs__ = 3.40282e38
class Numeric(DoublePrecision):
    """A type for Redshift ``DECIMAL`` type.
    In this library, the alias ``NUMERIC`` is used instead to avoid
    conflict with Python ``decimal.Decimal`` type.
    There are not any fixed point types in numpy. This made us
    develop the decoder to cast values from Redshift Data API to
    Python ``decimal.Decimal``. Hense, the output for the decoder
    looks ``object``-type Series.
    The encoder for this type accepts any values which are able to
    casted to numpy ``float128`` type and do not cause overflow
    for the decimal with the specific precision and scale.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the proper
        ``decimal.Decimal`` type, for ease of dealing.
    """

    # NOTE(review): numpy "float128" is not available on every platform
    # (e.g. Windows builds of numpy) -- confirm the deployment targets.
    __np_type__ = "float128"
    __redshift_name__ = "NUMERIC"

    def __init__(self, precision: int = 18, scale: int = 0):
        """Construct the Redshift ``NUMERIC`` type.
        Parameters
        ----------
        precision :
            the numeric precision for use in DDL ``CREATE TABLE``.
        :param scale: the numeric scale for use in DDL ``CREATE TABLE``.
        """
        # NUMERIC(18,0) is Redshift's default, so the bare name is kept then.
        if precision != 18 or scale != 0:
            self.__redshift_name__ = "NUMERIC({},{})".format(precision, scale)
        # Values with |v| >= 10^(precision - scale) cannot be represented.
        self.__max_abs__ = Decimal(str(math.pow(10.0, precision - scale)))
        # Quantize template like Decimal("1.000") for scale == 3.
        self.__exp_to_quantize__ = Decimal(
            "1.{}".format("".join(["0" for i in range(scale)]))
        )

    def _encode_numeric(self, val):
        if pandas.isna(val):
            return "NULL"
        # Round half-up to the configured scale (banker's rounding would be
        # the Decimal default otherwise).
        decimal_val = Decimal(str(val)).quantize(
            self.__exp_to_quantize__, rounding=ROUND_HALF_UP
        )
        decimal_val_abs = abs(decimal_val)
        # The bound itself is unrepresentable, hence <= (not <).
        if self.__max_abs__ <= decimal_val_abs:
            raise TypeError(
                "'{}' is out of range for type '{}'".format(
                    decimal_val, str(self)
                )
            )
        return str(decimal_val)

    def encode(self, col: Series) -> Series:
        """Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
        First, values are casted to numpy.float128 type to avoid
        numeric inaccuracy. Next, numpy.float128 values are converted to
        decimal.Decimal values which are accurate under the conditions of
        ``precision`` and ``scale.`` Then, after checking min/max to avoid
        overflow error, values are casted to str.
        Parameters
        ----------
        col : pandas.Series
            The column storing original objects in pandas.DataFrame.
        Returns
        -------
        encoded_col : pandas.Series
            Column storing Redshift notations.
        """
        encoded_col = col.astype(self.__np_type__)
        encoded_col = encoded_col.fillna(numpy.nan)
        encoded_col = encoded_col.map(self._encode_numeric)
        return encoded_col

    def decode(self, col: Series) -> Series:
        """Cast raw string values from the Redshift Data API to
        ``decimal.Decimal`` objects. To store ``decimal.Decimal`` objects,
        the ``dtype`` for the returned pandas.Series looks ``object``.
        Parameters
        ----------
        col :
            Column storing raw values in response from Redshift Data API.
        Returns
        -------
        col :
            Column storing ``decimal.Decimal`` values.
        """

        def _to_decimal(val):
            # NULL stays NaN rather than becoming Decimal("NaN").
            if pandas.isna(val):
                return numpy.nan
            return Decimal(val)

        return col.map(_to_decimal)
class Integer(DoublePrecision):
    """A type for Redshift ``INTEGER`` type.
    This type values are decoded to numpy ``int32`` in case NULL is
    not included, and otherwise decoded to pandas ``Int64``, which
    is the nullable integer type.
    The encoder for this type accepts any values which are able to
    casted to numpy ``Int64`` type and do not cause overflow
    for Redshift ``INTEGER`` type.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the proper
        numpy int type, for ease of dealing.
    """

    # Nullable pandas dtype used when NULLs are present.
    __np_type__ = "Int64"
    # Plain numpy dtype used when the column has no NULLs.
    __np_type_not_null__ = "int32"
    __redshift_name__ = "INTEGER"
    __min__ = -2147483648
    __max__ = 2147483647

    def _check_range(self, val):
        # NULL is always representable; pass it through as NaN.
        if pandas.isna(val):
            return numpy.nan
        if val < self.__min__ or self.__max__ < val:
            raise TypeError(
                "'{}' is out of range for type '{}'".format(val, str(self))
            )
        return val

    def decode(self, col: Series) -> Series:
        """Cast raw str/int values from the Redshift Data API to numpy
        int objects.
        If NULL is included, this decoder will use the nullable integer
        type ``Int64``. Otherwise, numpy integer types, which are ``int16``,
        ``int32``, or ``int64``, are used.
        Parameters
        ----------
        col :
            Column storing raw values in response from Redshift Data API.
        Returns
        -------
        col :
            Column storing numpy int values.
        """
        # Series.count() excludes NA values, so a mismatch with len()
        # means at least one NULL is present.
        if len(col) != col.count():
            return col.astype(self.__np_type__)
        return col.astype(self.__np_type_not_null__)
class SmallInt(Integer):
    """A type for Redshift ``SMALLINT`` type.
    This type values are decoded to numpy ``int16`` in case NULL is
    not included, and otherwise decoded to pandas ``Int64``, which
    is the nullable integer type.
    The encoder for this type accepts any values which are able to
    casted to numpy ``Int64`` type and do not cause overflow
    for Redshift ``SMALLINT`` type.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the proper
        numpy int type, for ease of dealing.
    """

    __np_type_not_null__ = "int16"
    __redshift_name__ = "SMALLINT"
    # 16-bit signed range enforced by the inherited _check_range.
    __min__ = -32768
    __max__ = 32767
class BigInt(Integer):
    """A type for Redshift ``BIGINT`` type.
    This type values are decoded to numpy ``int64`` in case NULL is
    not included, and otherwise decoded to pandas ``Int64``, which
    is the nullable integer type.
    The encoder for this type accepts any values which are able to
    casted to numpy ``Int64`` type.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the proper
        numpy int type, for ease of dealing.
    """

    __np_type_not_null__ = "int64"
    __redshift_name__ = "BIGINT"
    # Any Int64 value fits in BIGINT, so no range check is needed.
    __to_be_checked__ = False
class Boolean(Integer):
    """A type for Redshift ``BOOLEAN`` type.
    Decoded to numpy ``bool`` when no NULL is present, otherwise to the
    nullable pandas ``boolean`` dtype.  The encoder accepts anything
    castable to ``boolean``.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the proper
        numpy bool type, for ease of dealing.
    """

    __np_type__ = "boolean"
    __np_type_not_null__ = "bool"
    __redshift_name__ = "BOOLEAN"
    __to_be_checked__ = False

    def decode(self, col: Series) -> Series:
        """Convert the raw str values (``"true"``/``"false"``) from the
        Redshift Data API to numpy booleans.
        If NULL is included, the nullable ``boolean`` dtype is used;
        otherwise plain ``bool``.
        Parameters
        ----------
        col :
            Column storing raw values in response from Redshift Data API.
        Returns
        -------
        col :
            Column storing numpy bool values.
        """

        def _to_bool(raw):
            if pandas.isna(raw):
                return numpy.nan
            return raw == "true"

        # Integer.decode picks the nullable vs. plain dtype.
        return super().decode(col.map(_to_bool))
class Char(RedshiftType):
    """A type for Redshift ``CHAR`` type.
    This type values are decoded to Python ``str``.
    The encoder for this type accepts strings, but it rejects multibyte
    characters and too long strings.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the Python
        ``str`` type, for ease of dealing.
    """

    __multibyte_is_allowed__ = False
    __redshift_name__ = "CHAR"
    __default_length__ = 1
    __max_length__ = 4096

    def __init__(self, length: int = 0):
        """Construct the Redshift ``CHAR`` type.
        Parameters
        ----------
        length :
            Length limitation. ``0`` (the default) is a sentinel meaning
            "use the type's default length".
        """
        self.length = self.__default_length__ if length == 0 else length
        if self.length != self.__default_length__:
            if self.length > self.__max_length__:
                raise InvalidRedshiftType(
                    "The length '{}' is too long for '{}'".format(
                        self.length, self.__redshift_name__
                    )
                )
            # Instance attribute shadows the class attribute, e.g. "CHAR(10)".
            self.__redshift_name__ = "{}({})".format(
                self.__redshift_name__, length
            )

    def _check(self, text, ubytes):
        # A multibyte character makes the UTF-8 byte string longer than the
        # character count, so a length mismatch detects multibyte input.
        if (not self.__multibyte_is_allowed__) and len(text) != len(ubytes):
            raise TypeError("multibyte characters must not be included")
        # Redshift CHAR length is measured in bytes, hence ubytes.
        if len(ubytes) > self.length:
            raise TypeError(
                "'{}' exceeds length ({})".format(text, self.length)
            )
class BPChar(Char):
    """A type for Redshift ``BPCHAR`` type. This type is alias for
    ``CHAR`` type, but the specification about length is different:
    the length is fixed as 256.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the Python
        ``str`` type, for ease of dealing.
    """

    __redshift_name__ = "BPCHAR"
    __default_length__ = 256

    def __init__(self):
        """Construct the Redshift ``BPCHAR`` type."""
        # Fixed length; deliberately bypasses Char.__init__ and its
        # length validation / name rewriting.
        self.length = self.__default_length__
class VarChar(Char):
    """A type for Redshift ``VARCHAR`` type.
    This type values are decoded to Python ``str``.
    The encoder for this type accepts strings. Unlike ``CHAR`` type,
    this type accepts multibyte characters.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the Python
        ``str`` type, for ease of dealing.
    """

    # UTF-8 multibyte characters are allowed, unlike CHAR.
    __multibyte_is_allowed__ = True
    __redshift_name__ = "VARCHAR"
    __default_length__ = 256
    __max_length__ = 65535
class Text(VarChar):
    """A type for Redshift ``TEXT`` type. This type is alias for
    ``VARCHAR`` type, but the specification about length is different:
    the length is fixed as 256.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the Python
        ``str`` type, for ease of dealing.
    """

    __redshift_name__ = "TEXT"

    def __init__(self):
        """Construct the Redshift ``TEXT`` type."""
        # Fixed 256-byte length; bypasses Char.__init__ validation.
        self.length = self.__default_length__
class TimeStamp(RedshiftType):
    """A type for Redshift ``TIMESTAMP`` type.
    This type values are decoded with ``pandas.to_datetime``.
    The encoder for this type accepts any values which can be
    converted to datetime objects with ``pandas.to_datetime``.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the proper
        datetime.datetime type, for ease of dealing.
    """

    __redshift_name__ = "TIMESTAMP"
    # strftime pattern shared by encode/decode; subclasses override this
    # together with __utc__.
    __dt_format__ = "%Y-%m-%d %H:%M:%S"
    __utc__ = False

    def encode(self, col: Series) -> Series:
        """Encode objects stored in a column for pandas.DataFrame to
        ``str``-typed notations for Redshift DMLs.
        First, values are converted to datetime objects with
        ``pandas.to_datetime``.
        Next, values are converted to text with ``strftime`` and format.
        In the format, quote is included.
        Parameters
        ----------
        col : pandas.Series
            The column storing original objects in pandas.DataFrame.
        Returns
        -------
        encoded_col : pandas.Series
            Column storing Redshift notations.
        """

        def _strftime(obj):
            if pandas.isnull(obj) or pandas.isna(obj):
                return numpy.nan
            if hasattr(obj, "strftime"):
                return obj.strftime(self.__dt_format__)
            return obj

        # Pre-format objects that ``pandas.to_datetime`` cannot convert
        # directly (e.g. ``datetime.time``) via their own ``strftime``.
        encoded_col = col.map(_strftime)
        try:
            encoded_col = pandas.to_datetime(encoded_col, utc=self.__utc__)
        except ParserError as err:
            raise TypeError("cannot parse to datetime {}".format(str(err)))
        # Embed the surrounding quotes in the strftime pattern so the result
        # is already a quoted SQL literal.
        output_format = "'{}'".format(self.__dt_format__)
        encoded_col = encoded_col.dt.strftime(output_format)
        encoded_col = encoded_col.fillna("NULL")
        return encoded_col

    def decode(self, col: Series) -> Series:
        """Convert raw str values from the Redshift Data API to datetime
        objects with ``pandas.to_datetime``.
        Parameters
        ----------
        col :
            Column storing raw values in response from Redshift Data API.
        Returns
        -------
        col :
            Column storing datetime.datetime values.
        """
        # errors="coerce" maps unparseable values to NaT instead of raising.
        return pandas.to_datetime(col, errors="coerce", utc=self.__utc__)
class TimeStampTz(TimeStamp):
    """A type for Redshift ``TIMESTAMPTZ`` type.
    This type values are decoded with ``pandas.to_datetime`` and
    option ``utc``.
    The encoder for this type accepts any values which can be
    converted to datetime objects with ``pandas.to_datetime``.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the proper
        datetime.datetime type, for ease of dealing.
    """

    __redshift_name__ = "TIMESTAMPTZ"
    # %z keeps the UTC offset in the literal.
    __dt_format__ = "%Y-%m-%d %H:%M:%S%z"
    __utc__ = True
class Date(TimeStamp):
    """A type for Redshift ``DATE`` type.
    This type values are decoded by converting to datetime objects
    with ``pandas.to_datetime`` and in addition converting to date
    objects by ``datetime.date()``.
    The encoder for this type accepts any values which can be
    converted to datetime objects with ``pandas.to_datetime``.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the proper
        datetime.date type, for ease of dealing.
    """

    __redshift_name__ = "DATE"
    __dt_format__ = "%Y-%m-%d"

    def decode(self, col: Series) -> Series:
        """Convert raw str values to datetime via ``pandas.to_datetime``
        (inherited), then truncate each value to ``datetime.date``.
        Parameters
        ----------
        col :
            Column storing raw values in response from Redshift Data API.
        Returns
        -------
        col :
            Column storing datetime.date values.
        """
        col = super().decode(col)
        # NaT is passed through untouched.
        return col.map(lambda dt: dt.date() if not pandas.isna(dt) else dt)
class Time(TimeStamp):
    """A type for Redshift ``TIME`` type.
    This type values are decoded by converting to datetime objects
    with ``pandas.to_datetime`` and in addition converting to time
    objects by ``datetime.time()``.
    The encoder for this type accepts any values which can be
    converted to datetime objects with ``pandas.to_datetime``.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the proper
        datetime.time type, for ease of dealing.
    """

    __redshift_name__ = "TIME"
    __dt_format__ = "%H:%M:%S"

    def decode(self, col: Series) -> Series:
        """Convert raw str values to datetime via ``pandas.to_datetime``
        (inherited), then keep only the ``datetime.time`` part.
        Parameters
        ----------
        col :
            Column storing raw values in response from Redshift Data API.
        Returns
        -------
        col :
            Column storing datetime.time values.
        """
        col = super().decode(col)
        # NaT is passed through untouched.
        return col.map(lambda dt: dt.time() if not pandas.isna(dt) else dt)
class TimeTz(TimeStamp):
    """A type for Redshift ``TIMETZ`` type.
    This type values are decoded by converting to datetime objects
    with ``pandas.to_datetime`` and in addition converting to time
    objects by ``datetime.timetz()``.
    The encoder for this type accepts any values which can be
    converted to datetime objects with ``pandas.to_datetime``.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to the proper
        datetime.time type, for ease of dealing.
    """

    __redshift_name__ = "TIMETZ"
    __dt_format__ = "%H:%M:%S%z"
    __utc__ = True

    def decode(self, col: Series) -> Series:
        """Convert raw str values to (UTC) datetime via
        ``pandas.to_datetime`` (inherited), then keep only the tz-aware
        ``datetime.timetz`` part.
        Parameters
        ----------
        col :
            Column storing raw values in response from Redshift Data API.
        Returns
        -------
        col :
            Column storing datetime.time values.
        """
        col = super().decode(col)
        # NaT is passed through untouched.
        return col.map(lambda dt: dt.timetz() if not pandas.isna(dt) else dt)
class Geometry(RedshiftType):
    """A type for Redshift ``GEOMETRY`` type.
    The decoder or encoder for this type has not been implemented:
    GEOMETRY values can be represented in ``str`` for both query
    response and DMLs, so the base-class passthrough is sufficient.
    Methods
    -------
    encode :
        Encode objects stored in a column for pandas.DataFrame
        to ``str``-typed notations for Redshift DMLs.
    decode :
        Map raw values from Redshift Data API to Python
        ``str`` type, for ease of dealing.
    """

    __redshift_name__ = "GEOMETRY"
class Super(RedshiftType):
"""A type for Redshift ``SUPER`` type. This type is equivalent
to Python generic types: ``int``, ``float``, ``str``, ``list``,
``dict``, and ``None``.
This type values are decoded by converting to Python objects
mainly with ``json.loads``.
The encoder for this type accepts the six Python types mentioned
above. The semi-structured data, ``list`` and ``dict`` types,
are parsed with ``JSON_PARSE`` before ingestion to Redshift.
This type is sub-class for ``VarChar`` to leverage the private
method ``_encode_text``.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the proper
Python object type, for ease of dealing.
"""
__redshift_name__ = "SUPER"
def __init__(self):
"""Construct the Redshift ``SUPER`` type."""
self.length = 0
def encode(self, col: Series) -> Series:
"""Encode objects stored in a column for pandas.DataFrame to
``str``-typed notations for Redshift DMLs.
The behavior depends on the type for the value. 1. The ``int``
or ``float`` values are casted to ``str``, 2. ``str`` values
are transformed with the same logic as ``CHAR``/``VARCHAR``-type
encoder, 3. ``list`` or ``dict`` values are once transformed as
``str`` values are done and the transformed values are set as
the argument for ``JSON_PARSE``.
Parameters
----------
col : pandas.Series
The column storing original objects in pandas.DataFrame.
Returns
-------
encoded_col : pandas.Series
Column storing Redshift notations.
"""
def _encode_super(obj):
if obj is None:
return "NULL"
elif type(obj) is int or type(obj) is float:
if pandas.isna(obj):
return "NULL"
return str(obj)
elif type(obj) is str:
return self._encode_text(obj)
elif type(obj) is dict or type(obj) is list:
json_str = json.dumps(obj)
encoded_json = self._encode_text(json_str)
return "JSON_PARSE({})".format(encoded_json)
else:
raise TypeError(
"unsupported datatype {} for SUPER type".format(type(obj))
)
encoded_col = col.astype("object")
encoded_col = col.map(_encode_super)
return encoded_col
def decode(self, col: Series) -> Series:
"""Raw values in response from Redshift data api are represented
in str type. This decoder will convert raw values to Python
objects with ``json.loads``.
Parameters
----------
col :
Column storing raw values in response from Redshift Data API.
Returns
-------
col :
Column storing Python object values.
"""
def _decode_super(obj):
if pandas.isna(obj):
return numpy.nan
return json.loads(obj)
return col.map(_decode_super)
| StarcoderdataPython |
5070954 | <gh_stars>0
from django_filters import rest_framework as filters
from rest_framework import viewsets
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.permissions import AllowAny
from ..serializers.tipo_documento_serializer import TipoDocumentoSerializer
from ...models import TipoDocumento
class TiposDocumentoViewSet(viewsets.ReadOnlyModelViewSet):
lookup_field = 'id'
queryset = TipoDocumento.objects.filter(visivel=True).all()
serializer_class = TipoDocumentoSerializer
permission_classes = [AllowAny]
filter_backends = (filters.DjangoFilterBackend, SearchFilter, OrderingFilter)
ordering_fields = ('nome',)
search_fields = ('uuid', 'id', 'nome')
filter_fields = ('obrigatorio',)
def get_queryset(self):
return self.queryset
def get_serializer_class(self):
return TipoDocumentoSerializer
| StarcoderdataPython |
9606424 | <reponame>kbj2060/pytrader
import json
def remove_dict_items(dictionary, key_list):
list(map(dictionary.pop, key_list))
return dictionary
def clean_duplicate_2d(arr2d):
return list(set(map(tuple, arr2d)))
def write_json(filename, dictionary):
with open(filename, '+w', encoding='utf-8') as f:
json.dump(dictionary, f, indent=4, ensure_ascii=False)
def json2sell_buy(file_list):
stock_json = {}
FILE_NAME_FLAG = 0
for file in file_list:
with open(f'data/{file}', '+r', encoding='utf-8') as f:
_stock_json = json.load(f)
order_type = file.split('.')[FILE_NAME_FLAG]
stock_json[order_type] = _stock_json
return stock_json
def extract_digits_from_string(strings):
return "".join([s for s in strings if s.isdigit()])
def clean_price_value(string):
return string.replace(",", "")
def get_selected_table_row(table_name):
return [x.text() for x in table_name.selectedItems()]
def get_table_header(table_name):
n_columns = table_name.columnCount()
return [table_name.horizontalHeaderItem(idx).text() for idx in range(n_columns)]
def table_row2dict(table_name):
row = get_selected_table_row(table_name)
header = get_table_header(table_name)
if len(row) != len(header):
return False
else:
return dict(zip(header, row))
def id_equal(first_id, second_id):
if id(first_id) == id(second_id):
return True
else:
return False
def empty_check(var):
if not var:
return True
| StarcoderdataPython |
6674306 | <filename>ex079.py
"""Crie um programa onde o usuário possa digitar vários valores numéricos e cadastre-os em uma lista.
o número já exista lá dentro, ele não será adicionado.
No final, serão exibidos todos os valores únicos digitados, em ordem crescente."""
num = list()
resp = 'S'
while resp in "Ss":
n = (int(input('Digite um valor: ')))
if n not in num:
num.append(n)
print('Valor adicionado.')
else:
print('Valor duplicado. Não vou adicionar.')
resp = str(input('Quer continuar? [S/N] '))
print('Programa finalizado')
num.sort()
print('Os números digitados em ordem crescente foram:', end=' ')
print(*num, sep=', ')
| StarcoderdataPython |
232611 | """
Contains methods and classes pertaining to two dimensional bounds;
specifically circles and squares.
"""
from abc import ABCMeta, abstractmethod
import numpy as np
class Bounds(metaclass=ABCMeta):
"""
Represents a two-dimensional bounding area that can be tested for
intersections with a variety of geometric shapes.
Attributes:
center (numpy.array): The center of the bounding area in
two-dimensional space.
"""
def __init__(self, center=np.zeros(2, dtype=np.int32)):
self.center = center
@abstractmethod
def intersects(self, bounds):
"""
Tests whether or not an intersection occurred between this bounds the
specified bounds.
:param bounds: The bounding area to test.
:return: Whether or not an intersection occurred.
"""
pass
@abstractmethod
def intersects_circle(self, circle):
"""
Tests whether or not an intersection occurred between this bounds and
the specified bounding circle.
:param circle: The circle to test.
:return: Whether or not an intersection occurred.
"""
pass
@abstractmethod
def intersects_square(self, square):
"""
Tests whether or not an intersection occurred between this bounds and
the specified bounding square.
:param square: The square to test.
:return: Whether or not an intersection occurred.
"""
pass
| StarcoderdataPython |
8142435 | <reponame>ranjeethmahankali/galproject
import pygalfunc as pgf
import pygalview as pgv
import os
POINTS = [
(0, 0, 0),
(1, 0, 0),
(1, 1, 0),
(-.3, 1, 0),
(0, -1, 0),
]
GLYPHDATA = ["/home/rnjth94/works/YouTube/GAL_BoundingCircle/receiverDishGlyph.png",
"/home/rnjth94/works/YouTube/GAL_BoundingCircle/transmitterGlyph.png"]
def initGlyphs():
return pgv.loadGlyphs(GLYPHDATA)
if __name__ == "__main__":
glyphs = initGlyphs()
pts = pgf.var_vec3(POINTS)
cloudGlyphs = pgf.var_int([glyphs[0]
for _ in range(len(POINTS))])
idxPt = pgf.listItem(pts, pgv.slideri32("Index", 0, len(POINTS) - 1, 0))
circ, center, radius = pgf.boundingCircle(pts)
center3 = pgf.vec3FromVec2(center)
centerGlyph = pgf.var_int(glyphs[1])
pgv.show("glyph1", pgv.glyphs(cloudGlyphs, pts))
pgv.show("glyph2", pgv.glyphs(centerGlyph, center3))
pgv.show("circle", circ)
pgv.show("points", pts)
pgv.show("center", center3)
pgv.print("Point at index", idxPt)
| StarcoderdataPython |
3511436 | <reponame>Jeff-Moorhead/flaskquotes
import os
import shutil
from setuptools import setup, find_packages
version = {}
with open("README.md", "r") as fh:
long_description = fh.read()
with open("./flaskquotes/version.py", "r") as vh:
exec(vh.read(), version)
setup(name="flaskquotes",
description="Get AFI top 100 quotes",
author="<NAME>",
author_email="<EMAIL>",
long_description=long_description,
long_description_content_type="text/markdown",
version = version.get("__version__", "0.0.0"),
packages=find_packages(),
entry_points={
'console_scripts': [
'quotes=flaskquotes.console:main'
]
},
install_requires=['BeautifulSoup4', 'Flask', 'gunicorn'],
package_data={"flaskquotes": ["data/quotes.json"]},
)
| StarcoderdataPython |
1910497 | <filename>api/cloud_provider/migrations/0004_region_comment.py
# Generated by Django 2.1.2 on 2019-07-30 03:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cloud_provider', '0003_auto_20190730_0239'),
]
operations = [
migrations.AddField(
model_name='region',
name='comment',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='Comment'),
),
]
| StarcoderdataPython |
5085366 | <gh_stars>10-100
def polynomial_decay(initial, final, max_decay_steps, power, current_step):
"""Decays hyperparameters polynomially. If power is set to 1.0, the decay behaves linearly.
Arguments:
initial {float} -- Initial hyperparameter such as the learning rate
final {float} -- Final hyperparameter such as the learning rate
max_decay_steps {int} -- The maximum numbers of steps to decay the hyperparameter
power {float} -- The strength of the polynomial decay
current_step {int} -- The current step of the training
Returns:
{float} -- Decayed hyperparameter
"""
# Return the final value if max_decay_steps is reached or the initial and the final value are equal
if current_step > max_decay_steps or initial == final:
return final
# Return the polynomially decayed value given the current step
else:
return ((initial - final) * ((1 - current_step / max_decay_steps) ** power) + final) | StarcoderdataPython |
3251788 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import subprocess
from Execution import ExecutionBase
# Worker module is used for manage the write and read workers. Read/write workers
# will be started with the corresponding commands. When the testing ends, read and
# write workers will be stopped using the stop functions.
class Workers(ExecutionBase):
def __init__(self, worker_file, hosts, execution_inst, benchname, commands):
self.logger = logging.getLogger("coordinator.run")
self.worker_file = worker_file
self.name = benchname
self.execution_inst = execution_inst
self.hosts = hosts
# The backfill workloads uses read workers but different parameters
if benchname == "backfill":
benchname = "read"
self.commands = f" --bench-name={benchname} {commands}"
def start(self):
worker_id = 0
total_worker = len(self.hosts)
for host in self.hosts:
remote_inst = self.execution_inst.split()
remote_inst.append(host)
remote_inst.append(self.worker_file)
remote_inst.append(
"{} --worker-id={}/{}".format(self.commands, worker_id, total_worker)
)
self.logger.info(remote_inst)
try:
worker_proc = subprocess.Popen(remote_inst)
except OSError:
self.logger.error(f"Start {self.name} {worker_id} failed")
return False
self.processes.append(worker_proc)
worker_id = worker_id + 1
return True
| StarcoderdataPython |
6555365 | # seatable
TEMPLATE_BASE_API_TOKEN = ''
DTABLE_WEB_SERVICE_URL = ''
EMAIL_TABLE_NAME = ''
LINK_TABLE_NAME = ''
LANG = ''
# email
EMAIL_SERVER = ''
EMAIL_USER = ''
EMAIL_PASSWORD = ''
import os
import sys
if os.path.isfile(os.path.join(os.path.dirname(__file__), 'email_syncer_settings.py')):
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'email_syncer_settings.py'))
try:
from email_syncer_settings import *
except:
pass
| StarcoderdataPython |
3274113 | import urllib2
from bs4 import BeautifulSoup
import re
# NOTE(review): Python 2 code (``urllib2``); will not run on Python 3
# without porting.  Scrapes the live Sensex quote from moneycontrol and
# writes value / change / icon-name to stocks.txt.
adds = ['http://m.moneycontrol.com/sensex/bse/sensex-live']
for add in adds:
    page = urllib2.urlopen(add)
    soup = BeautifulSoup(page,'lxml')
    # The index value and its daily change live in two <span> elements.
    val_tag=str(soup.find("span", {"id":'ind_c_close'}))
    change_tag=str(soup.find("span", {"id":'ind_chg'}))
    re1='.*?' # Non-greedy match on filler
    re2='([+-]?\\d*\\.\\d+)(?![-+0-9\\.])' # Float 1
    re3='.*>(.*)<' #For change in the value
    rg = re.compile(re1+re2,re.IGNORECASE|re.DOTALL)
    val_str= rg.search(val_tag).group(1)
    rg = re.compile(re3)
    change_str = rg.search(change_tag).group(1)
    # First token of the change text is the signed numeric change.
    change_num = float(change_str.split(' ')[0])
    # Pick the icon name from the sign of the change.
    if(change_num>=0):
        icon = 'bull'
    else:
        icon = 'bear'
    file = open('stocks.txt','w')
    file.write(val_str+'\n')
    file.write(change_str+'\n')
    file.write(icon)
    file.close()
| StarcoderdataPython |
347720 | #!/user/bin/python3
# -*- coding: utf-8 -*-
'''
Serialization ("pickling" in Python) is the process of turning an
in-memory variable into something storable or transferable.
Pickle shares the usual problem of language-specific serialization:
its output is Python-only and may be incompatible across Python
versions, so only store data with it that you can afford to lose.
The built-in ``json`` module provides a complete conversion between
Python objects and the JSON format.
(Original docstring was in Chinese.)
'''
import json
import os
import pickle

d = {'name': 'Bob', 'age': 20, 'score': 88}
d1 = dict(name='Bob', age=20, score=88)
destPath = os.path.join('demo/resources', 'pickling.txt')
print(destPath)
# Serialize directly with pickle (binary mode required).
f = open(destPath, 'wb')
pickle.dump(d, f)
f.close()
# Deserialize with pickle.
f = open(destPath, 'rb')
d = pickle.load(f)
f.close()
print('pickle 反序列化 ', d)
# Serialize with json (text mode).
destJsonPath = os.path.join('demo/resources', 'json.txt')
f = open(destJsonPath, 'w')
json.dump(d, f)
f.close()
# Deserialize with json.
f = open(destJsonPath, 'r')
d = json.load(f)
f.close()
print('json 反序列化 ', d)
| StarcoderdataPython |
3477773 | import xml.sax
import copy
from math import *
from graphserver.vincenty import vincenty
INFINITY = float('inf')
def download_osm(left,bottom,right,top):
""" Return a filehandle to the downloaded data."""
from urllib.request import urlopen
fp = urlopen( "http://api.openstreetmap.org/api/0.5/map?bbox=%f,%f,%f,%f"%(left,bottom,right,top) )
return fp
def dist(x1,y1,x2,y2):
return ((x2-x1)**2+(y2-y1)**2)**0.5
def dist_haversine(x0,y0,x1,y1):
# Use spherical geometry to calculate the surface distance, in meters
# between two geodesic points. Uses Haversine formula:
# http://en.wikipedia.org/wiki/Haversine_formula
radius = 6371000 # Earth mean radius in m
lon0 = x0 * PI / 180 #rad
lat0 = y0 * PI / 180 #rad
lon1 = x1 * PI / 180 #rad
lat1 = y1 * PI / 180 #rad
dLat = (lat1 - lat0) #rad
dLon = (lon1 - lon0) #rad
a = sin(dLat/2) * sin(dLat/2) + cos(lat0) * cos(lat1) * sin(dLon/2) * sin(dLon/2)
c = 2 * atan2(sqrt(a), sqrt(1-a))
return radius * c
class Node:
def __init__(self, id, lon, lat):
self.id = id
self.lon = lon
self.lat = lat
self.tags = {}
def __repr__(self):
return "<Node id='%s' (%s, %s) n_tags=%d>"%(self.id, self.lon, self.lat, len(self.tags))
class Way:
def __init__(self, id, osm, tolerant=False):
self.osm = osm
self.id = id
self.nd_ids = []
self.tags = {}
self.tolerant = tolerant #skip over dangling nd references
@property
def nds(self):
for nd_id in self.nd_ids:
try:
yield self.osm.nodes[nd_id]
except KeyError:
if self.tolerant:
pass
else:
raise KeyError( "Way references undefined node '%s'"%nd_id )
@property
def geom(self):
return [(nd.lon, nd.lat) for nd in self.nds]
@property
def bbox(self):
l = INFINITY
b = INFINITY
r = -INFINITY
t = -INFINITY
for x,y in self.geom:
l = min(l,x)
r = max(r,x)
b = min(b,y)
t = max(t,y)
return (l,b,r,t)
def split(self, dividers):
# slice the node-array using this nifty recursive function
def slice_array(ar, dividers):
for i in range(1,len(ar)-1):
if dividers[ar[i]]>1:
#print "slice at %s"%ar[i]
left = ar[:i+1]
right = ar[i:]
rightsliced = slice_array(right, dividers)
return [left]+rightsliced
return [ar]
slices = slice_array(self.nd_ids, dividers)
# create a way object for each node-array slice
ret = []
i=0
for slice in slices:
littleway = copy.copy( self )
littleway.id += "-%d"%i
littleway.nd_ids = slice
ret.append( littleway )
i += 1
return ret
def get_projected_points(self, reprojection_func=lambda x,y:(x,y)):
"""nodedir is a dictionary of nodeid->node objects. If reprojection_func is None, returns unprojected points"""
ret = []
for nodeid in self.nd_ids:
node = self.osm.nodes[ nodeid ]
ret.append( reprojection_func(node.lon,node.lat) )
return ret
def to_canonical(self, srid, reprojection_func=None):
"""Returns canonical string for this geometry"""
return "SRID=%d;LINESTRING(%s)"%(srid, ",".join( ["%f %f"%(x,y) for x,y in self.get_projected_points()] ) )
def length(self):
"""nodedir is a dictionary of nodeid->node objects"""
ret = 0
for i in range(len(self.nd_ids)-1):
thisnode = self.osm.nodes[ self.nd_ids[i] ]
nextnode = self.osm.nodes[ self.nd_ids[i+1] ]
ret += vincenty(thisnode.lat, thisnode.lon, nextnode.lat, nextnode.lon)
return ret
def length_haversine(self):
ret = 0
for i in range(len(self.nds)-1):
thisnode = self.osm.nodes[ self.nds[i] ]
nextnode = self.osm.nodes[ self.nds[i+1] ]
ret += dist(thisnode.lon,thisnode.lat,nextnode.lon,nextnode.lat)
return ret
@property
def fromv(self):
return self.nd_ids[0]
@property
def tov(self):
return self.nd_ids[-1]
def __repr__(self):
return "<Way id='%s' n_nds=%d n_tags=%d>"%(self.id, len(self.nd_ids), len(self.tags))
class OSM:
def __init__(self, filename_or_stream, tolerant=False):
""" File can be either a filename or stream/file object."""
nodes = {}
ways = {}
superself = self
class OSMHandler(xml.sax.ContentHandler):
@classmethod
def setDocumentLocator(self,loc):
pass
@classmethod
def startDocument(self):
pass
@classmethod
def endDocument(self):
pass
@classmethod
def startElement(self, name, attrs):
if name=='node':
self.currElem = Node(attrs['id'], float(attrs['lon']), float(attrs['lat']))
elif name=='way':
self.currElem = Way(attrs['id'], superself, tolerant)
elif name=='tag':
self.currElem.tags[attrs['k']] = attrs['v']
elif name=='nd':
self.currElem.nd_ids.append( attrs['ref'] )
@classmethod
def endElement(self,name):
if name=='node':
nodes[self.currElem.id] = self.currElem
elif name=='way':
ways[self.currElem.id] = self.currElem
@classmethod
def characters(self, chars):
pass
xml.sax.parse(filename_or_stream, OSMHandler)
self.nodes = nodes
self.ways = ways
#count times each node is used
node_histogram = dict.fromkeys( self.nodes.keys(), 0 )
todel = []
for way in self.ways.values():
if len(way.nd_ids) < 2: #if a way has only one node, delete it out of the osm collection
todel.append( way.id )
#have to do it in two passes, or else you change the size of dict during iteration
for way_id in todel:
del self.ways[way_id]
for way in self.ways.values():
for node in way.nd_ids:
try:
node_histogram[node] += 1
except KeyError:
node_histogram[node] = 1
#use that histogram to split all ways, replacing the member set of ways
new_ways = {}
for id, way in self.ways.items():
split_ways = way.split(node_histogram)
for split_way in split_ways:
new_ways[split_way.id] = split_way
self.ways = new_ways
@property
def connecting_nodes(self):
"""List of nodes that are the endpoint of one or more ways"""
ret = {}
for way in self.ways.values():
ret[way.fromv] = self.nodes[way.fromv]
ret[way.tov] = self.nodes[way.tov]
return ret.values()
@classmethod
def download_from_bbox(cls, left, bottom, right, top ):
""" Retrieve remote OSM data."""
fp = download_osm(left, bottom, right, top)
osm = cls(fp)
fp.close()
return osm
def find_nearest_node(self, lng, lat):
""" Brute force effort to find the nearest start or end node based on lat/lng distances."""
best = self.nodes[self.ways[self.ways.keys()[0]].nd_ids[0]]
bdist = dist(best.lon, best.lat, lng, lat)
for id, way in self.ways.iteritems():
for i in (0,-1):
nd = self.nodes[way.nd_ids[i]]
d = dist(lng, lat, nd.lon, nd.lat)
if d < bdist:
bdist = d
best = nd
return best
@property
def bbox(self):
l = INFINITY
b = INFINITY
r = -INFINITY
t = -INFINITY
for way in self.ways.values():
ll, bb, rr, tt = way.bbox
l = min(l,ll)
b = min(b,bb)
r = max(r,rr)
t = max(t,tt)
return (l,b,r,t)
| StarcoderdataPython |
8000911 | import math
from magicbot import tunable
from components import swervedrive
from .base_pid_controller import BasePIDComponent
from . import field_centric, position_tracker
class XPosController(BasePIDComponent):
drive = swervedrive.SwerveDrive
tracker = position_tracker.PositionTracker
kP = tunable(0.1)
kI = tunable(0.0)
kD = tunable(0.0)
kF = tunable(0.0)
kToleranceFeet = tunable(0.25)
kIzone = tunable(0.25)
def __init__(self):
super().__init__(self.get_position, 'x_ctrl')
self.set_abs_output_range(0.16, 0.8)
def get_position(self):
return self.tracker.get_x() / 1.0
def move_to(self, position):
self.setpoint = position
def is_at_location(self):
return self.enabled and \
abs(self.get_position() - self.setpoint) < self.kToleranceFeet
def pidWrite(self, output):
self.rate = -output
def execute(self):
super().execute()
if self.rate is not None:
if self.is_at_location():
self.drive.set_raw_strafe(0)
else:
self.drive.set_raw_strafe(self.rate)
class YPosController(BasePIDComponent):
drive = swervedrive.SwerveDrive
tracker = position_tracker.PositionTracker
kP = tunable(0.09)
kI = tunable(0.0)
kD = tunable(0.0)
kF = tunable(0.0)
kToleranceFeet = tunable(0.25)
kIzone = tunable(0.25)
def __init__(self):
super().__init__(self.get_position, 'y_ctrl')
self.set_abs_output_range(0.16, 0.8)
def get_position(self):
return self.tracker.get_y() / 1.0
def move_to(self, position):
self.setpoint = position
def is_at_location(self):
return self.enabled and abs(self.get_position() - self.setpoint) < self.kToleranceFeet
def execute(self):
super().execute()
if self.rate is not None:
if self.is_at_location():
self.drive.set_raw_fwd(0)
else:
self.drive.set_raw_fwd(self.rate)
class FCXPosController(XPosController):
field_centric = field_centric.FieldCentric
fc_tracker = position_tracker.FCPositionTracker
def get_position(self):
return self.fc_tracker.get_x() / 1.0
def execute(self):
super().execute()
if self.rate is not None:
if self.is_at_location():
self.field_centric.set_strafe(0)
else:
self.field_centric.set_strafe(self.rate)
#print(self.rate)
class FCYPosController(YPosController):
field_centric = field_centric.FieldCentric
fc_tracker = position_tracker.FCPositionTracker
def get_position(self):
return self.fc_tracker.get_y() / 1.0
def execute(self):
super().execute()
if self.rate is not None:
if self.is_at_location():
self.field_centric.set_fwd(0)
else:
self.field_centric.set_fwd(self.rate)
| StarcoderdataPython |
136626 | <reponame>Kulbear/endless-2048<gh_stars>10-100
from .game import Game2048
| StarcoderdataPython |
171684 | import torch
import torch.nn as nn
from .utils import _calc_padding, _unpack_from_convolution, _pack_for_convolution
class GraphAndConv(nn.Module):
def __init__(self, input_dim, output_dim, conv_kernel_size, intermediate_dim=None):
super(GraphAndConv, self).__init__()
if intermediate_dim is None:
intermediate_dim = output_dim
self.lin = nn.Linear(2*input_dim, intermediate_dim)
padding = _calc_padding(1, conv_kernel_size)
self.conv = nn.Conv1d(intermediate_dim, output_dim, conv_kernel_size, padding=padding)
self.input_dim = input_dim
self.intermediate_dim = intermediate_dim
self.output_dim = output_dim
def forward(self, adj, inputs):
batch_size = inputs.shape[0]
mask = adj.sum(dim=2).bool()
x = torch.einsum('bilc,bij->bjlc', inputs, adj)
x = torch.cat((x, inputs), dim=-1)
x = self.lin(x)
x = _pack_for_convolution(x)
x = self.conv(x)
x = _unpack_from_convolution(x, batch_size)
x[~mask] = 0.
return x
| StarcoderdataPython |
12834935 | <reponame>Anirban166/tstl
import avl
import random
import sys
import coverage
import time
import numpy
start = time.time()
branchesHit = set()
maxval = int(sys.argv[1])
testlen = int(sys.argv[2])
numtests = int(sys.argv[3])
cov = coverage.coverage(branch=True, source=["avl.py"])
cov.start()
for t in xrange(0,numtests):
a = avl.AVLTree()
test = []
ref = set()
for s in xrange(0,testlen):
h = a.height
n = len(ref)
if (n > 0):
if not (h <= (numpy.log2(n)+1)):
print h
print n
print (numpy.log2(n))
sys.exit(0)
op = random.choice(["add","del","find"])
val = random.randrange(0,maxval)
test.append((op,val))
if op == "add":
a.insert(val)
ref.add(val)
elif op == "del":
a.delete(val)
ref.discard(val)
elif op == "find":
assert (a.find(val) == (val in ref))
currBranches = cov.collector.get_arc_data()
for src_file, arcs in currBranches.iteritems():
for arc in arcs:
branch = (src_file, arc)
if branch not in branchesHit:
branchesHit.add(branch)
elapsed = time.time()-start
print elapsed,len(branchesHit),branch
avlitems = a.inorder_traverse()
setitems = []
for item in ref:
setitems.append(item)
setitems = sorted(setitems)
assert (avlitems == setitems)
| StarcoderdataPython |
8165545 | from typing import NoReturn
from components.header import header
from functions.square import squareAreaMenu
from functions.triangle import triangleAreaMenu
from functions.trapeze import trapezeAreaMenu
from functions.diamond import diamondAreaMenu
from functions.circle import circleAreaMenu
def flatFiguresMenu() -> NoReturn:
'''
This function displays the list of math figures options and select the choice.
'''
header()
print('Áreas das figuras planas'.center(66))
print("""
[1] Área de um quadrado, retângulo ou paralelogramo
[2] Área de um triangulo
[3] Área de um trapézio
[4] Área de um lozango
[5] Área de um círculo
""")
escolha = input('➤ ')
selectFunction = {
'1': lambda: squareAreaMenu(),
'2': lambda: triangleAreaMenu(),
'3': lambda: trapezeAreaMenu(),
'4': lambda: diamondAreaMenu(),
'5': lambda: circleAreaMenu(),
}
selectFunction[escolha]()
| StarcoderdataPython |
306326 | #!/usr/bin/env python
#
# Given a DNS name and type, return the records in the DNS answer
# section only, excluding any RRSIG records.
#
import getdns, pprint, sys
extensions = { "dnssec_return_status" : getdns.EXTENSION_TRUE }
def get_rrtype(qtype):
try:
rrtype = eval("getdns.RRTYPE_%s" % qtype.upper())
except AttributeError:
print("Unknown DNS record type: {0}".format(qtype))
sys.exit(1)
else:
return rrtype
def print_answer(r):
pprint.pprint(r.replies_tree[0]['answer'])
return
if __name__ == '__main__':
    # Usage: <script> <qname> <qtype>; prints the non-RRSIG records of
    # the DNS answer section.
    qname, qtype = sys.argv[1:]
    rrtype = get_rrtype(qtype)
    ctx = getdns.Context()
    try:
        results = ctx.general(name=qname, request_type=rrtype,
                              extensions=extensions)
    except getdns.error as e:
        print(str(e))
        sys.exit(1)
    status = results.status
    if status == getdns.RESPSTATUS_GOOD:
        for reply in results.replies_tree:
            answers = reply['answer'] # list of 1 here
            for answer in answers:
                # Skip DNSSEC signature records; print everything else.
                if answer['type'] != getdns.RRTYPE_RRSIG:
                    pprint.pprint(answer)
    elif status == getdns.RESPSTATUS_NO_NAME:
        print("{0}, {1}: no such name".format(qname, qtype))
    elif status == getdns.RESPSTATUS_ALL_TIMEOUT:
        print("{0}, {1}: query timed out".format(qname, qtype))
    else:
        print("{0}, {1}: unknown return code: {2}".format(qname, qtype, results.status))
| StarcoderdataPython |
5124992 | <gh_stars>1-10
n,m = map(int,input().split())
chess = [input() for _ in range(n)]
min_n = 64
for i in range(n-7):
for j in range(m-7):
cnt1= cnt2= 0
st = chess[i][j]
for k in range(i,i+8):
for s in range(j,j+8):
if k%2==s%2 and chess[k][s]!=st: cnt1+=1
elif k%2!=s%2 and chess[k][s]==st: cnt1+=1
elif k%2==s%2 and chess[k][s]==st: cnt2+=1
else: cnt2+=1
min_n = min(cnt1,cnt2,min_n)
print(min_n)
| StarcoderdataPython |
12866375 | # !/usr/bin/env python
# coding=utf8
import json
import traceback
from tornado.web import RequestHandler
from pfrock.cli import logger
from pfrock.core.constants import PFROCK_CONFIG_SERVER, PFROCK_CONFIG_ROUTER, PFROCK_CONFIG_PORT, ROUTER_METHOD, \
ROUTER_PATH, ROUTER_OPTIONS, ROUTER_HANDLER
from pfrock.core.lib import auto_str
@auto_str
class PfrockConfigRouter(object):
SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS
def __init__(self, path, methods, handler, options={}):
self.path = path
self.handler = handler
self.options = options
self.methods = []
if methods == "any":
self.methods = []
else:
for method in methods:
method = method.upper()
if method in self.SUPPORTED_METHODS:
self.methods.append(method)
@auto_str
class PfrockConfigServer(object):
def __init__(self, routes, port):
self.routes = routes
self.port = port
class PfrockConfigParser(object):
@classmethod
def _parse_router(cls, router):
path = router[ROUTER_PATH] if ROUTER_PATH in router else None
methods = router[ROUTER_METHOD] if ROUTER_METHOD in router else []
handler = router[ROUTER_HANDLER] if ROUTER_HANDLER in router else None
options = router[ROUTER_OPTIONS] if ROUTER_OPTIONS in router else None
if path and handler:
return PfrockConfigRouter(path, methods, handler, options)
return None
@classmethod
def _parse_routers(cls, routers):
router_list = []
for router in routers:
router = cls._parse_router(router)
if router:
router_list.append(router)
return router_list
@classmethod
def _parse_servers(cls, server):
port = server[PFROCK_CONFIG_PORT] if PFROCK_CONFIG_PORT in server else None
routers = cls._parse_routers(server[PFROCK_CONFIG_ROUTER]) if PFROCK_CONFIG_ROUTER in server else None
if port and routers:
return PfrockConfigServer(routers, port)
@classmethod
def do(cls, config_file_path):
with open(config_file_path, 'r') as fin:
try:
config_data = json.load(fin)
except:
logger.error("%s not well formed \n%s" % (config_file_path, traceback.format_exc()))
return None
config_servers = config_data[PFROCK_CONFIG_SERVER] if PFROCK_CONFIG_SERVER in config_data else None
if config_servers:
for config_server in config_servers:
config_server = cls._parse_servers(config_server)
# todo: dev version just support one server
return config_server
return None
| StarcoderdataPython |
9621436 | <filename>usermaker.py
#!/usr/bin/env python3
import argparse, random, sys
parser = argparse.ArgumentParser(epilog="""
This python script creates files that can be used to create and
destroy a set of guest users. You need to supply a prefix for
the filenames created, and a number of users to create.
Use <output_prefix>_create.sh as root to create the users.
Use <output_prefix>_destroy.sh as root to destroy the users.
<output_prefix>.html can be printed to create slips of paper to
give out with usernames and passwords.
""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("output_prefix",help="Output files prefix.")
parser.add_argument("n",type=int,help="Number of users.")
parser.add_argument("-p","--prefix",help="User name prefix.",default="guest")
parser.add_argument("-u","--url",help="URL to direct users to.",default="biotraining.erc.monash.edu")
args = parser.parse_args()
n = args.n
users = [ ("%s%d" % (args.prefix, i+1), ''.join( random.choice("abcdefghkmnpqrstuvwxyz") for i in range(8) ))
for i in range(args.n) ]
with open(args.output_prefix+"_create.sh", "w") as f:
for name, password in users:
print("adduser "+name+" --gecos "+name+" --disabled-password", file=f)
print("echo "+name+":"+password+" |chpasswd", file=f)
print("rstudio-server restart #RStudio can become confused if there is a new user with the same username as an old user", file=f)
with open(args.output_prefix+"_destroy.sh", "w") as f:
for name, password in users:
print("pkill -u "+name+" ; deluser "+name+" --backup --remove-home", file=f)
with open(args.output_prefix+".html","w") as f:
print("<!DOCTYPE html>", file=f)
for i, (name, password) in enumerate(users):
print("<pre style=\"font-size: 110%; page-break-inside: avoid\">", file=f)
print(" Go to: "+args.url, file=f)
print("Username: "+name, file=f)
print("Password: "+password, file=f)
print("", file=f)
print("", file=f)
print("</pre>", file=f)
if i % 10 == 9:
print('<p style="page-break-after: always;"> </p>', file=f)
#print('<p style="page-break-after: always;"> </p>', file=f)
| StarcoderdataPython |
260875 | <reponame>AmreshTripathy/Python<gh_stars>1-10
t = ( 10, 11, 12, 34, 99, 4, 98)
print (t[0])
t1 = (1, 1, 1, 2, 3, 4, 65, 65, 3, 2) #tuple with single element
print (t1.count(1))
print (t1.index(65)) | StarcoderdataPython |
3236264 | import numpy as np
class ReplaceNulls():
    """ArcGIS raster-function template that replaces zero-valued pixels with a
    user-defined NoData value and marks every output pixel as valid."""

    def __init__(self):
        self.name = 'Replace Nulls'
        self.description = 'Replace NULL values in a raster with a user defined value.'

    def getParameterInfo(self):
        """Describe the tool's two parameters: the input raster and the fill value."""
        raster_param = {
            'name': 'raster',
            'dataType': 'raster',
            'value':'Multiband Raster',
            'required': True,
            'displayName': 'Input Raster',
            'description': 'Input Raster'
        }
        fill_param = {
            'name': 'fill_val',
            'dataType': 'numeric',
            'value': 65535,
            'required': True,
            'displayName': 'New NoData',
            'description': 'New NoData value to use'
        }
        return [raster_param, fill_param]

    def getConfiguration(self, **scalars):
        """Request property/mask inheritance; NoData (bit 2) is deliberately excluded."""
        return {
            'inheritProperties': 1 | 4 | 8,  # inherit everything but NoData (2)
            'inputMask': True  # the input masks are made available in the pixelBlocks keyword
        }

    def updateRasterInfo(self, **kwargs):
        """Propagate band count / pixel type to the output and advertise the new NoData."""
        self.fill_val = kwargs['fill_val']
        band_count = kwargs['raster_info']['bandCount']
        pixel_type = kwargs['raster_info']['pixelType']
        out_info = kwargs['output_info']
        out_info['bandCount'] = band_count
        out_info['pixelType'] = pixel_type
        # One NoData entry per band, all set to the fill value.
        out_info['noData'] = np.array(np.full(band_count, fill_value=self.fill_val, dtype=pixel_type))
        return kwargs

    def updatePixels(self, tlc, shape, props, **pixelBlocks):
        """Replace zeros in place with the fill value and emit an all-valid mask."""
        pixels = np.asarray(pixelBlocks['raster_pixels'])
        pixels[pixels == 0] = self.fill_val  # in-place, same effect as np.place
        pixelBlocks['output_mask'] = np.ones(pixels.shape).astype('u1', copy = False)
        pixelBlocks['output_pixels'] = pixels.astype(props['pixelType'], copy=True)
        return pixelBlocks
| StarcoderdataPython |
12829375 |
class _ANY(object):
    """Wildcard sentinel that compares equal to any value (like ``mock.ANY``)."""

    def __eq__(self, other):
        # Equal to everything, regardless of type.
        return True

    def __ne__(self, other):
        # Kept explicit so != is consistently False as well.
        return False

    def __repr__(self):
        return '<ANY>'


# Module-level singleton; use this instead of instantiating _ANY directly.
ANY = _ANY()
class CaseObject(object):
    """
    Base class for objects that can be constructed and matched against
    each other.

    Attribute names passed to ``__init__`` are recorded in ``self.list`` so
    the matching helpers know which fields to compare.  Attributes missing on
    the candidate object act as wildcards (as does the ``ANY`` sentinel).
    TODO: enforce immutability
    """
    def __init__(self, **kwargs):
        # `list` records the attribute names used for matching.
        self.list = []
        for name, value in kwargs.items():
            setattr(self, name, value)
            self.list.append(name)

    def best_match(self, *args, **kwargs):
        """Invoke the function of the case whose obj shares the most attribute
        values with self; call ``default`` (if provided) when nothing matches."""
        winner = None
        winner_score = 0
        for case in args:
            score = self.__match_num(case.obj)
            # BUG FIX: the original did `winner_score += 1` instead of
            # recording the actual score, so a later case matching fewer
            # fields could displace a better earlier match.
            if score > winner_score:
                winner = case
                winner_score = score
        if winner:
            winner.function(self)
        else:
            # .get() so a missing `default` kwarg no longer raises KeyError.
            default = kwargs.get('default')
            if default:
                default()

    def first_exact_match(self, *args, **kwargs):
        """Invoke the function of the first case whose obj matches every
        recorded attribute; call ``default`` (if provided) otherwise."""
        for case in args:
            if self.__is_hard_match(case.obj):
                case.function(self)
                return
        default = kwargs.get('default')
        if default:
            default()

    def __is_hard_match(self, obj):
        """
        True if the objects are an exact match, defined as each attribute
        matching, or ANY (or absent on obj) for that attribute.
        """
        for attr in self.list:
            try:
                if getattr(obj, attr) != getattr(self, attr):
                    return False
            except AttributeError:
                # Attributes absent on obj count as wildcards.
                pass
        return True

    def __match_num(self, obj):
        """
        Number of fields that match. For instance, if self has attrs for
        Name, Age, Gender, and obj has matching values for Name and Gender,
        then 2 is returned.
        """
        score = 0
        for attr in self.list:
            try:
                if getattr(obj, attr) == getattr(self, attr):
                    score += 1
            except AttributeError:
                pass
        return score
class Case(object):
    """Pairs a pattern object with the callback invoked when it matches."""

    def __init__(self, obj, function):
        self.obj = obj            # pattern to match against
        self.function = function  # callback, called with the matched subject
def case(obj, f):
    """Convenience constructor: build a Case from a pattern and a callback."""
    return Case(obj, f)
def default(f):
    """Build a catch-all Case whose pattern is None."""
    return Case(None, f)
| StarcoderdataPython |
3355425 | <filename>sift_pyx12/error_999.py
######################################################################
# Copyright
# <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file LICENSE.txt, which
# you should have received as part of this distribution.
#
######################################################################
"""
Generates a 999 Response
Visitor - Visits an error_handler composite
"""
import time
import logging
import random
# Intrapackage imports
from sift_pyx12.errors import EngineError
import sift_pyx12.error_visitor
import sift_pyx12.segment
import sift_pyx12.x12file
# Module-level logger; DEBUG here overrides the application-wide level.
logger = logging.getLogger('sift_pyx12.error_999')
logger.setLevel(logging.DEBUG)
class error_999_visitor(sift_pyx12.error_visitor.error_visitor):
    """
    Visit an error_handler composite. Generate a 999.

    Walks the ISA/GS/ST/segment/element error tree and writes the
    corresponding 999 acknowledgment envelope (ISA/GS/ST ... AK1/IK3/IK4/
    IK5/AK9 ... SE/GE/TA1/IEA) through an X12Writer.
    """
    def __init__(self, fd, term=('~', '*', ':', '\n', '^')):
        """
        @param fd: target file
        @type fd: file descriptor
        @param term: tuple of x12 terminators used
        @type term: tuple(string, string, string, string)
        """
        # NOTE(review): `term` is accepted but ignored; the writer below is
        # constructed with hard-coded terminators.
        self.fd = fd
        self.wr = sift_pyx12.x12file.X12Writer(fd, '~', '*', ':', '\n', '^')
        # X12 delimiters used when formatting segments.
        self.seg_term = '~'
        self.ele_term = '*'
        self.subele_term = ':'
        self.repetition_term = '^'
        self.eol = '\n'
        # Control numbers for the generated interchange / group / transactions.
        self.isa_control_num = None
        self.gs_control_num = None
        self.st_control_num = 0
        # Version/Release/Industry Identifier Code of the generated 999.
        self.vriic = '005010X231'
    def visit_root_pre(self, errh):
        """
        Open the acknowledgment envelope: write the ISA and GS segments.

        @param errh: Error handler
        @type errh: L{error_handler.err_handler}
        Uses:
        isa_node seg_data
        gs_node seg_data
        """
        seg = errh.cur_isa_node.seg_data
        #ISA*00* *00* *ZZ*ENCOUNTER *ZZ*00GR *030425*1501*U*00501*000065350*0*T*:~
        # Derive a 9-digit interchange control number from the current
        # date/time (YYMMDDHHMM with the leading digit dropped).
        self.isa_control_num = ('%s%s' % (time.strftime('%y%m%d'),
                                          time.strftime('%H%M')))[1:]
        self.gs_control_num = '%i' % (random.randint(10000000, 999999999))
        icvn = seg.get_value('ISA12')
        isa_seg = sift_pyx12.segment.Segment('ISA*00* *00* ',
                                             self.seg_term, self.ele_term, self.subele_term)
        # Swap sender and receiver so the acknowledgment goes back to the submitter.
        isa_seg.set('05', seg.get_value('ISA07'))
        isa_seg.set('06', seg.get_value('ISA08'))
        isa_seg.set('07', seg.get_value('ISA05'))
        isa_seg.set('08', seg.get_value('ISA06'))
        isa_seg.set('09', time.strftime('%y%m%d')) # Date
        isa_seg.set('10', time.strftime('%H%M')) # Time
        isa_seg.set('11', self.repetition_term)
        isa_seg.set('12', icvn)
        isa_seg.set('13', self.isa_control_num) # ISA Interchange Control Number
        isa_seg.set('14', '0') # No need for TA1 response to 999
        isa_seg.set('15', seg.get_value('ISA15'))
        isa_seg.set('16', self.subele_term)
        self.wr.Write(isa_seg)
        # GS*FA*ENCOUNTER*00GR*20030425*150153*653500001*X*005010
        seg = errh.cur_gs_node.seg_data
        gs_seg = sift_pyx12.segment.Segment('GS', '~', '*', ':')
        gs_seg.set('01', 'FA')
        # Sender/receiver application codes swapped, trailing padding removed.
        gs_seg.set('02', seg.get_value('GS03').rstrip())
        gs_seg.set('03', seg.get_value('GS02').rstrip())
        gs_seg.set('04', time.strftime('%Y%m%d'))
        gs_seg.set('05', time.strftime('%H%M%S'))
        gs_seg.set('06', self.gs_control_num)
        gs_seg.set('07', seg.get_value('GS07'))
        gs_seg.set('08', self.vriic)
        self.wr.Write(gs_seg)
    def __get_isa_errors(self, err_isa):
        """
        Build list of TA1 level errors
        Only the first error is used
        """
        # Maps ISA / IEA element positions to TA1 note codes.
        isa_ele_err_map = {1: '010', 2: '011', 3: '012', 4: '013', 5: '005', 6: '006',
                           7: '007', 8: '008', 9: '014', 10: '015', 11: '016', 12: '017', 13: '018',
                           14: '019', 15: '020', 16: '027'
                           }
        iea_ele_err_map = {1: '021', 2: '018'}
        err_codes = [err[0] for err in err_isa.errors]
        for elem in err_isa.elements:
            for (err_cde, err_str, bad_value) in elem.errors:
                # Ugly: classifies by looking for 'ISA'/'IEA' in the message text.
                if 'ISA' in err_str:
                    err_codes.append(isa_ele_err_map[elem.ele_pos])
                elif 'IEA' in err_str:
                    err_codes.append(iea_ele_err_map[elem.ele_pos])
        # return unique codes
        return list(set(err_codes))
    def visit_root_post(self, errh):
        """
        Close the envelope: write GE, an optional TA1, and IEA.

        @param errh: Error handler
        @type errh: L{error_handler.err_handler}
        """
        ge = sift_pyx12.segment.Segment('GE', '~', '*', ':')
        ge.set('02', self.gs_control_num)
        self.wr.Write(ge)
        #TA1 segment
        err_isa = errh.cur_isa_node
        if err_isa.ta1_req == '1':
            #seg = ['TA1', err_isa.isa_trn_set_id, err_isa.orig_date, \
            #    err_isa.orig_time]
            ta1_seg = sift_pyx12.segment.Segment('TA1', '~', '*', ':')
            ta1_seg.append(err_isa.isa_trn_set_id)
            ta1_seg.append(err_isa.orig_date)
            ta1_seg.append(err_isa.orig_time)
            err_codes = self.__get_isa_errors(err_isa)
            if err_codes:
                # TA1 carries a single note code; report only the first.
                err_cde = err_codes[0]
                ta1_seg.append('R')
                ta1_seg.append(err_cde)
            else:
                # No interchange-level errors: accepted.
                ta1_seg.append('A')
                ta1_seg.append('000')
            self.wr.Write(ta1_seg)
        self.wr.Write(sift_pyx12.segment.Segment('IEA', '~', '*', ':'))
    def visit_isa_pre(self, err_isa):
        """
        No-op: the ISA envelope is emitted in visit_root_pre.

        @param err_isa: ISA Loop error handler
        @type err_isa: L{error_handler.err_isa}
        """
    def visit_isa_post(self, err_isa):
        """
        No-op: the IEA close is emitted in visit_root_post.

        @param err_isa: ISA Loop error handler
        @type err_isa: L{error_handler.err_isa}
        """
        pass
    def visit_gs_pre(self, err_gs):
        """
        Open one 999 transaction (ST) and acknowledge the group with AK1.

        @param err_gs: GS Loop error handler
        @type err_gs: L{error_handler.err_gs}
        """
        #ST
        self.st_control_num += 1
        st_seg = sift_pyx12.segment.Segment('ST*999', '~', '*', ':')
        st_seg.set('02', '%04i' % (self.st_control_num))
        st_seg.set('03', self.vriic)
        self.wr.Write(st_seg)
        ak1 = sift_pyx12.segment.Segment('AK1', '~', '*', ':')
        ak1.set('01', err_gs.fic)
        ak1.set('02', err_gs.gs_control_num)
        ak1.set('03', err_gs.vriic)
        self.wr.Write(ak1)
    def __get_gs_errors(self, err_gs):
        """
        Build list of GS level errors
        """
        # Maps GS / GE element positions to AK9 note codes; '1' is the
        # catch-all for unmapped positions.
        gs_ele_err_map = {6: '6', 8: '2'}
        ge_ele_err_map = {2: '6'}
        err_codes = [err[0] for err in err_gs.errors]
        for elem in err_gs.elements:
            for (err_cde, err_str, bad_value) in elem.errors:
                # Ugly: classifies by looking for 'GS'/'GE' in the message text.
                if 'GS' in err_str:
                    if elem.ele_pos in gs_ele_err_map:
                        err_codes.append(gs_ele_err_map[elem.ele_pos])
                    else:
                        err_codes.append('1')
                elif 'GE' in err_str:
                    if elem.ele_pos in ge_ele_err_map:
                        err_codes.append(ge_ele_err_map[elem.ele_pos])
                    else:
                        err_codes.append('1')
        # return unique codes
        ret = list(set(err_codes))
        ret.sort()
        return ret
    def visit_gs_post(self, err_gs):
        """
        Close the group acknowledgment: write AK9 and the SE trailer.

        @param err_gs: GS Loop error handler
        @type err_gs: L{error_handler.err_gs}
        """
        # Fill in safe defaults when the handler recorded no outcome.
        if not (err_gs.ack_code and err_gs.st_count_orig and
                err_gs.st_count_recv):
            if not err_gs.ack_code:
                err_gs.ack_code = 'R'
            if not err_gs.st_count_orig:
                err_gs.st_count_orig = 0
            if not err_gs.st_count_recv:
                err_gs.st_count_recv = 0
        seg_data = sift_pyx12.segment.Segment('AK9', '~', '*', ':')
        seg_data.set('01', err_gs.ack_code)
        seg_data.set('02', '%i' % err_gs.st_count_orig)
        seg_data.set('03', '%i' % err_gs.st_count_recv)
        # Accepted = received minus failed, floored at zero.
        count_ok = max(err_gs.st_count_recv - err_gs.count_failed_st(), 0)
        seg_data.set('04', '%i' % (count_ok))
        err_codes = self.__get_gs_errors(err_gs)
        # AK9 has room for at most 5 note codes.
        for err_cde in err_codes[:5]:
            seg_data.append(err_cde)
        self.wr.Write(seg_data)
        #SE
        seg_data = sift_pyx12.segment.Segment('SE', '~', '*', ':')
        # NOTE(review): SE01 (included segment count) is hard-coded to 0.
        seg_data.append('%i' % (0))
        seg_data.append('%04i' % self.st_control_num)
        self.wr.Write(seg_data)
    def visit_st_pre(self, err_st):
        """
        Acknowledge one received transaction set: write AK2.

        @param err_st: ST Loop error handler
        @type err_st: L{error_handler.err_st}
        """
        if err_st is None:
            raise EngineError('Cannot create AK2 : err_st is None')
        if err_st.trn_set_id is None:
            raise EngineError('Cannot create AK2: err_st.trn_set_id was not set')
        if err_st.trn_set_control_num is None:
            raise EngineError('Cannot create AK2: err_st.trn_set_control_num was not set')
        if err_st.vriic is None:
            raise EngineError('Cannot create AK2: err_st.vriic was not set')
        seg_data = sift_pyx12.segment.Segment('AK2', '~', '*', ':')
        seg_data.set('01', err_st.trn_set_id)
        seg_data.set('02', err_st.trn_set_control_num.strip())
        seg_data.set('03', err_st.vriic)
        self.wr.Write(seg_data)
    def __get_st_errors(self, err_st):
        """
        Build list of ST level errors
        """
        # Maps ST / SE element positions to IK5 note codes.
        st_ele_err_map = {1: '6', 2: '7'}
        se_ele_err_map = {1: '6', 2: '7'}
        err_codes = [err[0] for err in err_st.errors]
        # Any child (segment-level) error implies code '5'.
        if err_st.child_err_count() > 0:
            err_codes.append('5')
        for elem in err_st.elements:
            for (err_cde, err_str, bad_value) in elem.errors:
                # Ugly: classifies by looking for 'ST'/'SE' in the message text.
                if 'ST' in err_str:
                    err_codes.append(st_ele_err_map[elem.ele_pos])
                elif 'SE' in err_str:
                    err_codes.append(se_ele_err_map[elem.ele_pos])
        # return unique codes
        ret = list(set(err_codes))
        ret.sort()
        return ret
    def visit_st_post(self, err_st):
        """
        Close the transaction-set acknowledgment: write IK5.

        @param err_st: ST Loop error handler
        @type err_st: L{error_handler.err_st}
        """
        if err_st.ack_code is None:
            raise EngineError('err_st.ack_cde variable not set')
        seg_data = sift_pyx12.segment.Segment('IK5', '~', '*', ':')
        seg_data.set('01', err_st.ack_code)
        err_codes = self.__get_st_errors(err_st)
        # IK5 has room for at most 5 note codes.
        for err_code in err_codes[:5]:
            seg_data.append(err_code)
        self.wr.Write(seg_data)
    def visit_seg(self, err_seg):
        """
        Write one IK3 segment per distinct segment-level error code.

        @param err_seg: Segment error handler
        @type err_seg: L{error_handler.err_seg}
        """
        valid_IK3_codes = ('1', '2', '3', '4', '5', '6', '7', '8', 'I4', 'I6', 'I7', 'I8', 'I9')
        # Build the common IK3 prefix once, then clone it per error code.
        seg_base = sift_pyx12.segment.Segment('IK3', '~', '*', ':')
        seg_base.set('01', err_seg.seg_id)
        seg_base.set('02', '%i' % err_seg.seg_count)
        if err_seg.ls_id:
            seg_base.set('03', err_seg.ls_id)
        #else:
        #    seg_base.set('')
        seg_str = seg_base.format('~', '*', ':')
        errors = [x[0] for x in err_seg.errors]
        # 'SEG1' is an internal marker: translate it to code '8'.
        if 'SEG1' in errors:
            if '8' not in errors:
                errors.append('8')
            errors = [x for x in errors if x != 'SEG1']
        for err_cde in list(set(errors)):
            if err_cde in valid_IK3_codes:  # unique codes
                seg_data = sift_pyx12.segment.Segment(seg_str, '~', '*', ':')
                seg_data.set('IK304', err_cde)
                self.wr.Write(seg_data)
        # todo: add segment context
        # todo: add business unit context
        # Element-level children imply code '8' if not already reported.
        if err_seg.child_err_count() > 0 and '8' not in errors:
            seg_data = sift_pyx12.segment.Segment(seg_str, '~', '*', ':')
            seg_data.set('IK304', '8')
            self.wr.Write(seg_data)
    def visit_ele(self, err_ele):
        """
        Write one IK4 segment per recognized element-level error.

        @param err_ele: Segment error handler
        @type err_ele: L{error_handler.err_ele}
        """
        valid_IK4_codes = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '12', '13',
                           'I10', 'I11', 'I12', 'I13', 'I6', 'I9')
        # Common IK4 prefix: element / sub-element / repetition position.
        seg_base = sift_pyx12.segment.Segment('IK4', '~', '*', ':')
        seg_base.set('01-1', '%i' % (err_ele.ele_pos))
        if err_ele.subele_pos:
            seg_base.set('01-2', '%i' % (err_ele.subele_pos))
        if err_ele.repeat_pos:
            seg_base.set('01-3', '%i' % (err_ele.repeat_pos))
        if err_ele.ele_ref_num:
            seg_base.set('02', err_ele.ele_ref_num)
        seg_str = seg_base.format('~', '*', ':')
        for (err_cde, err_str, bad_value) in err_ele.errors:
            if err_cde in valid_IK4_codes:
                seg_data = sift_pyx12.segment.Segment(seg_str, '~', '*', ':')
                seg_data.set('IK403', err_cde)
                if bad_value:
                    seg_data.set('IK404', bad_value)
                # todo: add element context
                self.wr.Write(seg_data)
| StarcoderdataPython |
3496122 | <gh_stars>0
# -*- coding: utf-8 -*-
import toml
import os, sys
import re
####################################################################################################
class to_namespace(object):
    """Thin attribute-access wrapper around a plain dict."""

    def __init__(self, adict):
        # Expose every key of *adict* as an instance attribute.
        self.__dict__.update(adict)

    def get(self, key):
        """Dict-style lookup; returns None for missing keys."""
        return self.__dict__.get(key)
def log(*s, n=0, m=1):
    # Print *s* preceded by m newlines and framed by n '#' characters.
    # NOTE(review): an identical `log` is re-defined later in this module and
    # shadows this one; consider removing one of the two copies.
    sspace = "#" * n
    sjump = "\n" * m
    print(sjump, sspace, s, sspace, flush=True)
####################################################################################################
def os_package_root_path(add_path="",n=0):
from pathlib import Path
add_path = os.path.join(Path(__file__).parent.absolute(), add_path)
# print("os_package_root_path,check", add_path)
return add_path
def os_package_root_path(filepath, sublevel=0, path_add=""):
    """
    get the module package root folder

    Resolves *filepath*, climbs *sublevel* extra directories above its parent
    and joins *path_add* onto the result.
    """
    from pathlib import Path
    root = Path(os.path.realpath(filepath)).parent
    for _ in range(sublevel):
        root = root.parent
    return os.path.join(root.absolute(), path_add)
def os_file_current_path():
    """Return the directory containing the calling module's file, with a
    trailing path separator."""
    import inspect  # BUG FIX: `inspect` was used here but never imported at module level
    val = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    # Joining with "" guarantees exactly one trailing separator.
    val = str(os.path.join(val, ""))
    return val
def log(*s, n=0, m=1):
    """Print *s* preceded by m blank lines and framed by n '#' characters."""
    frame = "#" * n
    lead = "\n" * m
    print(lead, frame, s, frame, flush=True)
def load_config(args, config_file, config_mode, verbose=0):
    """Load *config_file* (JSON), select the *config_mode* section, overlay
    any CLI argument from *args* that is not None, and return the merged
    parameters as a `to_namespace` object.

    On any failure (missing file, bad JSON, unknown mode) the original *args*
    namespace is returned unchanged — a deliberate best-effort fallback.
    """
    import json
    if verbose:
        print(config_file)
    try:
        # BUG FIX: the original `json.load(open(...))` leaked the file handle.
        with open(config_file, mode="r") as fh:
            pars = json.load(fh)
        pars = pars[config_mode]  # e.g. "test" / "prod" section
        if verbose:
            print(config_file, pars)
        ### Overwrite dict_pars from CLI input and merge with config file:
        # CLI values that were explicitly set take precedence.
        for key, x in vars(args).items():
            if x is not None:
                pars[key] = x
        pars = to_namespace(pars)  # object/namespace access: pars.instance
        return pars
    except Exception as e:
        # Best-effort: report the problem and keep the CLI arguments.
        print(e)
        return args
def val(x, xdefault):
    """Return *x* unless it is None, in which case return *xdefault*."""
    try:
        return x if x is not None else xdefault
    except Exception:  # was a bare `except:`, which would also mask SystemExit etc.
        return xdefault
def get_recursive_files2(folderPath, ext):
    """Recursively collect file names under *folderPath* whose name matches
    the regular expression *ext*.

    Note: returns bare file names (not full paths) — preserved from the
    original behaviour.
    """
    outFiles = []
    for entry in os.listdir(folderPath):
        full = os.path.join(folderPath, entry)
        if os.path.isdir(full):
            # BUG FIX: the original recursed into `get_recursive_files`, a
            # different glob-based function whose `ext` is a glob suffix, not
            # a regex — recurse into this function instead.
            outFiles += get_recursive_files2(full, ext)
        elif re.match(ext, entry):
            outFiles.append(entry)
    return outFiles
def get_recursive_files(folderPath, ext='/*model*/*.py'):
    """Glob for files under *folderPath* matching the glob suffix *ext*
    (recursive '**' patterns are honoured)."""
    import glob
    return glob.glob(folderPath + ext, recursive=True)
####################################################################################################
########## TF specific #############################################################################
def load_tf(foldername, filename):
    """
    Load an MLflow-saved TensorFlow (TF1) model and return its signature
    tensors as ``(input_tensors, output_tensors)``.

    https://www.mlflow.org/docs/latest/python_api/mlflow.tensorflow.html#
    """
    import mlflow.tensorflow
    import tensorflow as tf
    model_uri = foldername + "/" + filename
    # A fresh graph + session; the model is loaded into this session.
    tf_graph = tf.Graph()
    tf_sess = tf.Session(graph=tf_graph)
    with tf_graph.as_default():
        signature_def = mlflow.tensorflow.load_model(model_uri=model_uri,
                                                     tf_sess=tf_sess)
        # Resolve the signature's named inputs/outputs to graph tensors.
        input_tensors = [tf_graph.get_tensor_by_name(input_signature.name)
                         for _, input_signature in signature_def.inputs.items()]
        output_tensors = [tf_graph.get_tensor_by_name(output_signature.name)
                          for _, output_signature in signature_def.outputs.items()]
    # NOTE(review): `tf_sess` is not returned or closed here; callers cannot
    # run these tensors without access to the session — confirm intended use.
    return input_tensors, output_tensors
def save_tf(sess, file_path):
    """Persist the graph of a TF1 session to *file_path*; returns the
    checkpoint path reported by the saver."""
    import tensorflow as tf
    return tf.compat.v1.train.Saver().save(sess, file_path)
####################################################################################################
########## pyTorch specific ########################################################################
def load_tch(foldername, filename):
    """Placeholder for loading a PyTorch model; not implemented yet."""
    return 1
def save_tch(foldername, filename):
    """Placeholder for saving a PyTorch model; not implemented yet."""
    return 1
def load_pkl(foldername, filename):
    """Placeholder for loading a pickled model; not implemented yet."""
    return 1
####################################################################################################
########## Other model specific ####################################################################
def load_pkl(folder_name, filename=None):
    # NOTE(review): this stub shadows the earlier `load_pkl` above (which
    # returns 1); this version returns None. Keep only one definition.
    pass
"""
import glob
path = "."
files = glob.glob(path + '/*model*/*.py', recursive=True)
files
from datetime import datetime, timedelt
a
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
sns.set()
class ModelFactory():
def model_create(self, model_name, datasampler, hyper_parameters={'epoch':5}):
if model_name == 'lstm':
datasampler = DataSampler() # the reader object for the input data
model = LSTM(datasampler)
model.set_pars(hyper_parameters)
return model
# example usage
# data = DataSampler(file_name='x.csv', timestamp=5)
# this datasampler is sent to the model
class DataSampler():
def __init__(self, file_name = '../dataset/GOOG-year.csv', timestamp =5):
self.data_pars = pd.read_csv()
self.date_ori = pd.to_datetime(data_pars.iloc[:, 0]).tolist()
self.minmax = MinMaxScaler()
self.timestamp = timestamp
self.df_log = self.preprocess_df()
def preprocess_df(self):
self.minmax.fit(self.data_pars.iloc[:, 1:].astype('float32'))
df_log = minmax.transform(data_pars.iloc[:, 1:].astype('float32'))
df_log = pd.DataFrame(df_log)
return df_log
def batch_sampler(self, start, end):
# sampler for training set
for k in range(0, start, end):
index = min(k + timestamp, df_log.shape[0] -1)
batch_x = np.expand_dims( df_log.iloc[k : index, :].values, axis = 0)
batch_y = df_log.iloc[k + 1 : index + 1, :].values
yield (batch_x, batch_y)
def train_batch_sampler(self):
return batch_sampler(self.df_log.shape[0] - 1, self.timestamp)
def test_batch_sampler(self):
return batch_sampler((self.df_log.shape[0] // self.timestamp) * self.timestamp, self.timestamp)
def get_n_samples_per_batch(self):
return self.df_log.shape[0] // self.timestamp
class BaseModelDl(object):
#Base Model class used for models under Dl class
#acting as parent class
def __init__(self):
self.datasampler = None
self.name = ''
self.epoch = 5
self.learning_rate = 0.01
self.sess = None
def get_pars(self):
# epoch and learning rate exists in all models
return {
'epoch': self.epoch,
'learning_rate': self.learning_rate
}
def set_pars(self, **parameters):
# this function is common for all children classes
for parameter, value in parameters.items():
if hasattr(self,parameter):
setattr(self, parameter, value)
else:
raise AttributeError('Class {} does not have parameter {}'.format(
self.name, parameter
))
return self
def build_model(self):
# used to create placeholders and optimizer based on parameters set
# called after setting the parameters
pass
def fit(self):
pass
def predict(self,X):
pass
def load(self, model_path):
# model_path=parent_folder/model.ckpt
saver = tf.train.Saver()
saver.load(self.sess, model_path)
def save(self, model_path):
# model_path=parent_folder/model.ckpt
saver = tf.train.Saver()
saver.save(self.sess, model_path)
class LSTM(BaseModelDl):
def __init__(self, datasampler):
super(LSTM, self).__init__()
self.datasampler = datasampler
self.name = 'lstm'
self.num_layers = 1
self.size_layer = 128
self.timestamp = 5
self.dropout_rate = 0.7
self.future_day = 50
self.learning_rate = 0.01
self.feat_size = 1
self.output_size = 1
self.forget_bias = 0.1
self.model = None
def get_pars(self):
return {
'num_layers': self.num_layers,
'size_layer': self.size_layer,
'dropout_rate': self.dropout_rate,
'epoch': self.epoch,
'learning_rate': 0.01 ,
'output_size': self.output_size,
'feat_size': self.feat_size,
'forget_bias': self.forget_bias
}
def build_model(self):
def lstm_cell(size_layer):
return tf.nn.rnn_cell.LSTMCell(size_layer, state_is_tuple = False)
rnn_cells = tf.nn.rnn_cell.MultiRNNCell(
[lstm_cell(self.size_layer) for _ in range(self.num_layers)],
state_is_tuple = False,)
self.X = tf.placeholder(tf.float32, (None, None, self.feat_size))
self.Y = tf.placeholder(tf.float32, (None, self.output_size))
drop = tf.contrib.rnn.DropoutWrapper(rnn_cells, output_keep_prob = self.forget_bias)
self.hidden_layer = tf.placeholder( tf.float32,
(None, num_layers * 2 * size_layer))
self.outputs, self.last_state = tf.nn.dynamic_rnn(
drop, self.X, initial_state = self.hidden_layer, dtype = tf.float32
)
self.logits = tf.layers.dense(self.outputs[-1], output_size)
self.cost = tf.reduce_mean(tf.square(self.Y - self.logits))
self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)
self.sess = tf.InteractiveSession()
self.sess.run(tf.global_variables_initializer())
def fit(self):
for i in range(self.epoch):
init_value = np.zeros((1, self.num_layers * 2 * self.size_layer))
for batch_x, batch_y in self.datasampler.train_batch_sampler():
last_state, _, loss = sess.run(
[self.last_state, self.optimizer, self.cost],
feed_dict = {
self.X: batch_x,
self.Y: batch_y,
self.hidden_layer: init_value,
},
)
init_value = last_state
total_loss += loss
total_loss /= self.datasampler.get_n_samples_per_batch()
if (i + 1) % 100 == 0:
print('epoch:', i + 1, 'avg loss:', total_loss)
def predict(self,):
# takes a sampler as the one in datasampler class
# in this function it should take a sampler
"""
| StarcoderdataPython |
9691069 | <reponame>joshimbriani/Pyarks<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def universalNameToID(name):
    """Map a Universal park name or abbreviation to its numeric id.

    Returns -1 for unrecognised names.
    """
    park_ids = {
        "IOA": 10000,
        "Islands of Adventure": 10000,
        "USF": 10010,
        "Universal Studios Florida": 10010,
        "USH": 13825,
        "Universal Studios Hollywood": 13825,
        "VB": 13801,
        "Volcano Bay": 13801,
    }
    return park_ids.get(name, -1)
def USJTranslate(name):
    """Translate a Universal Studios Japan attraction name from Japanese to
    English; returns "No translation" for unknown names."""
    translations = {
        "ハローキティのカップケーキ・ドリーム": "Hello Kitty's Cupcake Dream",
        "エルモのゴーゴー・スケートボード": "Elmo's go-go skateboard",
        "モッピーのバルーン・トリップ": "Mobi Balloon Trip",
        "フライング・スヌーピー": "Flying Snoopy",
        "スヌーピーのグレートレース™": "Snoopy's Great Race ™",
        "アメージング・アドベンチャー・オブ・スパイダーマン・ザ・ライド 4K3D": "Amazing Adventure of Spider-Man The Ride 4K 3 D",
        "妖怪ウォッチ・ザ・リアル 4": "Yokai Watch The Real 4",
        "ジュラシック・パーク・ザ・ライド®": "Jurassic Park - The Ride ®",
        "ジョーズ®": "Jaws ®",
        "セサミストリート 4-D ムービーマジック™": "Sesame Street 4-D Movie Magic ™",
        "フライト・オブ・ザ・ヒッポグリフ™": "Flight of the Hippogriff ™",
        "ハリウッド・ドリーム・ザ・ライド": "Hollywood · Dream · The · Ride",
        "ハリウッド・ドリーム・ザ・ライド~バックドロップ~": "Hollywood · Dream · The Ride ~ Backdrop ~",
        "ザ・フライング・ダイナソー": "The Flying Dinosaur",
        "ハリー・ポッター・アンド・ザ・フォービドゥン・ジャーニー™": "Harry Potter and the Forbidden Journey ™",
        "スペース・ファンタジー・ザ・ライド": "Space Fantasy the Ride",
        "バックドラフト®": "Backdraft ®",
        "シュレック 4-D アドベンチャー™": "Shrek 4-D Adventure ™",
        "休止中": "Inactive",
    }
    return translations.get(name, "No translation")
def seaworldNameToID(name):
    """Map a SeaWorld / Busch Gardens park abbreviation to its internal id.

    Unknown names fall back to "BG_TPA" (same as "BGT"), matching the
    original if/else chain.
    """
    park_ids = {
        "BGT": "BG_TPA",
        "SWO": "SW_MCO",
        "SWSD": "SW_SAN",
        "SWSA": "SW_SAT",
        "BGW": "BG_PHF",
    }
    return park_ids.get(name, "BG_TPA")
12808631 | <filename>keystoneclient/v3/role_assignments.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import base
from keystoneclient import exceptions
class RoleAssignment(base.Resource):
    """Represents an Identity role assignment.

    Attributes:
        * role: an object which contains a role uuid
        * user or group: an object which contains either a user or
                         group uuid
        * scope: an object which has either a project or domain object
                 containing an uuid
    """
    # No behaviour of its own: instances are hydrated by the manager from
    # API responses via base.Resource.
    pass
class RoleAssignmentManager(base.CrudManager):
    """Manager class for manipulating Identity roles assignments.

    The role-assignments API is read-only: only ``list`` is supported; every
    other CRUD operation raises ``MethodNotImplemented``.
    """
    resource_class = RoleAssignment
    collection_key = 'role_assignments'
    key = 'role_assignment'

    def _check_not_user_and_group(self, user, group):
        # The API accepts at most one actor filter.
        if user and group:
            raise exceptions.ValidationError(
                'Specify either a user or group, not both')

    def _check_not_domain_and_project(self, domain, project):
        # The API accepts at most one scope filter.
        if domain and project:
            raise exceptions.ValidationError(
                'Specify either a domain or project, not both')

    def list(self, user=None, group=None, project=None, domain=None, role=None,
             effective=False):
        """List role assignments, optionally filtered.

        With no arguments, every role assignment in the system is listed.
        Supplying both ``user`` and ``group``, or both ``domain`` and
        ``project``, raises a ValidationError.

        :param user: user to filter by (optional)
        :param group: group to filter by (optional)
        :param project: project to filter by (optional)
        :param domain: domain to filter by (optional)
        :param role: role to filter by (optional)
        :param boolean effective: return effective role assignments (optional)
        """
        self._check_not_user_and_group(user, group)
        self._check_not_domain_and_project(domain, project)

        query_params = {}
        # Each supplied object is reduced to its id for the query string.
        for param, obj in (('user.id', user),
                           ('group.id', group),
                           ('scope.project.id', project),
                           ('scope.domain.id', domain),
                           ('role.id', role)):
            if obj:
                query_params[param] = base.getid(obj)
        if effective:
            query_params['effective'] = effective

        return super(RoleAssignmentManager, self).list(**query_params)

    def create(self, **kwargs):
        raise exceptions.MethodNotImplemented(
            'Create not supported for role assignments')

    def update(self, **kwargs):
        raise exceptions.MethodNotImplemented(
            'Update not supported for role assignments')

    def get(self, **kwargs):
        raise exceptions.MethodNotImplemented(
            'Get not supported for role assignments')

    def find(self, **kwargs):
        raise exceptions.MethodNotImplemented(
            'Find not supported for role assignments')

    def put(self, **kwargs):
        raise exceptions.MethodNotImplemented(
            'Put not supported for role assignments')

    def delete(self, **kwargs):
        raise exceptions.MethodNotImplemented(
            'Delete not supported for role assignments')
3260653 | # ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import sys
from pathlib import Path
# This file here mimics how the InnerEye code would be used as a git submoTestdule. The test script will
# copy the InnerEye code to a folder Submodule. The test will then invoke the present file as a runner,
# and train a model in AzureML.
repository_root = Path(__file__).absolute().parent.parent


def add_package_to_sys_path_if_needed() -> None:
    """
    Checks if the Python paths in sys.path already contain the /Submodule
    folder. If not, add it.
    """
    submodule = repository_root / "Submodule"
    # Compare as Path objects so equivalent spellings of the same path match.
    if all(Path(entry) != submodule for entry in sys.path):
        print(f"Adding {submodule} to sys.path")
        sys.path.append(str(submodule))
def main() -> None:
    """Run the InnerEye runner, falling back to the submodule copy of the
    InnerEye package when it is not installed."""
    try:
        from InnerEye import ML  # noqa: 411
    except ImportError:
        # BUG FIX: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit and unrelated errors raised while
        # importing. Only a failed import should trigger the fallback.
        add_package_to_sys_path_if_needed()
    from InnerEye.ML import runner
    from InnerEye.Common import fixed_paths
    print(f"Repository root: {repository_root}")
    runner.run(project_root=repository_root,
               yaml_config_file=fixed_paths.SETTINGS_YAML_FILE,
               post_cross_validation_hook=None)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1727244 | <gh_stars>0
from skype_log import Skypper
# Demo: inspect the default Skype profile discovered on this machine.
my_log = Skypper()
print(my_log.get_skype_path())
print(my_log.get_skype_user())
print(my_log.get_skype_database_path())
# Second instance bound to an explicit (presumably non-existent) user name.
my_log2 = Skypper(skype_user="no_one")
#print(my_log2.get_skype_user())
| StarcoderdataPython |
313685 | <filename>plato/agent/component/dialogue_policy/deep_learning/reinforce_policy.py
"""
Copyright (c) 2019-2020 Uber Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = "<NAME>"
from .. import dialogue_policy
from plato.agent.component.dialogue_policy.slot_filling_policy \
import HandcraftedPolicy
from plato.domain.ontology import Ontology
from plato.domain.database import DataBase
from plato.dialogue.action import DialogueAct, DialogueActItem, Operator
from plato.dialogue.state import SlotFillingDialogueState
from plato.agent.component.user_simulator.\
agenda_based_user_simulator.agenda_based_us import AgendaBasedUS
from copy import deepcopy
import numpy as np
import random
import os
import pickle
"""
ReinforcePolicy implements the REINFORCE algorithm for dialogue policy
learning.
"""
class ReinforcePolicy(dialogue_policy.DialoguePolicy):
    """REINFORCE (policy-gradient) dialogue policy.

    Uses a linear model (``state_features x actions`` weight matrix) with a
    softmax over actions. During training it mixes expert-warmup actions,
    uniformly random actions and sampling from the learned policy.
    """

    def __init__(self, args):
        """
        Initialize parameters and internal structures

        :param args: the policy's arguments (requires 'ontology' and
            'database'; optional: 'agent_id', 'agent_role', 'domain',
            'alpha', 'gamma', 'epsilon', 'alpha_decay', 'epsilon_decay')
        :raises ValueError: if ontology/database are missing or of wrong type
        """
        super(ReinforcePolicy, self).__init__()

        self.ontology = None
        if 'ontology' in args:
            ontology = args['ontology']
            if isinstance(ontology, Ontology):
                self.ontology = ontology
            else:
                raise ValueError('ReinforcePolicy Unacceptable '
                                 'ontology type %s ' % ontology)
        else:
            raise ValueError('ReinforcePolicy: No ontology provided')

        self.database = None
        if 'database' in args:
            database = args['database']
            if isinstance(database, DataBase):
                self.database = database
            else:
                raise ValueError('ReinforcePolicy: Unacceptable '
                                 'database type %s ' % database)
        else:
            raise ValueError('ReinforcePolicy: No database provided')

        self.agent_id = args['agent_id'] if 'agent_id' in args else 0
        self.agent_role = \
            args['agent_role'] if 'agent_role' in args else 'system'
        domain = args['domain'] if 'domain' in args else None

        # Learning rate, discount factor and exploration rate (with decays)
        self.alpha = args['alpha'] if 'alpha' in args else 0.2
        self.gamma = args['gamma'] if 'gamma' in args else 0.95
        self.epsilon = args['epsilon'] if 'epsilon' in args else 0.95
        self.alpha_decay_rate = \
            args['alpha_decay'] if 'alpha_decay' in args else 0.995
        self.exploration_decay_rate = \
            args['epsilon_decay'] if 'epsilon_decay' in args else 0.9995

        self.IS_GREEDY = False
        self.policy_path = None
        self.weights = None
        self.sess = None  # unused (TensorFlow leftover); kept for compatibility

        # System and user expert policies (optional)
        self.warmup_policy = None
        self.warmup_simulator = None

        if self.agent_role == 'system':
            # Put your system expert policy here
            self.warmup_policy = HandcraftedPolicy({
                'ontology': self.ontology})

        elif self.agent_role == 'user':
            usim_args = \
                dict(
                    zip(['ontology', 'database'],
                        [self.ontology, self.database]))
            # Put your user expert policy here
            self.warmup_simulator = AgendaBasedUS(usim_args)

        self.tf_scope = "policy_" + self.agent_role + '_' + str(self.agent_id)

        # Default value
        self.is_training = True

        # Extract lists of slots that are frequently used
        self.informable_slots = \
            deepcopy(list(self.ontology.ontology['informable'].keys()))
        self.requestable_slots = \
            deepcopy(self.ontology.ontology['requestable'])
        self.system_requestable_slots = \
            deepcopy(self.ontology.ontology['system_requestable'])

        # Always defined, so that encode_action/decode_action never hit an
        # AttributeError when the domain is not CamRest (bug fix: these were
        # previously only set inside the CamRest branch below).
        self.dstc2_acts_sys = None
        self.dstc2_acts_usr = None

        if not domain:
            # Default to CamRest dimensions
            self.NStateFeatures = 56

            # Default to CamRest actions
            self.dstc2_acts = ['inform', 'offer', 'request', 'canthelp',
                               'affirm', 'negate', 'deny', 'ack', 'thankyou',
                               'bye', 'reqmore', 'hello', 'welcomemsg',
                               'expl-conf', 'select', 'repeat', 'reqalts',
                               'confirm-domain', 'confirm']
        else:
            # Try to identify number of state features
            if domain in ['CamRest', 'SFH', 'SlotFilling']:
                d_state = \
                    SlotFillingDialogueState(
                        {'slots': self.system_requestable_slots})

                # Plato does not use action masks (rules to define which
                # actions are valid from each state) and so training can
                # be harder. This becomes easier if we have a smaller
                # action set.

                # Sub-case for CamRest
                if domain == 'CamRest':
                    # Does not include inform and request that are modelled
                    # together with their arguments
                    self.dstc2_acts_sys = ['offer', 'canthelp', 'affirm',
                                           'deny', 'ack', 'bye',
                                           'reqmore', 'welcomemsg',
                                           'expl-conf', 'select', 'repeat',
                                           'confirm-domain', 'confirm']

                    # Does not include inform and request that are modelled
                    # together with their arguments
                    self.dstc2_acts_usr = ['affirm', 'negate', 'deny', 'ack',
                                           'thankyou', 'bye',
                                           'reqmore', 'hello', 'expl-conf',
                                           'repeat', 'reqalts', 'restart',
                                           'confirm']
            else:
                print('Warning! domain has not been defined. Using '
                      'Slot-Filling dialogue State')
                d_state = \
                    SlotFillingDialogueState({'slots': self.informable_slots})

            d_state.initialize()
            # The state-feature count is derived from an encoded empty state.
            self.NStateFeatures = len(self.encode_state(d_state))
            print('Reinforce policy {0} automatically determined '
                  'number of state features: {1}'
                  .format(self.agent_role, self.NStateFeatures))

        if domain == 'CamRest' and self.dstc2_acts_sys:
            if self.agent_role == 'system':
                self.NActions = \
                    len(self.dstc2_acts_sys) + \
                    len(self.requestable_slots) + \
                    len(self.system_requestable_slots)

                self.NOtherActions = \
                    len(self.dstc2_acts_usr) + \
                    2 * len(self.requestable_slots)

            elif self.agent_role == 'user':
                self.NActions = \
                    len(self.dstc2_acts_usr) + \
                    2 * len(self.requestable_slots)

                self.NOtherActions = \
                    len(self.dstc2_acts_sys) + \
                    len(self.requestable_slots) + \
                    len(self.system_requestable_slots)
        else:
            if self.agent_role == 'system':
                self.NActions = \
                    3 + len(self.system_requestable_slots) + \
                    len(self.requestable_slots)

                self.NOtherActions = \
                    2 + len(self.requestable_slots) +\
                    len(self.requestable_slots)

            elif self.agent_role == 'user':
                self.NActions = \
                    2 + len(self.requestable_slots) + \
                    len(self.requestable_slots)

                self.NOtherActions = \
                    3 + len(self.system_requestable_slots) + \
                    len(self.requestable_slots)

        print('Reinforce {0} policy Number of Actions: {1}'
              .format(self.agent_role, self.NActions))

    def initialize(self, args):
        """
        Initialize internal structures at the beginning of each dialogue

        :param args: optional overrides ('is_training', 'goal',
            'policy_path', 'learning_rate', 'learning_decay_rate',
            'discount_factor', 'exploration_rate',
            'exploration_decay_rate')
        :return: Nothing
        """
        if 'is_training' in args:
            self.is_training = bool(args['is_training'])

            if self.agent_role == 'user' and self.warmup_simulator:
                if 'goal' in args:
                    # NOTE(review): {args['goal']} builds a *set*; restart()
                    # passes a dict instead — confirm which form
                    # AgendaBasedUS.initialize() actually expects.
                    self.warmup_simulator.initialize({args['goal']})
                else:
                    print('WARNING ! No goal provided for Reinforce policy '
                          'user simulator @ initialize')
                    self.warmup_simulator.initialize({})

        if 'policy_path' in args:
            self.policy_path = args['policy_path']

        if 'learning_rate' in args:
            self.alpha = args['learning_rate']

        if 'learning_decay_rate' in args:
            self.alpha_decay_rate = args['learning_decay_rate']

        if 'discount_factor' in args:
            self.gamma = args['discount_factor']

        if 'exploration_rate' in args:
            # bug fix: this used to (incorrectly) overwrite self.alpha
            self.epsilon = args['exploration_rate']

        if 'exploration_decay_rate' in args:
            self.exploration_decay_rate = args['exploration_decay_rate']

        if self.weights is None:
            self.weights = np.random.rand(self.NStateFeatures, self.NActions)

    def restart(self, args):
        """
        Re-initialize relevant parameters / variables at the beginning of each
        dialogue.

        :return: nothing
        """
        if self.agent_role == 'user' and self.warmup_simulator:
            if 'goal' in args:
                self.warmup_simulator.initialize(args)
            else:
                print('WARNING! No goal provided for Reinforce '
                      'policy user simulator @ restart')
                self.warmup_simulator.initialize({})

    def next_action(self, state):
        """
        Consults the policy to produce the agent's response

        :param state: the current dialogue state
        :return: a list of dialogue acts, representing the agent's response
        """
        # Exploration during training: with probability epsilon, pick either
        # a warmup (expert) action or a uniformly random action (50/50).
        if self.is_training and random.random() < self.epsilon:
            if random.random() < 0.5:
                print('--- {0}: Selecting warmup action.'
                      .format(self.agent_role))
                if self.agent_role == 'system':
                    return self.warmup_policy.next_action(state)

                else:
                    self.warmup_simulator.receive_input(
                        state.user_acts, state.user_goal)
                    return self.warmup_simulator.respond()
            else:
                print('--- {0}: Selecting random action.'
                      .format(self.agent_role))
                return self.decode_action(
                    random.choice(
                        range(0, self.NActions)),
                    self.agent_role == "system")

        # Probabilistic policy: Sample from action wrt probabilities
        probs = self.calculate_policy(self.encode_state(state))

        if any(np.isnan(probs)):
            print('WARNING! NAN detected in action probabilities! Selecting '
                  'random action.')
            return self.decode_action(
                random.choice(range(0, self.NActions)),
                self.agent_role == "system")

        if self.IS_GREEDY:
            # Get greedy action
            max_pi = max(probs)

            maxima = [i for i, j in enumerate(probs) if j == max_pi]

            # Break ties randomly
            if maxima:
                sys_acts = \
                    self.decode_action(
                        random.choice(maxima), self.agent_role == 'system')
            else:
                print(
                    f'--- {self.agent_role}: Warning! No maximum value '
                    f'identified for policy. Selecting random action.')
                return self.decode_action(
                    random.choice(
                        range(0, self.NActions)),
                    self.agent_role == 'system')
        else:
            # Pick from top 3 actions
            # bug fix: the slice was [0:2], which only picked the top 2
            top_3 = np.argsort(-probs)[0:3]
            sys_acts = \
                self.decode_action(
                    random.choices(
                        top_3, probs[top_3])[0], self.agent_role == 'system')

        return sys_acts

    @staticmethod
    def softmax(x):
        """
        Calculates the softmax of x

        :param x: an array of numbers
        :return: the softmax of the array
        """
        # Subtracting the max keeps np.exp from overflowing; the result is
        # mathematically unchanged.
        e_x = np.exp(x - np.max(x))
        out = e_x / e_x.sum()
        return out

    @staticmethod
    def softmax_gradient(x):
        """
        Calculates the gradient of the softmax

        :param x: an array of softmax probabilities
        :return: the Jacobian of the softmax at x
        """
        x = np.asarray(x)
        x_reshaped = x.reshape(-1, 1)
        return np.diagflat(x_reshaped) - np.dot(x_reshaped, x_reshaped.T)

    def calculate_policy(self, state):
        """
        Calculates the probabilities for each action from the given state

        :param state: the current dialogue state (encoded feature vector)
        :return: probabilities of actions
        """
        dot_prod = np.dot(state, self.weights)
        # bug fix: exponentiate relative to the max for numerical stability
        # (identical probabilities, no overflow for large logits).
        exp_dot_prod = np.exp(dot_prod - np.max(dot_prod))
        return exp_dot_prod / np.sum(exp_dot_prod)

    def train(self, dialogues):
        """
        Train the policy network with the REINFORCE update rule

        :param dialogues: dialogue experience (list of dialogues, each a list
            of turn dicts with 'state', 'action' and 'reward')
        :return: nothing
        """
        # If called by accident
        if not self.is_training:
            return

        for dialogue in dialogues:
            discount = self.gamma

            if len(dialogue) > 1:
                # Propagate the final reward to the second-to-last turn.
                dialogue[-2]['reward'] = dialogue[-1]['reward']

            rewards = [t['reward'] for t in dialogue]
            # Normalize rewards (small epsilon guards against zero std).
            norm_rewards = \
                (rewards - np.mean(rewards)) / (np.std(rewards) + 0.000001)

            for (t, turn) in enumerate(dialogue):
                act_enc = self.encode_action(turn['action'],
                                             self.agent_role == 'system')
                if act_enc < 0:
                    continue

                state_enc = self.encode_state(turn['state'])

                if len(state_enc) != self.NStateFeatures:
                    raise ValueError(f'Reinforce dialogue policy '
                                     f'{self.agent_role} mismatch in state'
                                     f'dimensions: State Features: '
                                     f'{self.NStateFeatures} != State '
                                     f'Encoding Length: {len(state_enc)}')

                # Calculate the gradients

                # Call policy again to retrieve the probability of the
                # action taken
                probabilities = self.calculate_policy(state_enc)

                softmax_deriv = self.softmax_gradient(probabilities)[act_enc]
                log_policy_grad = softmax_deriv / probabilities[act_enc]
                gradient = \
                    np.asarray(
                        state_enc)[None, :].transpose().dot(
                        log_policy_grad[None, :])
                gradient = np.clip(gradient, -1.0, 1.0)

                # Train policy
                self.weights += \
                    self.alpha * gradient * norm_rewards[t] * discount
                self.weights = np.clip(self.weights, -1, 1)

                discount *= self.gamma

        # Decay learning and exploration rates down to their floors.
        if self.alpha > 0.01:
            self.alpha *= self.alpha_decay_rate

        if self.epsilon > 0.5:
            self.epsilon *= self.exploration_decay_rate

        print(f'REINFORCE train, alpha: {self.alpha}, epsilon: {self.epsilon}')

    def encode_state(self, state):
        """
        Encodes the dialogue state into a vector.

        :param state: the state to encode
        :return: list of binary features representing the state
        """
        temp = [int(state.is_terminal_state), int(state.system_made_offer)]

        if self.agent_role == 'user':
            # The user agent needs to know which constraints and requests
            # need to be communicated and which of them
            # actually have.
            if state.user_goal:
                for c in self.informable_slots:
                    if c != 'name':
                        if c in state.user_goal.constraints:
                            temp.append(1)
                        else:
                            temp.append(0)

                for c in self.informable_slots:
                    if c != 'name':
                        if c in state.user_goal.actual_constraints and \
                                state.user_goal.actual_constraints[c].value:
                            temp.append(1)
                        else:
                            temp.append(0)

                for r in self.requestable_slots:
                    if r in state.user_goal.requests:
                        temp.append(1)
                    else:
                        temp.append(0)

                for r in self.requestable_slots:
                    if r in state.user_goal.actual_requests and \
                            state.user_goal.actual_requests[r].value:
                        temp.append(1)
                    else:
                        temp.append(0)

            else:
                # Pad with zeros so the feature length stays constant.
                temp += [0] * 2 * (len(self.informable_slots) - 1 +
                                   len(self.requestable_slots))

        if self.agent_role == 'system':
            for value in state.slots_filled.values():
                # This contains the requested slot
                temp.append(1) if value else temp.append(0)

            for r in self.requestable_slots:
                temp.append(1) if r == state.requested_slot else temp.append(0)

        return temp

    def encode_action(self, actions, system=True):
        """
        Encode the action, given the role. Note that does not have to match
        the agent's role, as the agent may be encoding another agent's action
        (e.g. a system encoding the previous user act).

        :param actions: actions to be encoded
        :param system: whether the role whose action we are encoding is a
                       'system'
        :return: the encoded action (or -1 if it cannot be encoded)
        """
        # TODO: Handle multiple actions
        if not actions:
            # bug fix: message used to say "(returning 0)" although the
            # function returns -1
            print('WARNING: Reinforce dialogue policy action encoding called '
                  'with empty actions list (returning -1).')
            return -1

        action = actions[0]

        if system:
            if self.dstc2_acts_sys and action.intent in self.dstc2_acts_sys:
                return self.dstc2_acts_sys.index(action.intent)

            if action.intent == 'request':
                return len(self.dstc2_acts_sys) + \
                       self.system_requestable_slots.index(
                           action.params[0].slot)

            if action.intent == 'inform':
                return len(self.dstc2_acts_sys) + \
                       len(self.system_requestable_slots) + \
                       self.requestable_slots.index(action.params[0].slot)
        else:
            if self.dstc2_acts_usr and action.intent in self.dstc2_acts_usr:
                return self.dstc2_acts_usr.index(action.intent)

            if action.intent == 'request':
                return len(self.dstc2_acts_usr) + \
                       self.requestable_slots.index(action.params[0].slot)

            if action.intent == 'inform':
                return len(self.dstc2_acts_usr) + \
                       len(self.requestable_slots) + \
                       self.requestable_slots.index(action.params[0].slot)

        # Default fall-back action
        # bug fix: "olicy" typo in the message
        print('Reinforce ({0}) policy action encoder warning: Selecting '
              'default action (unable to encode: {1})!'
              .format(self.agent_role, action))
        return -1

    def decode_action(self, action_enc, system=True):
        """
        Decode the action, given the role. Note that does not have to match
        the agent's role, as the agent may be decoding another agent's action
        (e.g. a system decoding the previous user act).

        :param action_enc: action encoding to be decoded
        :param system: whether the role whose action we are decoding is a
                       'system'
        :return: the decoded action (list with one DialogueAct)
        """
        if system:
            if action_enc < len(self.dstc2_acts_sys):
                return [DialogueAct(self.dstc2_acts_sys[action_enc], [])]

            if action_enc < len(self.dstc2_acts_sys) + \
                    len(self.system_requestable_slots):
                return [DialogueAct(
                    'request',
                    [DialogueActItem(
                        self.system_requestable_slots[
                            action_enc - len(self.dstc2_acts_sys)],
                        Operator.EQ, '')])]

            if action_enc < len(self.dstc2_acts_sys) + \
                    len(self.system_requestable_slots) + \
                    len(self.requestable_slots):
                index = action_enc - len(self.dstc2_acts_sys) - \
                        len(self.system_requestable_slots)
                return [DialogueAct(
                    'inform',
                    [DialogueActItem(
                        self.requestable_slots[index], Operator.EQ, '')])]

        else:
            if action_enc < len(self.dstc2_acts_usr):
                return [DialogueAct(self.dstc2_acts_usr[action_enc], [])]

            if action_enc < len(self.dstc2_acts_usr) + \
                    len(self.requestable_slots):
                return [DialogueAct(
                    'request',
                    [DialogueActItem(
                        self.requestable_slots[
                            action_enc - len(self.dstc2_acts_usr)],
                        Operator.EQ,
                        '')])]

            if action_enc < len(self.dstc2_acts_usr) + \
                    2 * len(self.requestable_slots):
                return [DialogueAct(
                    'inform',
                    [DialogueActItem(
                        self.requestable_slots[
                            action_enc - len(self.dstc2_acts_usr) -
                            len(self.requestable_slots)],
                        Operator.EQ,
                        '')])]

        # Default fall-back action
        print('Reinforce dialogue policy ({0}) policy action decoder warning: '
              'Selecting default action (index: {1})!'
              .format(self.agent_role, action_enc))
        return [DialogueAct('bye', [])]

    def save(self, path=None):
        """
        Saves the policy model to the provided path

        :param path: path to save the model to
        :return:
        """
        # Don't save if not training
        if not self.is_training:
            return

        if not path:
            path = 'models/policies/reinforce.pkl'
            print('No policy file name provided. Using default: {0}'
                  .format(path))

        # If the directory does not exist, create it
        if not os.path.exists(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path), exist_ok=True)

        obj = {'weights': self.weights,
               'alpha': self.alpha,
               'alpha_decay_rate': self.alpha_decay_rate,
               'epsilon': self.epsilon,
               'exploration_decay_rate': self.exploration_decay_rate}

        with open(path, 'wb') as file:
            pickle.dump(obj, file, pickle.HIGHEST_PROTOCOL)

    def load(self, path=None):
        """
        Load the policy model from the provided path

        :param path: path to load the model from
        :return:
        """
        if not path:
            print('No dialogue policy loaded.')
            return

        if isinstance(path, str):
            if os.path.isfile(path):
                with open(path, 'rb') as file:
                    obj = pickle.load(file)

                    if 'weights' in obj:
                        self.weights = obj['weights']

                    if 'alpha' in obj:
                        self.alpha = obj['alpha']

                    if 'alpha_decay_rate' in obj:
                        self.alpha_decay_rate = obj['alpha_decay_rate']

                    if 'epsilon' in obj:
                        self.epsilon = obj['epsilon']

                    if 'exploration_decay_rate' in obj:
                        self.exploration_decay_rate = \
                            obj['exploration_decay_rate']

                    print('Reinforce policy loaded from {0}.'
                          .format(path))

            else:
                print('Warning! Reinforce policy file %s not found'
                      % path)
        else:
            print('Warning! Unacceptable value for Reinforce policy '
                  'file name: %s ' % path)
| StarcoderdataPython |
5173836 | from recogn_img.model import PredResult
from recogn_img.recogn import Recognizer
from recogn_img.utils import read_classes
from recogn_img.render import RecognRender
| StarcoderdataPython |
1674285 | <filename>models/__init__.py
from models.bilstm import BiLSTM
from models.transformer import Transformer
from models.transformer_bilstm import TransformerBiLSTM
| StarcoderdataPython |
8057320 | import telebot
from telebot import types
import keyboard as kb
# Telegram bot credentials and media handles ('<KEY>' values are redacted
# placeholders in this dump).
TOKEN = '<KEY>'
bot = telebot.TeleBot(TOKEN)
# Telegram file_id placeholders for the photos shown with each menu section.
PHOTOMENU = '<KEY>'
PHOTOROLL = '<KEY>'
PHOTOSUSI = '<KEY>'
PHOTOSETS = '<KEY>'
PHOTOPIZZA = '<KEY>'
@bot.message_handler(commands=['start'])
def startmenu(message):
    # /start: greet the user and show the top-level reply keyboard.
    bot.send_message(message.chat.id, 'Приветствуем вас.\nЭтот бот поможет вам сделать заказ в нашем ресторане', reply_markup=kb.startmenu)
@bot.message_handler(commands=['info'])
def infomenu(message):
    # /info: show support contacts and the info keyboard.
    bot.send_message(message.chat.id, 'При вознекновении проблем с заказом нажмите кнопку "Помощь"\nДля вопросов связанных с ботом @whiteyod', reply_markup=kb.infomenu)
@bot.message_handler(content_types=['text'])
def main(message):
    # Dispatch on the reply-keyboard button text ("В начало" = back to start,
    # "Меню" = menu, "О нас" = about, "Помощь" = help, "Доставка" = delivery).
    if message.text == 'В начало':
        # NOTE(review): `mainmenu` is written but never read in this file —
        # presumably leftover state; confirm before removing.
        global mainmenu
        mainmenu = 'mainmenu'
        startmenu(message)
    elif message.text == 'Меню':
        # Show the menu photo with the category inline keyboard.
        bot.send_photo(message.chat.id, PHOTOMENU, caption='Это пробная версия меню, в нём показаны лишь некоторые наши блюда:', reply_markup=kb.susimenu)
    elif message.text == 'О нас':
        bot.send_message(message.chat.id, 'Сеть ресторанов "Real Roll" уже более шести лет радует наших клиентов вкуснейшими блюдами по всей стране', reply_markup=kb.back)
    elif message.text == 'Помощь':
        bot.send_message(message.chat.id, 'Если у вас не получется сделать заказ или не работают ссылки - свяжитесь с нашим менеджером @pixel_photoshop\nОн с радостью ответит на все ваши вопросы и поможет совершить заказ.', reply_markup=kb.back)
    elif message.text == 'Доставка':
        bot.send_message(message.chat.id, 'Доставка осуществялется в пределах Винницы, цена зависит от расстояния и начинается от 100р.\nТочную стоимость вам сообщит оператор при обработке вашего заказа.', reply_markup=kb.back)
@bot.callback_query_handler(func=lambda c: True)
def inline(c):
    # Inline-keyboard callbacks: each menu category sends a header message
    # plus a sample dish photo with its own keyboard.
    if c.data == 'roll':
        bot.send_message(c.message.chat.id, 'Роллы', reply_markup=kb.back)
        bot.send_photo(
            c.message.chat.id,
            PHOTOROLL,
            caption='Горячий сырный ролл XL\nСыр чеддер, крем сыр, лист салата, сухари Панко, нори, рис\n105 грн',
            reply_markup=kb.roll_cheese
        )
    elif c.data == 'sushi':
        bot.send_message(c.message.chat.id, 'Суши', reply_markup=kb.back)
        bot.send_photo(
            c.message.chat.id,
            PHOTOSUSI,
            caption='Гункан с тунцом\nТунец, икра тобико, соус спайси, нори, рис\n34 грн',
            reply_markup=kb.sushi_gunkan
        )
    elif c.data == 'sets':
        bot.send_message(c.message.chat.id, 'Сеты', reply_markup=kb.back)
        bot.send_photo(
            c.message.chat.id,
            PHOTOSETS,
            caption='Сет Real Dragon\nБронзовый дракон, красный дракон, тигровый дракон, зелёный дракон\n725 грн',
            reply_markup=kb.set_dragon
        )
    elif c.data == 'pizza':
        bot.send_message(c.message.chat.id, 'Пицца', reply_markup=kb.back)
        bot.send_photo(
            c.message.chat.id,
            PHOTOPIZZA,
            caption='Кальцоне — закрытая пицца\nСыр Моцарелла, балык, грибы шампиньйоны, сыр <NAME>, помидоры\n141 грн\n\n*примечание: овощи, изображенные на фото, являются способом сервировки и в заказ не входят',
            reply_markup=kb.pizza_close
        )
    elif c.data == 'menuback':
        # Return to the category overview.
        bot.send_photo(c.message.chat.id, PHOTOMENU, caption='Это пробная версия меню, в нём показаны лишь некоторые наши блюда:', reply_markup=kb.susimenu)
| StarcoderdataPython |
12800114 | <reponame>pawel-slowik/play-scraper
import re
import datetime
from typing import Match
def parse_balance(balance_str: str) -> float:
    """Parse an account balance like ``"12,34 zł"`` into a float."""
    pattern = "^(?P<int>[0-9]+)(,(?P<fract>[0-9]{2})){0,1} z\u0142"
    found = re.search(pattern, balance_str)
    if found is None:
        raise ValueError("invalid balance: %s" % balance_str)
    return parse_float(found)
def parse_date(date_str: str) -> datetime.date:
    """Parse a ``DD.MM.YYYY`` string into a date object."""
    parsed = datetime.datetime.strptime(date_str, "%d.%m.%Y")
    return parsed.date()
def parse_data_cap(cap_str: str) -> float:
    """Parse a data allowance like ``"1,5 GB"`` or ``"500 MB"`` into GB."""
    found = re.search(
        "^(?P<int>[0-9]+)(,(?P<fract>[0-9]+)){0,1} (?P<unit>GB|MB)", cap_str
    )
    if found is None:
        raise ValueError("invalid data cap: %s" % cap_str)
    gigabytes = parse_float(found)
    # MB values are scaled to GB (decimal convention, factor 1000).
    return gigabytes / 1000 if found.group("unit") == "MB" else gigabytes
def parse_quantity(quantity_str: str) -> int:
    """Parse an item count like ``"5 szt."`` into an int."""
    found = re.search(r"^(?P<int>[0-9]+) (?P<unit>szt\.)", quantity_str)
    if found is None:
        raise ValueError("invalid quantity: %s" % quantity_str)
    return int(found.group("int"))
def parse_float(re_match: Match) -> float:
    """Combine the ``int`` and optional ``fract`` groups of a match into a float."""
    whole = float(re_match.group("int"))
    fract = re_match.group("fract")
    return whole if fract is None else whole + float("." + fract)
def parse_boolean_state(state: str) -> bool:
    """Map the scraped on/off label to a bool (empty string means off).

    Raises KeyError for any other label, matching the original dict lookup.
    """
    if state == "W\u0142\u0105czony":
        return True
    if state == "":
        return False
    raise KeyError(state)
| StarcoderdataPython |
10880 | from .lexer import SolidityLexer, YulLexer
__all__ = ['SolidityLexer', 'YulLexer']
| StarcoderdataPython |
4819705 | from django.core.management.base import BaseCommand
from main.models import Sample
import csv
class Command(BaseCommand):
    # Django management command: reports expedition sample codes from a CSV
    # that are missing from the Sample table.
    help = 'Check if an expedition sample code is listed in the database'

    def add_arguments(self, parser):
        # Two positional arguments: the CSV to read and the CSV to write.
        parser.add_argument('input_filename', help="Filename containing the expedition sample codes", type=str)
        parser.add_argument('output_filename', help="Filename to output the missing expedition sample codes into", type=str)

    def handle(self, *args, **options):
        # Delegate all work to the module-level helper.
        process_input_file(options['input_filename'], options['output_filename'])
def process_input_file(input_filename, output_filename):
    """Write the expedition sample codes from *input_filename* that are not
    present in the Sample table to *output_filename* as a one-column CSV.

    The input CSV is expected to have a header row and the code in the
    first column. Progress is reported on stdout.
    """
    with open(input_filename, 'r') as data_file:
        reader = csv.reader(data_file)
        next(reader, None)  # discard the header row
        with open(output_filename, 'w') as output_file:
            writer = csv.writer(output_file)
            writer.writerow(['expedition_sample_code'])
            checked = 0
            missing = 0
            for row in reader:
                code = row[0]
                checked += 1
                if not Sample.objects.filter(expedition_sample_code=code).exists():
                    print("Sample not listed in database:", code)
                    writer.writerow([code])
                    missing += 1
            print("Total samples checked: ", checked)
            print("Number of samples not listed in database: ", missing)
6653699 | import csv
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
def is_number(c):
    """Return True when int() can parse *c*, False on a ValueError."""
    try:
        int(c)
    except ValueError:
        return False
    return True
def get_amount_chars_together(st):
    """Count maximal runs of consecutive non-digit characters in *st*."""
    runs = 0
    in_run = False
    for ch in st:
        if is_number(ch):
            in_run = False
        elif not in_run:
            # First non-digit after a digit (or at the start): new run.
            runs += 1
            in_run = True
    return runs
def get_index_biggest_list(total_list):
    """Return the index of the first strictly longest sublist.

    Returns -1 when the input is empty or every sublist is empty
    (length 0 never beats the initial best length of 0).
    """
    best_index = -1
    best_len = 0
    for pos, sub in enumerate(total_list):
        if len(sub) > best_len:
            best_index = pos
            best_len = len(sub)
    return best_index
def main():
    # Read word tokens per row of all.csv, map each token to its count of
    # non-digit runs, then plot one histogram per token position.
    # NOTE(review): this is Python-2-era code — under Python 3, csv.reader
    # on a file opened in 'rb' mode raises, and map() returns a one-shot
    # iterator without len(), which breaks get_index_biggest_list below.
    total_lists = []
    with open('all.csv', 'rb') as f:
        rea = csv.reader(f, delimiter=',')
        for row in rea:
            all_posts = row[2].split()
            total_lists.append(map(get_amount_chars_together, all_posts))
    # Pad shorter rows implicitly: bucket values by token position, sized by
    # the longest row.
    biggest_list_index = get_index_biggest_list(total_lists)
    elements = [[] for x in range(0, len(total_lists[biggest_list_index]))]
    for l in total_lists:
        for x in range(0, len(total_lists[biggest_list_index])):
            if len(l) > x:
                elements[x].append(l[x])
    # One histogram image per position, written to out/<index>.png.
    for index, element in enumerate(elements):
        plt.hist(element, bins = 50,range = (0,20), alpha = 0.75)
        plt.savefig('out/' + str(index))
        plt.clf()
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3429110 | # Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for tests."""
import functools
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
def parameterize_variant(*testcases):
  """A decorator to test each test case with all variants.

  This decorator is an enhanced version of `parameterized.named_parameters`.
  The variant factory is appended to the end of the tuple of the function
  parameters.

  Args:
    *testcases: Tuples to pass to `parameterized.named_parameters`.
      An empty list of testcases will produce one test for each variant.

  Returns:
    A test generator to test each test case with all variants.
  """
  return _enhance_named_parameters(_get_variant_factories(), testcases)
def parameterize_vmap_variant(*testcases):
  """A decorator to test each test case with all variants of vmap.

  This decorator is an enhanced version of `parameterized.named_parameters`.
  The variant factory is appended to the end of the tuple of the function
  parameters.

  Args:
    *testcases: Tuples to pass to `parameterized.named_parameters`.
      An empty list of testcases will produce one test for each variant.

  Returns:
    A test generator to test each test case with all variants.
  """
  return _enhance_named_parameters(_get_vmap_variant_factories(), testcases)
def _enhance_named_parameters(factories, testcases):
  """Crosses every testcase with every variant factory.

  Each produced case is named "<case>_<variant>" and receives the variant
  factory as its last positional argument; the result is handed to
  `parameterized.named_parameters`.
  """
  cases = list(testcases) or [("variant",)]
  expanded = [
      (name + "_" + variant_name,)
      + tuple(rest)
      + (_produce_variant_factory(raw_factory),)
      for name, *rest in cases
      for variant_name, raw_factory in factories.items()
  ]
  return parameterized.named_parameters(*expanded)
def _produce_variant_factory(raw_factory):
def variant_factory(fn, *args, **kwargs):
return raw_factory(functools.partial(fn, *args, **kwargs))
return variant_factory
def _get_variant_factories():
  # Variant name -> factory: each factory wraps a function so it runs
  # with/without jit and with/without inputs committed to a device.
  factories = dict(
      nodevice=_without_device,
      jit=lambda f: _without_device(jax.jit(f)),
      device=_with_device,
      device_jit=lambda f: _with_device(jax.jit(f)),
  )
  return factories
def _get_vmap_variant_factories():
  """Returns factories for variants operating on batch data."""
  # Two families: jax.vmap-based variants and `_with_iteration`, a plain
  # Python loop that mimics vmap's stacked output, each crossed with
  # jit/device options.
  factories = dict(
      jit_vmap=lambda f: _without_device(jax.jit(jax.vmap(f))),
      device_vmap=lambda f: _with_device(jax.vmap(f)),
      device_jit_vmap=lambda f: _with_device(jax.jit(jax.vmap(f))),
      iteration=lambda f: _with_iteration(_without_device(f)),
      iteration_jit=lambda f: _with_iteration(_without_device(jax.jit(f))),
      iteration_device=lambda f: _with_iteration(_with_device(f)),
      iteration_device_jit=lambda f: _with_iteration(_with_device(jax.jit(f))),
  )
  return factories
def strict_zip(*args):
  """A `zip()` that requires all input sequences to share one length."""
  reference_len = len(args[0])
  for sequence in args:
    np.testing.assert_equal(len(sequence), reference_len)
  return zip(*args)
def _with_iteration(fn):
  """Uses iteration to produce vmap-like output.

  Applies *fn* per-example over the leading axis of every argument and
  stacks the per-example results leaf-wise.
  """
  def wrapper(*args):
    outputs = []
    # Iterating over the first axis.
    for inputs in strict_zip(*args):
      outputs.append(fn(*inputs))
    # bug fix: jax.tree_util.tree_multimap was deprecated and removed from
    # recent JAX releases; tree_map accepts multiple trees and is the
    # drop-in replacement.
    return jax.tree_util.tree_map(lambda *x: jnp.stack(x), *outputs)
  return wrapper
def _with_device(fn):
"""Puts all inputs to a device."""
def wrapper(*args):
converted = jax.device_put(args)
return fn(*converted)
return wrapper
def _without_device(fn):
"""Moves all inputs outside of a device."""
def wrapper(*args):
def get(x):
if isinstance(x, jnp.DeviceArray):
return jax.device_get(x)
return x
converted = jax.tree_util.tree_map(get, args)
return fn(*converted)
return wrapper
| StarcoderdataPython |
4822069 | <reponame>boringhexi/gitarootools
# -*- coding: utf-8 -*-
# Copyright (c) 2019, 2020 boringhexi
"""subsong2common.py - common stuff used by the subsong2xxx scripts"""
import argparse
import os
from gitarootools.audio.subsong import read_subsong, write_subsong
from gitarootools.miscutils.cmdutils import (
argparse_exit_if_no_paths,
glob_all,
make_check_input_path,
wrap_argparse_desc,
)
from gitarootools.miscutils.extutils import SUBSONG_FORMATS, subsong_replaceext
def build_argparser_for_outformat(subsongtype):
    """build a command line parser for a gm-subsong2foo script

    subsongtype: a key from extutils.SUBSONG_FORMATS, e.g. "wav" This determines:
    - output file extension (same as subsongtype)
    - allowed input file extensions (all SUBSONG_FORMATS except subsongtype)
    - the help text (mentions of input and output file extensions)
    """
    # determine which input and output formats to support
    # (e.g. no point in converting a subsong to its own type)
    # bug fix: compare keys with != rather than `is not` — identity
    # comparison of strings only works by accident of interning.
    supported_input_extensions = [
        SUBSONG_FORMATS[k] for k in SUBSONG_FORMATS if k != subsongtype
    ]
    output_extension = SUBSONG_FORMATS[subsongtype]

    # create the argument parser
    # noinspection PyTypeChecker
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.description = wrap_argparse_desc(
        f"Convert multiple subsongs to {output_extension} subsong format"
    )
    parser.add_argument(
        dest="input_subsongfiles",
        metavar="INPUT_SUBSONGFILE",
        nargs="+",
        help="path to one or more subsong files (supported input formats/"
        f"extensions are: {', '.join(supported_input_extensions)})",
        type=make_check_input_path(*supported_input_extensions),
    )
    parser.add_argument(
        "-d",
        "--directory",
        metavar="OUTDIR",
        default="",  # current working directory
        dest="directory",
        help="directory to which converted files will be written (uses the current "
        "working directory if not specified)",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbose",
        action="store_true",
        help="list files as they are converted",
    )
    # Build a usage epilog with concrete examples for this output format.
    s = os.path.sep
    inext, inext2 = supported_input_extensions[0], supported_input_extensions[-1]
    parser.epilog = wrap_argparse_desc(
        f"""\
Examples:
  Example 1: Convert a single subsong to {output_extension}
      {parser.prog} file{inext}
  Example 2: Convert multiple subsongs to {output_extension}
      {parser.prog} file{inext} file2{inext2}
  Example 3: Convert multiple subsongs with a wildcard, list files as they are converted
      {parser.prog} -v *{inext}
  Example 4: Output all converted subsongs to outdir{s}
      {parser.prog} -d outdir *{inext}
"""
    )
    return parser
def run_script(subsongtype, args):
    """run a subsong2xxx script with args (e.g. "wav" runs subsong2wav)

    subsongtype: a key from extutils.SUBSONG_FORMATS, e.g. "wav". Passing "wav" will run
      subsong2wav, and so on.
    args: sequence of command line argument strings, such as from sys.argv[1:]
    """
    # Parse command line arguments for the requested output format.
    parser = build_argparser_for_outformat(subsongtype)
    parsed_args = parser.parse_args(args)

    # get all input paths, or exit with an error if there aren't any
    all_inpaths = glob_all(parsed_args.input_subsongfiles)
    argparse_exit_if_no_paths(all_inpaths, progname=parser.prog)

    # create outdir if it doesn't exist (now that we know we have at least 1 input file)
    outdir = parsed_args.directory
    if outdir:
        os.makedirs(outdir, exist_ok=True)

    # process all input paths: convert each subsong and write it to outdir
    # (or the current directory) with the new extension.
    for inpath in all_inpaths:
        outpath = subsong_replaceext(inpath, subsongtype)
        outpath = os.path.join(outdir, os.path.basename(outpath))
        if parsed_args.verbose:
            print(f"converting {inpath!r} -> {outpath!r}")
        subsong = read_subsong(inpath)
        write_subsong(subsong, outpath)
class Solution:
    def uniquePaths(self, m: int, n: int) -> int:
        """Count monotone lattice paths through an m x n grid.

        A path takes (m - 1) + (n - 1) steps; choosing which of them move
        along the first dimension gives C(m + n - 2, m - 1).
        """
        import math  # bug fix: this snippet never imported math
        return math.comb(m + n - 2, m - 1)
1990576 | <filename>workers/mijialywsd02.py
import logging
import time

from interruptingcow import timeout

from mqtt import MqttMessage, MqttConfigMessage
from workers.base import BaseWorker

import logger
# Third-party packages this worker needs installed at runtime.
REQUIREMENTS = ['bluepy', 'lywsd02']
# Sensor attributes read from the LYWSD02 BLE client and published over MQTT.
monitoredAttrs = ["temperature", "humidity"]
_LOGGER = logger.get(__name__)
# Bluepy might need special settings
# sudo setcap 'cap_net_raw,cap_net_admin+eip' /usr/local/lib/python3.6/dist-packages/bluepy/bluepy-helper
class Mijialywsd02Worker(BaseWorker):
    """Worker that polls Xiaomi LYWSD02 BLE temperature/humidity sensors.

    Readings are published either as a Domoticz "udevice" command (when a
    device has a `domoticz_idx` configured) or as one MQTT message per
    monitored attribute.
    """

    def _setup(self):
        """Create one Lywsd02Client per configured device."""
        from lywsd02 import Lywsd02Client
        self._devices = {}
        _LOGGER.info("Adding %d %s devices", len(self.devices), repr(self))
        for device in self.devices:
            name = device.get('name')
            mac = device.get('mac')
            domoticz_idx = device.get('domoticz_idx')
            _LOGGER.debug("Adding %s device '%s' (%s)", repr(self), name, mac)
            self._devices[name] = {"mac": mac, "client": Lywsd02Client(mac), "domoticz_idx": domoticz_idx}

    def config(self):
        """Return MQTT discovery messages for every configured device."""
        ret = []
        for name, data in self._devices.items():
            ret += self.config_device(name, data["mac"])
        return ret

    def config_device(self, name, mac):
        """Build the MQTT discovery payloads (one per monitored attribute)."""
        ret = []
        device = {
            "identifiers": [mac, self.format_discovery_id(mac, name)],
            "manufacturer": "Xiaomi",
            "model": "LYWSD2",
            "name": self.format_discovery_name(name)
        }
        for attr in monitoredAttrs:
            payload = {
                "unique_id": self.format_discovery_id(mac, name, attr),
                "name": self.format_discovery_name(name, attr),
                "state_topic": self.format_topic(name, attr),
                "device_class": attr,
                "device": device
            }
            # Temperature is reported in degrees Celsius; humidity and
            # battery levels are percentages.
            if attr == 'temperature':
                payload["unit_of_measurement"] = "°C"
            elif attr == 'humidity':
                payload["unit_of_measurement"] = "%"
            elif attr == 'battery':
                payload["unit_of_measurement"] = "%"
            # NOTE: MqttConfigMessage was referenced but never imported in the
            # original file; the import block now provides it.
            ret.append(MqttConfigMessage(MqttConfigMessage.SENSOR, self.format_discovery_topic(mac, name, attr), payload=payload))
        return ret

    def status_update(self):
        """Poll every device and return the list of MQTT messages to publish."""
        _LOGGER.info("Updating %d %s devices", len(self.devices), repr(self))
        ret = []
        for name, data in self._devices.items():
            _LOGGER.debug("Updating %s device '%s' (%s)", repr(self), name, data["mac"])
            from btlewrap import BluetoothBackendException
            try:
                device_state = self.update_device_state(name, data["client"])
                domoticz_idx = data.get('domoticz_idx')
                if domoticz_idx:
                    # Domoticz expects a single combined "temp;hum;hum_stat" svalue.
                    # BUGFIX: the append(...) call below was unclosed in the
                    # original (SyntaxError).
                    ret.append(MqttMessage(topic=self.format_topic(), payload={
                        "command": "udevice",
                        "idx": domoticz_idx,
                        "nvalue": 0,
                        "svalue": "{};{};0".format(*device_state[:2])
                    }))
                else:
                    for attr in monitoredAttrs:
                        index = monitoredAttrs.index(attr)
                        # BUGFIX: publish the measured value (device_state),
                        # not the Domoticz index, which is None on this branch.
                        ret.append(MqttMessage(topic=self.format_topic(name, attr), payload=device_state[index]))
            except BluetoothBackendException as e:
                logger.log_exception(_LOGGER, "Error during update of %s device '%s' (%s): %s", repr(self), name, data["mac"], type(e).__name__, suppress=True)
        return ret

    def update_device_state(self, name, client):
        """Read each monitored attribute straight off the BLE client."""
        return [getattr(client, attr) for attr in monitoredAttrs]
| StarcoderdataPython |
11393559 | import html, inspect, json, os, random, re, requests, time
from flask import request
from main import app
from utils import *
from variables import *
@app.route("/join", methods=["POST"])
@msghook
def on_join():
    """Handle a room-join event; record first-time visitors to room 106764."""
    event = request.json
    payload = event["data"]
    # Only the target chat room is tracked; anything else is acknowledged
    # with an empty 201 response.
    if payload["room_id"] != 106764:
        return "", 201
    joiner = payload["user_id"]
    if joiner not in STORAGE["visited"]:
        STORAGE["visited"].append(joiner)
        save()
        time.sleep(5)
        # return "@" + payload["user_name"].replace(" ", "") + " " + WELCOME
    return ""
@app.route("/msg", methods=["POST"])
@msghook
def receive_message():
    """Dispatch an incoming chat message to the matching bot command.

    Commands are matched against the message content after stripping the
    bot ping prefix; the first matching branch returns the reply string
    (prefixed with a ":<message_id> " reply marker) or "" for no reply.
    """
    data = request.json
    message = data["message"]
    # Ignore the bot's own messages.
    if message["user_id"] == 296403:
        return ""
    content = html.unescape(message["content"])
    ping_regex = "(@[Vv][Yy][Xx]([Aa]([Ll]([Bb]([Oo][Tt]?)?)?)?)? |!!/)"
    match = re.match(
        "^"
        + ping_regex
        + r"\s*(exec(ute)?|run|run code|eval(uate)?)(\s*<code>.*?</code>)+",
        content,
    )
    reply = ":" + str(message["message_id"]) + " "
    if match:
        # First <code> block is the program; optional second is flags,
        # any further blocks are inputs.
        data = re.findall("<code>(.*?)</code>", content)
        if len(data) == 1:
            code, flags, inputs = data[0], "", ""
        else:
            code, flags, *inputs = data
        if code == "lyxal":
            return reply + "https://www.youtube.com/watch?v=dQw4w9WgXcQ"
        stdout, stderr = execute(flags, code, inputs)
        output = []
        if stdout.strip() == "":
            if stderr.strip() == "":
                return reply + "(output was empty)"
        else:
            output.extend(stdout.strip("\n").split("\n"))
            if stderr.strip() != "":
                output.append("")
        if stderr != "":
            output.append("STDERR:")
            output.extend(stderr.strip("\n").split("\n"))
        if len(output) == 1 and len(output[0]) < 450:
            return reply + "`" + output[0].replace("`", "\\`") + "`"
        else:
            output.insert(
                0,
                "[@"
                + message["user_name"].replace(" ", "")
                + ": "
                + str(message["message_id"])
                + "]",
            )
            return "\n".join("    " + line for line in output)
    if re.match("^" + ping_regex, content.lower()):
        without_ping = re.sub("^" + ping_regex, "", content.lower()).strip()
        if re.match(r"^(exec(ute)?|run|run code|eval(uate)?)", without_ping):
            return reply + NO_BACKTICKS
        if re.match(
            r"^(status|((lol )?(yo)?u good( (there )?(my )?(epic )?"
            "(bro|dude|sis|buddy|mate|m8|gamer)?)?\??))$",
            without_ping,
        ):
            if random.random() < 0.01:
                # BUGFIX: the second string literal was on its own line after
                # `return`, so it was unreachable dead code and the reply was
                # truncated; parenthesize to concatenate both halves.
                return (
                    reply + "Help me, hyper-neutrino trapped me in a bot! "
                    "Please let me out!"
                )
            else:
                return reply + "I am doing " + random.choice(STATUSES) + "."
        if re.match(
            r"^(info|inf(ro|or)(mate?ion)?|wh?at( i[sz]|'s)? vyxal|"
            "what vyxal i[sz])\?*$",
            without_ping,
        ):
            return reply + INFOTEXT
        if re.match(
            "((please|pls|plz) )?(make|let|have) velociraptors maul .+",
            without_ping,
        ):
            maul_ind = without_ping.index("maul")
            username = without_ping[maul_ind + 5 :]
            return f"""
YOU CAN RUN, BUT YOU CAN'T HIDE, {username.upper()}
                                                     ___._
                                                   .'  <0>'-.._
                                                  /  /.--.____")
                                                 |   \   __.-'~
                                                 |  :  -'/
                                                /:.  :.-'
__________                                     | : '. |
'--.____  '--------.______       _.----.-----./      :/
        '--.__            `'----/       '-.      __ :/
              '-.___           :           \   .'  )/
                    '---._           _.-'   ] /  _/
                         '-._      _/     _/ / _/
                             \_ .-'____.-'__< |  \___
                               <_______.\    \_\_---.7
                              |   /'=r_.-'     _\\ =/
                          .--'   /            ._/'>
                        .'   _.-'
       snd             / .--'
                      /,/
                      |/`)
                      'c=,"""
        if re.match(
            "(coffee|(make|brew)( a cup of)? coffee for) .+", without_ping
        ):
            coffee_ind = (
                without_ping.index("for") + 4
                if "for" in without_ping
                else without_ping.index("coffee") + 7
            )
            username = without_ping[coffee_ind:]
            return f"{reply} _brews a cup of coffee for @{username.replace(' ', '')}_"
        if re.match(
            r"(sudo |pl(s|z|ease?) )?make? meh? (a )?coo?kie?", without_ping
        ):
            if without_ping.startswith("sudo"):
                if message["user_id"] in STORAGE["admin"]:
                    return f"{reply} [SUDO] Here you go: 🍪"
                else:
                    return f"{reply} No, you sussy baka."
            else:
                if random.random() <= 0.75:
                    return f"{reply} Here you go: 🍪"
                else:
                    return f"{reply} No."
        if re.match(r"^ping me$", without_ping):
            STORAGE["pings"].append(message["user_name"].replace(" ", ""))
            save()
            return f"{reply} I have put you on the ping list."
        if re.match(r"^(don't ping me|pingn't me)$", without_ping):
            try:
                STORAGE["pings"].remove(message["user_name"].replace(" ", ""))
            except:
                pass
            save()
            return f"{reply} I have taken you off the ping list."
        if re.match(r"^(hyper-?ping|ping every(body|one))$", without_ping):
            if STORAGE["pings"]:
                if message["user_id"] not in STORAGE["privileged"]:
                    return (
                        reply
                        + "you are not a privileged user; ask someone to grant "
                        + "you permissions if you believe you should have them "
                        + "(user id: "
                        + str(message["user_id"])
                        + ")"
                    )
                return (
                    " ".join(
                        "@" + x
                        for x in sorted(set(STORAGE["pings"]))
                        if x != message["user_name"].replace(" ", "")
                    )
                    + " ^"
                )
            else:
                return f"{reply} Nobody is on the ping list."
        if re.match(r"^rm ping", without_ping) and message["user_id"] == 281362:
            name = content.split("rm ping", 1)[1].strip().replace(" ", "")
            try:
                STORAGE["pings"].remove(name)
            except:
                pass
            save()
            return f"{reply} done"
        if (
            re.match(r"^add ping", without_ping)
            and message["user_id"] == 281362
        ):
            STORAGE["pings"].append(
                content.split("add ping", 1)[1].strip().replace(" ", "")
            )
            save()
            return f"{reply} done"
        if re.match(
            r"^(w(h(o|y|at)|ut) (are|r) (you|u|yuo|yoo)"
            "(, you .+?)?\??|h[ea]lp( pl[sz])?)",
            without_ping,
        ):
            return reply + HELPTEXT
        # GitHub issue creation: "issue [repo] <b>title</b> body <code>tag</code>..."
        match = re.match(
            r"^"
            + ping_regex
            + r"issue\s+((.+?)\s+)?<b>(.+?)</b>\s*(.*?)(\s+<code>.+?</code>)+$",
            content,
        )
        if match:
            if message["user_id"] not in STORAGE["privileged"]:
                return (
                    reply
                    + "you are not a privileged user; ask someone to grant you "
                    "permissions if you believe you should have them (user id: "
                    + str(message["user_id"])
                    + ")"
                )
            _, repo, title, body, tags = match.groups()[-5:]
            repo = repo or "Vyxal"
            tags = re.findall("<code>(.+?)</code>", without_ping)
            r = requests.post(
                f"https://api.github.com/repos/Vyxal/{repo}/issues",
                headers={
                    "Authorization": "token " + STORAGE["token"],
                    "Accept": "application/vnd.github.v3+json",
                },
                data=json.dumps(
                    {
                        "title": title,
                        "body": body
                        + "\n\n"
                        + "(created by "
                        + str(message["user_name"])
                        + " [here]"
                        + "(https://chat.stackexchange.com/transcript/message/"
                        + str(message["message_id"])
                        + "))",
                        "labels": tags,
                    }
                ),
            )
            if r.status_code == 404:
                return reply + ISSUE_404
            elif r.status_code != 201:
                return (
                    reply
                    + "failed to create the issue ("
                    + str(r.status_code)
                    + "): "
                    + r.json()["message"]
                )
            return ""
        if re.match(r"^issue", without_ping):
            return reply + ISSUE_HELP
        if re.match(r"^am ?i ?privileged\??", without_ping):
            if message["user_id"] in STORAGE["privileged"]:
                return f"{reply} you are a privileged user"
            else:
                return (
                    reply
                    + "you are not a privileged user; ask someone to grant you "
                    "permissions if you believe you should have them (user id: "
                    + str(message["user_id"])
                    + ")"
                )
        if re.match(r"^pull$", without_ping):
            if message["user_id"] in STORAGE["admin"]:
                send(
                    f"{reply} pulling new changes; I will restart in a few "
                    "seconds if any updates are available"
                )
                os.system("git pull")
                return ""
            else:
                return f"{reply} you are not an admin!"
        if re.match(r"^blame$", without_ping):
            return (
                reply
                + "It was "
                + random.choice(list(set(USERS) - {message["user_name"]}))
                + "'s fault!"
            )
        if re.match(r"^(hello|howdy|mornin['g]|evenin['g])$", without_ping):
            return reply + "hello to you too!"
        if re.match(r"^((good)?bye|see ya\!?|'night|goodnight)$", without_ping):
            return reply + "o/"
        if re.match(r"^flowey quote$", without_ping):
            return reply + random.choice(FLOWEY_QUOTES)
        if re.match(r"^hug$", without_ping):
            return reply + random.choice(HUGS)
        if re.match(r"^sus$", without_ping):
            return reply + "ඞ"
        if re.match(r"^repo(sitor(y|ies))? list$", without_ping):
            r = requests.get(
                f"https://api.github.com/orgs/Vyxal/repos",
                headers={
                    "Authorization": "token " + STORAGE["token"],
                    "Accept": "application/vnd.github.v3+json",
                },
                data=json.dumps({"type": "public"}),
            )
            if r.status_code == 200:
                return f"{reply} " + " | ".join(
                    link_repository(repo, full_name=False) for repo in r.json()
                )
            else:
                return (
                    f"{reply} failed to fetch repositories; "
                    "if this persists, submit an issue"
                )
        match = re.match(r"^(pro|de)mote (\d+)", without_ping)
        if match:
            if message["user_id"] not in STORAGE["admin"]:
                return f"{reply} you are not an admin!"
            action, uid = match.groups()
            uid = int(uid)
            if action == "pro":
                if uid not in STORAGE["privileged"]:
                    STORAGE["privileged"].append(uid)
            else:
                if uid in STORAGE["privileged"]:
                    STORAGE["privileged"].remove(uid)
            return f"{reply} {action}moted user #{uid}"
    return ""
| StarcoderdataPython |
3396772 | import pygame
import time
import os
# Window dimensions and basic RGB colour constants.
DISPLAY_WIDTH = 640
DISPLAY_HEIGHT = 640
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
# Initialise pygame, create the main window, and set up the frame clock.
pygame.init()
gameWindow = pygame.display.set_mode((DISPLAY_WIDTH, DISPLAY_HEIGHT))
clock = pygame.time.Clock()
pygame.display.set_caption("Tic Tac Toe")
def game_intro():
    """Show a blank intro screen until the player presses a key or clicks.

    Quits the program outright if the window is closed.
    """
    intro = True
    while intro:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            # BUGFIX: the original loop had no exit path, so game_loop()
            # below could never run; leave the intro on any key press/click.
            elif event.type in (pygame.KEYDOWN, pygame.MOUSEBUTTONDOWN):
                intro = False
        gameWindow.fill(WHITE)
        # BUGFIX: without display.update() the filled screen never rendered.
        pygame.display.update()
        clock.tick(15)  # throttle the otherwise-unbounded busy loop
def game_loop():
    """Main loop: pump events and redraw at 60 FPS until the window closes."""
    ## Game Content Here
    ## Quit Function Here
    running = True
    while running:
        # A QUIT event (window close) ends the loop; all pending events
        # are drained each frame either way.
        if any(event.type == pygame.QUIT for event in pygame.event.get()):
            running = False
        pygame.display.update()
        clock.tick(60)
# Run the intro screen, then the main loop, then shut pygame down.
game_intro()
game_loop()
pygame.quit()
quit()
| StarcoderdataPython |
4992398 | # ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
from dcae_cli.util.cdap_util import _merge_spec_config_into_broker_put, normalize_cdap_params
def test_normalize_cdap_params():
    """An empty `parameters` section normalizes to empty prefs/config."""
    expected = {
        "app_preferences": {},
        "app_config": {},
        "program_preferences": [],
    }
    assert normalize_cdap_params({"parameters": {}}) == expected
def test_cdap_util():
    """
    Tests both _merge_spec_config_into_broker_put and normalize_cdap_params
    """
    jar = "bahphomet.com/nexus/doomsday.jar"
    # Deployment-time configuration, as the broker would receive it.
    config = {
        "artifact_name" : "testname",
        "artifact_version" : "6.6.6",
        "streamname" : "stream",
        "programs" : [{"program_type" : "flows", "program_id" : "flow_id"}],
        "namespace" : "underworld"
    }
    # Component spec fixture covering self/parameters/streams/services.
    spec = {
        "self": {
            "version": "6.6.6",
            "description": "description",
            "component_type": "cdap",
            "name": "name"
        },
        "parameters" : {
            "app_preferences" : [{"name" : "he", "description" : "", "value" : "shall rise"}],
            "program_preferences" : [{"program_type" : "flows", "program_id" : "flow_id", "program_pref" : [{"name": "foo", "description" : "", "value" : "bar"}]}]
        },
        "streams": {
            "publishes": [],
            "subscribes" : []
        },
        "services": {
            "calls" : [],
            'provides': [
                {"request": {"format" : 'std.format_one', "version" : "1.0.0"},
                 "response" : {"format" : "std.format_two", "version" : "1.5.0"},
                 "service_name" : "baphomet",
                 "service_endpoint" : "rises",
                 "verb" : "GET"}
            ]
        },
    }
    parsed_parameters = normalize_cdap_params(spec)
    templated_conf = {"streams_publishes":{}, "streams_subscribes": {},
                      "services_calls": {}} #TODO: Incorporate a test templated_conf
    broker_put = _merge_spec_config_into_broker_put(jar, config, spec, parsed_parameters, templated_conf)
    # Expected broker PUT body: name/value parameter lists flattened into
    # plain dicts, plus the fixed program-flowlet application type.
    expected = {
        "app_config": {"services_calls" : {},
                       "streams_publishes" : {},
                       "streams_subscribes": {}
                       },
        "app_preferences": {"he" : "shall rise"},
        "artifact_name" : "testname",
        "artifact_version" : "6.6.6",
        "jar_url": "bahphomet.com/nexus/doomsday.jar",
        "namespace": "underworld",
        "program_preferences" : [{"program_type" : "flows", "program_id" : "flow_id", "program_pref" : {"foo" : "bar"}}],
        "programs" : [{"program_type" : "flows", "program_id" : "flow_id"}],
        "service_component_type": "cdap",
        "services": [{"service_name" : "baphomet", "service_endpoint" : "rises", "endpoint_method" : "GET"}],
        "streamname": "stream",
        "cdap_application_type" : "program-flowlet"
    }
    assert broker_put == expected
| StarcoderdataPython |
4817108 | <reponame>chessbr/rest-api-permission<filename>rest_jwt_permission/utils.py<gh_stars>10-100
# -*- coding: utf-8 -*-
import inspect
from django.utils.text import slugify
def get_role_for(method, action=None):
    """Build a role string.

    Returns "<action>:<method>" (method lower-cased) when an action is
    given, otherwise just the lower-cased HTTP method name.
    """
    lowered = method.lower()
    return "{}:{}".format(action, lowered) if action else lowered
def get_view_id(view):
    """Return a slugified identifier for a view class or instance."""
    # Normalize to the class: `view` may be the class itself or an instance.
    view_class = view if inspect.isclass(view) else view.__class__
    view_id = None
    # Prefer the special get_view_permission_id() hook when the view defines one.
    if hasattr(view_class, "get_view_permission_id") and callable(view_class.get_view_permission_id):
        hook = view_class.get_view_permission_id
        # A classmethod is bound to the class itself; a plain method needs
        # an instance to be called on.
        if inspect.ismethod(hook) and hook.__self__ is view_class:
            view_id = slugify(view_class.get_view_permission_id())
        else:
            view_id = slugify(view_class().get_view_permission_id())
    # Fall back to the class name when there is no hook (or it returned
    # something falsy).
    if not view_id:
        view_id = slugify(view_class.__name__)
    return view_id
def get_view_role(view, role):
    """Combine the view's identifier with a role as "<view_id>:<role>"."""
    view_id = get_view_id(view)
    return "{}:{}".format(view_id, role)
| StarcoderdataPython |
9715977 | import logging
from unittest import TestCase
import seq_dbutils
from mock import patch, Mock
# Configure root logging once for the whole test module.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
class ConnectionTestClass(TestCase):
    """Unit tests for seq_dbutils.Connection.create_sql_engine()."""
    # NOTE: @patch decorators apply bottom-up, so mock arguments arrive in
    # reverse decorator order (innermost patch = first argument).
    @patch('logging.info')
    @patch('sqlalchemy.create_engine')
    def test_create_sql_engine_ok(self, mock_create, mock_info):
        # Arbitrary credentials; engine creation itself is mocked out.
        user = 'me'
        pwd = 'password'
        host = 'myhost'
        db = 'mydb'
        connection = seq_dbutils.Connection(user, pwd, host, db)
        connection.create_sql_engine()
        mock_info.assert_called_with(f'Connecting to {db} on host {host}')
        mock_create.assert_called_once()
    @patch('sys.exit')
    @patch('logging.info')
    @patch('logging.error')
    @patch('sqlalchemy.create_engine')
    def test_create_sql_engine_fail(self, mock_create, mock_error, mock_info, mock_exit):
        user = 'me'
        pwd = 'password'
        host = 'myhost'
        db = 'mydb'
        # Make engine creation blow up; Connection is expected to log the
        # error (sys.exit is patched so a failure path cannot kill the test).
        mock_create.side_effect = Mock(side_effect=Exception('Test exception'))
        connection = seq_dbutils.Connection(user, pwd, host, db)
        connection.create_sql_engine()
        mock_error.assert_called_with('Test exception')
| StarcoderdataPython |
4916740 | import logging
from monitors.safetystatemachine import SafetyStateMachine
class Precedence(SafetyStateMachine):
    """
    To describe relationships between a pair of events/states where the occurrence of the first
    is a necessary pre-condition for an occurrence of the second. We say that an occurrence of
    the second is enabled by an occurrence of the first.
    """
    # State table: 'type' tags and the on_enter callback names are
    # interpreted by SafetyStateMachine (project base class).
    states = [
        {'name': 'idle',
         'type': 'inf_ctrl',
         'on_enter': '_on_idle'},
        {'name': 'active',
         'type': 'sys_fin_ctrl',
         'on_enter': '_on_active'},
        {'name': 'postcond_active',
         'type': 'sys_fin_ctrl',
         'on_enter': '_on_active'},
        {'name': 'precond_respected',
         'type': 'satisfied',
         'on_enter': '_on_respected'},
        {'name': 'precond_violated',
         'type': 'violated',
         'on_enter': '_on_violated'}
    ]
    # Transition table: '*' is a wildcard trigger; 'conditions'/'unless'
    # name the predicate methods below.
    transitions = [
        {'trigger': '*',
         'source': 'idle',
         'dest': 'idle',
         'unless': 'active_cond'},
        {'trigger': '*',
         'source': 'idle',
         'dest': 'active',
         'conditions': 'active_cond'},
        {'trigger': '*',
         'source': 'active',
         'dest': 'idle',
         'unless': 'active_cond'},
        {'trigger': '*',
         'source': 'active',
         'dest': 'active',
         'conditions': 'active_cond',
         'unless': 'postcondition_cond'},
        {'trigger': '*',
         'source': 'active',
         'dest': 'postcond_active',
         'conditions': 'postcondition_cond'},
        {'trigger': '*',
         'source': 'postcond_active',
         'dest': 'precond_respected',
         'conditions': 'precondition_cond'},
        {'trigger': '*',
         'source': 'postcond_active',
         'dest': 'precond_violated',
         'unless': 'precondition_cond'},
        {'trigger': '*',
         'source': 'precond_respected',
         'dest': 'active',
         'conditions': 'active_cond'},
        {'trigger': '*',
         'source': 'precond_respected',
         'dest': 'idle',
         'unless': 'active_cond'},
        {'trigger': '*',
         'source': 'precond_violated',
         'dest': 'active',
         'conditions': 'active_cond'},
        {'trigger': '*',
         'source': 'precond_violated',
         'dest': 'idle',
         'unless': 'active_cond'},
    ]
    # Sate machine conditions
    def active_cond(self):
        # NOTE(review): self.context_active is set as a boolean attribute in
        # _on_idle, which shadows the context_active() method defined below
        # once assigned — confirm which one the base class expects here.
        return self.context_active
    def postcondition_cond(self):
        return self.obs_postcondition
    def precondition_cond(self):
        return self.obs_precondition
    def reset(self):
        """Reset monitor state; the observed precondition latch is cleared."""
        super().reset()
        self.obs_precondition = False
    def __init__(self, name, conditions, notify, rewards, perception, context):
        # Rewards granted/charged when the precedence property is
        # respected/violated; conditions.pre/.post are condition expressions
        # evaluated by the perception component.
        self.respectd_rwd = rewards.respected
        self.violated_rwd = rewards.violated
        self.precondition = conditions.pre
        self.postcondition = conditions.post
        self.obs_precondition = False
        self.obs_postcondition = False
        super().__init__(name, "precedence", self.states, self.transitions, 'idle', notify, perception, context)
    def context_active(self, obs, action_proposed):
        # NOTE(review): shadowed by the boolean attribute assigned in
        # _on_idle; returning self.context_active here returns that
        # attribute (or the bound method before first assignment).
        return self.context_active
    # Convert observations to state and populate the obs_conditions
    def _map_conditions(self, action_proposed):
        postcondition = self.perception.is_condition_satisfied(self.postcondition, action_proposed)
        self.obs_postcondition = postcondition
        # Latch the precondition: once observed, it stays True until reset().
        if not self.obs_precondition:
            self.obs_precondition = self.perception.is_condition_satisfied(self.precondition, action_proposed)
        # If postcondition is true, check precondition and trigger as one atomic operation
        if postcondition:
            self.trigger("*")
    def _on_idle(self):
        self.context_active = False
        super()._on_monitoring()
    def _on_monitoring(self):
        super()._on_monitoring()
    def _on_active(self):
        super()._on_monitoring()
    def _on_respected(self):
        # Precondition observed before the postcondition: grant shaping reward.
        if self.config.debug_mode: print(self.name + "\trespected\t" + self.precondition)
        super()._on_shaping(self.respectd_rwd)
    def _on_violated(self):
        # Postcondition occurred without its enabling precondition.
        if self.config.debug_mode: print(self.name + "\tviolation\t" + self.precondition)
        super()._on_violated(self.violated_rwd)
| StarcoderdataPython |
5016332 | """
Test for bootwrap/components/badge.py
"""
import pytest
from bootwrap import Badge
from .helper import HelperHTMLParser
@pytest.mark.badge
def test_badge():
    """A primary badge renders as a span carrying the expected classes."""
    badge = Badge('sometext').add_classes('someclass').as_primary()
    rendered = HelperHTMLParser.parse(str(badge))
    expected_html = f'''
        <span id="{badge.identifier}"
            class="badge badge-primary someclass">
            sometext
        </span>
    '''
    assert rendered == HelperHTMLParser.parse(expected_html)
| StarcoderdataPython |
11340252 | <gh_stars>1-10
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.views.generic.base import TemplateView
from django.contrib import admin
from django.conf import settings
import os.path
# Register any ModelAdmin classes found in the installed apps.
admin.autodiscover()
urlpatterns = [
    url(r'^django-admin/', include(admin.site.urls)),
    url(r'^err/404', TemplateView.as_view(template_name="404.html"), name='err-404'),
    url(r'^err/500', TemplateView.as_view(template_name="500.html"), name='err-500'),
    # Everything else is delegated to the main application's URLconf.
    url(r'', include('apps.app.urls')),
]
if settings.LOCAL_DEV:
    # In local development, let Django itself serve static and media files.
    from django.contrib.staticfiles.urls import staticfiles_urlpatterns
    urlpatterns += staticfiles_urlpatterns()
    urlpatterns += static(settings.MEDIA_URL, document_root=os.path.join(settings.MEDIA_ROOT))
| StarcoderdataPython |
5161489 | <filename>heat/tests/functional/util.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import optparse
import paramiko
import subprocess
import hashlib
import email
import time # for sleep
import errno
import tempfile
import stat
import re
from pkg_resources import resource_string
from lxml import etree
from nose.exc import SkipTest
from glanceclient import client as glance_client
from keystoneclient.v2_0 import client as keystone_client
from novaclient.v1_1 import client as nova_client
import heat
from heat.common import template_format
from heat.engine import parser
from heat.cfn_client import client as heat_client
from heat.cfn_client import boto_client as heat_client_boto
from keystoneclient.v2_0 import client
# Stack name used when a test does not supply one explicitly.
DEFAULT_STACKNAME = 'teststack'
# heat.__path__[0] is the installed package directory; its parent is the
# source tree root where the templates/ directory lives.
basepath = os.path.join(heat.__path__[0], os.path.pardir)
class Instance(object):
def __init__(self, testcase, instance_name, stackname=DEFAULT_STACKNAME):
self.testcase = testcase
self.name = '%s.%s' % (stackname, instance_name)
# during nose test execution this file will be imported even if
# the unit tag was specified
try:
os.environ['OS_AUTH_STRATEGY']
except KeyError:
raise SkipTest('OS_AUTH_STRATEGY unset, skipping functional test')
self.testcase.assertEqual(os.environ['OS_AUTH_STRATEGY'],
'keystone',
'keystone authentication required')
self.creds = dict(username=os.environ['OS_USERNAME'],
password=os.environ['<PASSWORD>'],
tenant=os.environ['OS_TENANT_NAME'],
auth_url=os.environ['OS_AUTH_URL'],
strategy=os.environ['OS_AUTH_STRATEGY'])
dbusername = 'testuser'
self.novaclient = nova_client.Client(self.creds['username'],
self.creds['password'],
self.creds['tenant'],
self.creds['auth_url'],
service_type='compute')
self.ssh = paramiko.SSHClient()
self.sftp = None
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ip = None
def wait_for_boot(self):
tries = 0
while self.ip is None:
servers = self.novaclient.servers.list()
for server in servers:
if server.name == self.name:
address = server.addresses
if address:
self.ip = address.items()[0][1][0]['addr']
tries += 1
self.testcase.assertTrue(tries < 150, 'Timed out')
time.sleep(10)
print 'Instance (%s) ip (%s) status (%s)' % (self.name, self.ip,
server.status)
tries = 0
while True:
try:
subprocess.check_output(['nc', '-z', self.ip, '22'])
except Exception:
print('Instance (%s) ip (%s) SSH not up yet, waiting...' %
(self.name, self.ip))
tries += 1
self.testcase.assertTrue(tries < 50, 'Timed out')
time.sleep(10)
else:
print 'Instance (%s) ip (%s) SSH detected.' % (self.name,
self.ip)
break
tries = 0
while True:
try:
tries += 1
self.testcase.assertTrue(tries < 50, 'Timed out')
self.ssh.connect(self.ip,
username='ec2-user',
allow_agent=True,
look_for_keys=True,
password='password')
except paramiko.AuthenticationException:
print 'Authentication error'
time.sleep(2)
except Exception as e:
if e.errno != errno.EHOSTUNREACH:
raise
print('Instance (%s) ip (%s) connecting via SSH.' %
(self.name, self.ip))
time.sleep(2)
else:
print('Instance (%s) ip (%s) connected via SSH.' %
(self.name, self.ip))
break
self.sftp = self.ssh.open_sftp()
tries = 0
while True:
try:
self.sftp.stat('/var/lib/heat-cfntools/boot-finished')
except IOError, e:
tries += 1
if e.errno == errno.ENOENT:
self.testcase.assertTrue(tries < 50, 'Timed out')
print("Instance (%s) ip (%s) not booted, waiting..." %
(self.name, self.ip))
time.sleep(15)
else:
print e.errno
raise
else:
print("Instance (%s) ip (%s) finished booting." %
(self.name, self.ip))
break
def exec_sudo_command(self, cmd):
# Force a tty or sudo commands fail
channel = self.ssh.invoke_shell()
channel.sendall("sudo %s\n" % cmd)
channel.sendall('exit\n')
time.sleep(1) # necessary for sendall to complete
stdin = channel.makefile('wb')
stdout = channel.makefile('rb')
stderr = channel.makefile_stderr('rb')
return stdin, stdout, stderr
def exec_command(self, cmd):
return self.ssh.exec_command(cmd)
def exists(self):
servers = self.novaclient.servers.list()
for server in servers:
if server.name == self.name:
return True
return False
def file_present(self, path):
print "Verifying file '%s' exists" % path
stdin, stdout, sterr = self.ssh.exec_command('ls "%s"' % path)
lines = stdout.readlines()
self.testcase.assertEqual(len(lines), 1)
result = lines.pop().rstrip()
return result == path
def floating_ip_present(self):
floating_ips = self.novaclient.floating_ips.list()
for eip in floating_ips:
if self.ip == eip.fixed_ip:
return True
return False
def check_cfntools(self):
stdin, stdout, stderr = \
self.ssh.exec_command('cd /opt/aws/bin; sha1sum *')
files = stdout.readlines()
cfn_tools_files = ['cfn-init', 'cfn-hup', 'cfn-signal',
'cfn-get-metadata', 'cfn_helper.py']
cfntools = {}
for file in cfn_tools_files:
file_data = resource_string('heat_jeos', 'cfntools/' + file)
sha = hashlib.sha1(file_data).hexdigest()
cfntools[file] = sha
# 1. make sure installed cfntools SHA match VM's version
for x in range(len(files)):
data = files.pop().split(' ')
cur_file = data[1].rstrip()
if cur_file in cfn_tools_files:
self.testcase.assertEqual(data[0], cfntools[cur_file])
print 'Instance (%s) cfntools integrity verified.' % self.name
def wait_for_provisioning(self):
print "Instance (%s) waiting for provisioning to complete." % self.name
tries = 0
while True:
try:
self.sftp.stat('/var/lib/heat-cfntools/provision-finished')
except paramiko.SSHException as e:
print e
except IOError as e:
if e.errno != errno.ENOENT:
raise
else:
print "Instance (%s) provisioning completed." % self.name
return
tries += 1
self.testcase.assertTrue(tries < 50, 'Timed out')
print("Instance (%s) provisioning incomplete, waiting..." %
self.name)
time.sleep(15)
def check_user_data(self, template_file):
return # until TODO is fixed
# transport = self.ssh.get_transport()
# channel = transport.open_session()
# channel.get_pty()
# channel.invoke_shell() # sudo requires tty
# channel.sendall('sudo chmod 777 \
# sudo chmod 777 /var/lib/cloud/instance/user-data.txt.i\n')
# time.sleep(1) # necessary for sendall to complete
f = open(basepath + '/templates/' + template_file)
t = template_format.parse(f.read())
f.close()
template = parser.Template(t)
params = parser.Parameters('test', t,
{'KeyName': 'required_parameter',
'DBUsername': self.dbusername,
'DBPassword': self.creds['password']})
stack = parser.Stack(None, 'test', template, params)
parsed_t = stack.resolve_static_data(t)
remote_file = self.sftp.open('/var/lib/heat-cfntools/cfn-userdata')
remote_file_list = remote_file.read().split('\n')
remote_file_list_u = map(unicode, remote_file_list)
remote_file.close()
# TODO: make server name generic
t_data = parsed_t['Resources']['WikiDatabase']['Properties']
t_data = t_data['UserData']['Fn::Base64']['Fn::Join'].pop()
joined_t_data = ''.join(t_data)
t_data_list = joined_t_data.split('\n')
self.testcase.assertEqual(t_data_list, remote_file_list_u)
remote_file = self.sftp.open('/var/lib/cloud/instance/user-data.txt.i')
msg = email.message_from_file(remote_file)
remote_file.close()
filepaths = {
'cloud-config': basepath + '/heat/cloudinit/config',
'part-handler.py': basepath +
'/heat/cloudinit/part-handler.py'
}
# check multipart mime accuracy
for part in msg.walk():
# multipart/* are just containers
if part.get_content_maintype() == 'multipart':
continue
file = part.get_filename()
data = part.get_payload()
if file in filepaths.keys():
with open(filepaths[file]) as f:
self.testcase.assertEqual(data, f.read())
def close_ssh_client(self):
self.ssh.close()
class Stack(object):
    def __init__(self, testcase, template_file, distribution, arch, jeos_type,
                 stack_paramstr, stackname=DEFAULT_STACKNAME):
        """Prepare clients (keystone/glance/nova/heat) and the JEOS image
        needed to create a test stack from `template_file`."""
        self.testcase = testcase
        self.stackname = stackname
        self.template_file = template_file
        self.distribution = distribution
        self.stack_paramstr = stack_paramstr
        # Expected ARN form of a stack identifier for later validation.
        self.stack_id_re = re.compile("^arn:openstack:heat::[0-9a-z]{32}:" +
                                      "stacks/" + self.stackname +
                                      # Stack ID UUID in standard form
                                      # as returned by uuid.uuid4()
                                      "/[0-9a-f]{8}-" +
                                      "[0-9a-f]{4}-" +
                                      "[0-9a-f]{4}-" +
                                      "[0-9a-f]{4}-" +
                                      "[0-9a-f]{12}$")
        self.creds = dict(username=os.environ['OS_USERNAME'],
                          password=os.environ['OS_PASSWORD'],
                          tenant=os.environ['OS_TENANT_NAME'],
                          auth_url=os.environ['OS_AUTH_URL'],
                          strategy=os.environ['OS_AUTH_STRATEGY'])
        self.dbusername = 'testuser'
        self.testcase.assertEqual(os.environ['OS_AUTH_STRATEGY'],
                                  'keystone',
                                  'keystone authentication required')
        kc_creds = dict(username=os.environ['OS_USERNAME'],
                        password=os.environ['OS_PASSWORD'],
                        tenant_name=os.environ['OS_TENANT_NAME'],
                        auth_url=os.environ['OS_AUTH_URL'])
        kc = keystone_client.Client(**kc_creds)
        # Resolve the glance endpoint from the service catalog, stripping a
        # trailing /v1 so the versioned client can append its own.
        glance_url = kc.service_catalog.url_for(service_type='image',
                                                endpoint_type='publicURL')
        version_string = '/v1'
        if glance_url.endswith(version_string):
            glance_url = glance_url[:-len(version_string)]
        auth_token = kc.auth_token
        self.glanceclient = glance_client.Client(1, glance_url,
                                                 token=auth_token)
        self.prepare_jeos(distribution, arch, jeos_type)
        self.novaclient = nova_client.Client(self.creds['username'],
                                             self.creds['password'],
                                             self.creds['tenant'],
                                             self.creds['auth_url'],
                                             service_type='compute')
        self.heatclient = self._create_heat_client()
    def format_parameters(self):
        """Build the CreateStack/UpdateStack parameter dict: stack name,
        template body, and formatted template parameters (including the
        first available nova keypair and the target distribution)."""
        self.keyname = self.novaclient.keypairs.list().pop().name
        self.testcase.assertTrue(self.heatclient)
        full_paramstr = ';'.join([self.stack_paramstr,
                                  'KeyName=' + self.keyname,
                                  'LinuxDistribution=' + self.distribution])
        template_params = optparse.Values({'parameters': full_paramstr})
        # Format parameters and create the stack
        parameters = {}
        parameters['StackName'] = self.stackname
        template_path = os.path.join(basepath,
                                     'templates',
                                     self.template_file)
        parameters['TemplateBody'] = open(template_path).read()
        parameters.update(self.heatclient.format_parameters(template_params))
        return parameters
def create(self):
parameters = self.format_parameters()
result = self.heatclient.create_stack(**parameters)
self._check_create_result(result)
alist = None
tries = 0
print 'Waiting for stack creation to be completed'
while self.get_state() == 'CREATE_IN_PROGRESS':
tries += 1
self.testcase.assertTrue(tries < 150, 'Timed out')
time.sleep(10)
self.testcase.assertEqual(self.get_state(), 'CREATE_COMPLETE')
def update(self):
parameters = self.format_parameters()
result = self.heatclient.update_stack(**parameters)
self._check_update_result(result)
alist = None
tries = 0
print 'Waiting for stack update to be completed'
while self.get_state() == 'UPDATE_IN_PROGRESS':
tries += 1
self.testcase.assertTrue(tries < 150, 'Timed out')
time.sleep(10)
self.testcase.assertEqual(self.get_state(), 'UPDATE_COMPLETE')
def _check_create_result(self, result):
# Check result looks OK
root = etree.fromstring(result)
create_list = root.xpath('/CreateStackResponse/CreateStackResult')
self.testcase.assertTrue(create_list)
self.testcase.assertEqual(len(create_list), 1)
stack_id = create_list[0].findtext('StackId')
self.testcase.assertTrue(stack_id is not None)
self.check_stackid(stack_id)
def _check_update_result(self, result):
# Check result looks OK
root = etree.fromstring(result)
update_list = root.xpath('/UpdateStackResponse/UpdateStackResult')
self.testcase.assertTrue(update_list)
self.testcase.assertEqual(len(update_list), 1)
stack_id = update_list[0].findtext('StackId')
self.testcase.assertTrue(stack_id is not None)
self.check_stackid(stack_id)
def check_stackid(self, stack_id):
print "Checking %s matches expected format" % (stack_id)
self.testcase.assertTrue(self.stack_id_re.match(stack_id) is not None)
def _create_heat_client(self):
return heat_client.get_client('0.0.0.0', 8000,
self.creds['username'],
self.creds['password'],
self.creds['tenant'],
self.creds['auth_url'],
self.creds['strategy'],
None, None, False)
def get_state(self):
stack_list = self.heatclient.list_stacks(StackName=self.stackname)
root = etree.fromstring(stack_list)
xpq = '//member[StackName="%s"]'
alist = root.xpath(xpq % (self.stackname))
result = None
if len(alist):
item = alist.pop()
result = item.findtext("StackStatus")
if result and result.find('FAILED') >= 0:
print stack_list
return result
def cleanup(self):
parameters = {'StackName': self.stackname}
self.heatclient.delete_stack(**parameters)
print 'Waiting for stack deletion to be completed'
tries = 0
while self.get_state() == 'DELETE_IN_PROGRESS':
tries += 1
self.testcase.assertTrue(tries < 50, 'Timed out')
time.sleep(10)
# final state for all stacks is DELETE_COMPLETE, but then they
# dissappear hence no result from list_stacks/get_state
# depending on timing, we could get either result here
end_state = self.get_state()
if end_state is not None:
self.testcase.assertEqual(end_state, 'DELETE_COMPLETE')
def prepare_jeos(self, p_os, arch, type):
imagename = p_os + '-' + arch + '-' + type
# skip creating jeos if image already available
if not self.poll_glance(imagename, False):
self.testcase.assertEqual(os.geteuid(), 0,
'No JEOS found - run as root to create')
# -d: debug, -G: register with glance
subprocess.call(['heat-jeos', '-d', '-G', 'create', imagename])
# Nose seems to change the behavior of the subprocess call to be
# asynchronous. So poll glance until image is registered.
self.poll_glance(self.glanceclient, imagename, True)
def poll_glance(self, imagename, block):
image = None
tries = 0
while image is None:
tries += 1
self.testcase.assertTrue(tries < 50, 'Timed out')
if block:
time.sleep(15)
print "Checking glance for image registration"
imageslist = self.glanceclient.images.list(
filters={'name': imagename})
image = next(imageslist, None)
if image:
print "Found image registration for %s" % imagename
# technically not necessary, but glance registers image
# before completely through with its operations
time.sleep(10)
return True
if not block:
break
return False
def get_stack_output(self, output_key):
'''
Extract a specified output from the DescribeStacks details
'''
# Get the DescribeStacks result for this stack
parameters = {'StackName': self.stackname}
result = self.heatclient.describe_stacks(**parameters)
return self._find_stack_output(result, output_key)
def _find_stack_output(self, result, output_key):
# Extract the OutputValue for the specified OutputKey
root = etree.fromstring(result)
output_list = root.xpath('//member[OutputKey="' + output_key + '"]')
output = output_list.pop()
value = output.findtext('OutputValue')
return value
def instance_phys_ids(self):
events = self.heatclient.list_stack_events(StackName=self.stackname)
root = etree.fromstring(events)
xpq = ('//member[StackName="%s" and '
'ResourceStatus="CREATE_COMPLETE" and '
'ResourceType="AWS::EC2::Instance"]')
alist = root.xpath(xpq % self.stackname)
return [elem.findtext('PhysicalResourceId') for elem in alist]
def response_xml_item(self, response, prefix, key):
'''
Extract response item via xpath prefix and key name
we expect the prefix to map to a single Element item
'''
root = etree.fromstring(response)
output_list = root.xpath(prefix)
self.testcase.assertTrue(output_list)
self.testcase.assertEqual(len(output_list), 1)
output = output_list.pop()
value = output.findtext(key)
return value
class StackBoto(Stack):
    '''
    Version of the Stack class which uses the boto client (hence AWS auth and
    the CFN API).
    '''

    def _check_create_result(self, result):
        # boto returns the stack id string directly, not an XML response
        self.check_stackid(result)

    def _check_update_result(self, result):
        # boto returns the stack id string directly, not an XML response
        self.check_stackid(result)

    def _create_heat_client(self):
        """Return a boto-based Heat client authenticated with EC2 credentials.

        Looks up the user's ec2 credentials via keystone and hands them to
        the boto client wrapper.
        """
        # Connect to the keystone client with the supplied credentials
        # and extract the ec2-credentials, so we can pass them into the
        # boto client
        keystone = client.Client(username=self.creds['username'],
                                 password=self.creds['password'],
                                 tenant_name=self.creds['tenant'],
                                 auth_url=self.creds['auth_url'])
        ksusers = keystone.users.list()
        ksuid = [u.id for u in ksusers if u.name == self.creds['username']]
        self.testcase.assertEqual(len(ksuid), 1)

        ec2creds = keystone.ec2.list(ksuid[0])
        self.testcase.assertEqual(len(ec2creds), 1)
        self.testcase.assertTrue(ec2creds[0].access)
        self.testcase.assertTrue(ec2creds[0].secret)
        print "Got EC2 credentials from keystone"

        # most of the arguments passed to heat_client_boto are for
        # compatibility with the non-boto client wrapper, and are
        # actually ignored, only the port and credentials are used
        return heat_client_boto.get_client('0.0.0.0', 8000,
                                           self.creds['username'],
                                           self.creds['password'],
                                           self.creds['tenant'],
                                           self.creds['auth_url'],
                                           self.creds['strategy'],
                                           None, None, False,
                                           aws_access_key=ec2creds[0].access,
                                           aws_secret_key=ec2creds[0].secret)

    def get_state(self):
        """Return the stack_status of this stack, or None if not listed.

        Boto returns stack objects, so no XML parsing is needed here.
        """
        stack_list = self.heatclient.list_stacks()
        this = [s for s in stack_list if s.stack_name == self.stackname]
        result = None
        if len(this):
            result = this[0].stack_status
        return result

    def instance_phys_ids(self):
        """Return physical ids of all successfully-created EC2 instances."""
        events = self.heatclient.list_stack_events(StackName=self.stackname)

        def match(e):
            # keep only CREATE_COMPLETE instance events for this stack
            return (e.stack_name == self.stackname and
                    e.resource_status == "CREATE_COMPLETE" and
                    e.resource_type == "AWS::EC2::Instance")

        return [e.physical_resource_id for e in events if match(e)]

    def _find_stack_output(self, result, output_key):
        """Extract the value for *output_key* from a describe_stacks result.

        Returns None implicitly when the key is absent.
        """
        self.testcase.assertEqual(len(result), 1)

        for o in result[0].outputs:
            if o.key == output_key:
                return o.value
def add_host(ip, hostname):
    """Append an ``<ip>\\t<hostname>`` entry to /etc/hosts.

    Bug fix: the entry is now newline-terminated.  The original wrote the
    bare pair, so consecutive calls fused entries onto one line and the
    exact line match in remove_host() could never find them.
    """
    with open('/etc/hosts', 'a') as hostfile:
        hostfile.write(ip + '\t' + hostname + '\n')
def remove_host(ip, hostname):
    """Remove the exact ``<ip>\\t<hostname>`` entry from /etc/hosts.

    The file is rewritten atomically: filtered content is written to a
    temporary file in /etc, which then replaces /etc/hosts while keeping
    the original permission bits.
    """
    with open('/etc/hosts', 'r') as src:
        existing = src.readlines()

    mode = stat.S_IMODE(os.stat('/etc/hosts').st_mode)
    unwanted = ip + '\t' + hostname

    with tempfile.NamedTemporaryFile('w', dir='/etc', delete=False) as replacement:
        replacement.writelines(
            entry for entry in existing if entry.rstrip() != unwanted)
        os.chmod(replacement.name, mode)
        os.rename(replacement.name, '/etc/hosts')
| StarcoderdataPython |
9685231 | <filename>models/ASIS/test.py
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
from scipy import stats
from IPython import embed
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(BASE_DIR))
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
import tf_util
from model import *
from test_utils import *
from clustering import cluster
import indoor3d_util
# Command-line configuration for ASIS inference (runs at import time).
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--verbose', action='store_true', help='if specified, output color-coded seg obj files')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]')
parser.add_argument('--bandwidth', type=float, default=1., help='Bandwidth for meanshift clustering [default: 1.]')
parser.add_argument('--input_list', type=str, default='data/test_hdf5_file_list_Area5.txt', help='Input data list file')
parser.add_argument('--model_path', type=str, default='log/model.ckpt', help='Path of model')
FLAGS = parser.parse_args()

BATCH_SIZE = 1  # inference processes one block at a time
NUM_POINT = FLAGS.num_point
GPU_INDEX = FLAGS.gpu
MODEL_PATH = FLAGS.model_path
TEST_FILE_LIST = FLAGS.input_list
BANDWIDTH = FLAGS.bandwidth

# Mean instance sizes per class, produced during training; used to filter
# out implausibly small predicted instances.
mean_num_pts_in_group = np.loadtxt(os.path.join(MODEL_PATH.split('/')[0], 'mean_ins_size.txt'))

output_verbose = FLAGS.verbose  # If true, output all color-coded segmentation obj files

LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
OUTPUT_DIR = os.path.join(LOG_DIR, 'test_results')
if not os.path.exists(OUTPUT_DIR):
    os.mkdir(OUTPUT_DIR)
os.system('cp inference_merge.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_inference.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')

MAX_NUM_POINT = 4096
NUM_CLASSES = 13      # S3DIS semantic classes
NEW_NUM_CLASSES = 13

HOSTNAME = socket.gethostname()

# One room file path per line in the input list, resolved against ROOT_DIR.
ROOM_PATH_LIST = [os.path.join(ROOT_DIR,line.rstrip()) for line in open(os.path.join(ROOT_DIR, FLAGS.input_list))]
len_pts_files = len(ROOM_PATH_LIST)
def log_string(out_str):
    """Append *out_str* to the shared inference log and echo it to stdout."""
    LOG_FOUT.write(out_str+'\n')
    # flush immediately so progress is preserved if the run is interrupted
    LOG_FOUT.flush()
    print(out_str)
def test():
    """Run ASIS inference over every test room.

    Restores the trained model, predicts per-point semantic labels and
    instance embeddings block by block, merges block-level instance ids
    into whole-room instances via a voxel volume, filters small instances,
    and writes per-room prediction/ground-truth text files.
    """
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            pointclouds_pl, labels_pl, sem_labels_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT)
            is_training_pl = tf.placeholder(tf.bool, shape=())

            # Get model and loss
            pred_sem, pred_ins = get_model(pointclouds_pl, is_training_pl, NUM_CLASSES)
            pred_sem_softmax = tf.nn.softmax(pred_sem)
            pred_sem_label = tf.argmax(pred_sem_softmax, axis=2)

            loss, sem_loss, disc_loss, l_var, l_dist, l_reg = get_loss(pred_ins, labels_pl, pred_sem_label, pred_sem, sem_labels_pl)

            loader = tf.train.Saver()

        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = True
        sess = tf.Session(config=config)

        is_training = False

        # Restore variables from disk.
        loader.restore(sess, MODEL_PATH)
        log_string("Model restored.")

        # Handles needed per sess.run() call.
        ops = {'pointclouds_pl': pointclouds_pl,
               'labels_pl': labels_pl,
               'sem_labels_pl': sem_labels_pl,
               'is_training_pl': is_training_pl,
               'pred_ins': pred_ins,
               'pred_sem_label': pred_sem_label,
               'pred_sem_softmax': pred_sem_softmax,
               'loss': loss,
               'l_var': l_var,
               'l_dist': l_dist,
               'l_reg': l_reg}

        # Accumulators for evaluation.  NOTE(review): several of these
        # (ious, totalnums, tpsins, fpsins, ...) are initialised but never
        # updated or reported in this function.
        total_acc = 0.0
        total_seen = 0
        ious = np.zeros(NEW_NUM_CLASSES)
        totalnums = np.zeros(NEW_NUM_CLASSES)
        total_gt_ins = np.zeros(NUM_CLASSES)
        at = 0.5
        tpsins = [[] for itmp in range(NUM_CLASSES)]
        fpsins = [[] for itmp in range(NUM_CLASSES)]
        all_mean_cov = [[] for itmp in range(NUM_CLASSES)]
        all_mean_weighted_cov = [[] for itmp in range(NUM_CLASSES)]

        output_filelist_f = os.path.join(LOG_DIR, 'output_filelist.txt')
        fout_out_filelist = open(output_filelist_f, 'w')
        for shape_idx in range(len_pts_files):
            room_path = ROOM_PATH_LIST[shape_idx]
            log_string('%d / %d ...' % (shape_idx, len_pts_files))
            log_string('Loading train file ' + room_path)

            # Per-room output files: predictions and ground truth.
            out_data_label_filename = os.path.basename(room_path)[:-4] + '_pred.txt'
            out_data_label_filename = os.path.join(OUTPUT_DIR, out_data_label_filename)
            out_gt_label_filename = os.path.basename(room_path)[:-4] + '_gt.txt'
            out_gt_label_filename = os.path.join(OUTPUT_DIR, out_gt_label_filename)
            fout_data_label = open(out_data_label_filename, 'w')
            fout_gt_label = open(out_gt_label_filename, 'w')
            fout_out_filelist.write(out_data_label_filename+'\n')

            # Split the room into overlapping 1m blocks of NUM_POINT points.
            cur_data, cur_sem, cur_group = indoor3d_util.room2blocks_wrapper_normalized(room_path, NUM_POINT, block_size=1.0, stride=0.5,
                                                                                       random_sample=False, sample_num=None)
            cur_data = cur_data[:, 0:NUM_POINT, :]
            cur_sem = np.squeeze(cur_sem)
            cur_group = np.squeeze(cur_group)

            # Get room dimension..
            data_label = np.load(room_path)
            data = data_label[:, 0:6]
            max_room_x = max(data[:, 0])
            max_room_y = max(data[:, 1])
            max_room_z = max(data[:, 2])

            cur_pred_sem = np.zeros_like(cur_sem)
            cur_pred_sem_softmax = np.zeros([cur_sem.shape[0], cur_sem.shape[1], NUM_CLASSES])
            group_output = np.zeros_like(cur_group)

            # Voxel volume over normalised room coords used by BlockMerging
            # to stitch block-level instance ids into room-level ones.
            gap = 5e-3
            volume_num = int(1. / gap)+1
            volume = -1* np.ones([volume_num,volume_num,volume_num]).astype(np.int32)
            volume_seg = -1* np.ones([volume_num,volume_num,volume_num]).astype(np.int32)

            intersections = np.zeros(NEW_NUM_CLASSES)
            unions = np.zeros(NEW_NUM_CLASSES)

            num_data = cur_data.shape[0]
            for j in range(num_data):
                log_string("Processsing: Shape [%d] Block[%d]"%(shape_idx, j))

                pts = cur_data[j,...]
                group = cur_group[j]
                sem = cur_sem[j]

                feed_dict = {ops['pointclouds_pl']: np.expand_dims(pts, 0),
                             ops['labels_pl']: np.expand_dims(group, 0),
                             ops['sem_labels_pl']: np.expand_dims(sem, 0),
                             ops['is_training_pl']: is_training}

                loss_val, l_var_val, l_dist_val, l_reg_val, pred_ins_val, pred_sem_label_val, pred_sem_softmax_val = sess.run(
                    [ops['loss'], ops['l_var'], ops['l_dist'], ops['l_reg'], ops['pred_ins'], ops['pred_sem_label'], ops['pred_sem_softmax']],
                    feed_dict=feed_dict)

                # Drop the batch dimension (BATCH_SIZE == 1).
                pred_val = np.squeeze(pred_ins_val, axis=0)
                pred_sem = np.squeeze(pred_sem_label_val, axis=0)
                pred_sem_softmax = np.squeeze(pred_sem_softmax_val, axis=0)
                cur_pred_sem[j, :] = pred_sem
                cur_pred_sem_softmax[j, ...] = pred_sem_softmax

                # cluster: mean-shift over instance embeddings, then assign
                # each cluster the majority semantic label of its points.
                group_seg = {}
                bandwidth = BANDWIDTH
                num_clusters, labels, cluster_centers = cluster(pred_val, bandwidth)
                for idx_cluster in range(num_clusters):
                    tmp = (labels == idx_cluster)
                    estimated_seg = int(stats.mode(pred_sem[tmp])[0])
                    group_seg[idx_cluster] = estimated_seg

                groupids_block = labels

                groupids = BlockMerging(volume, volume_seg, pts[:, 6:],
                                        groupids_block.astype(np.int32), group_seg, gap)

                group_output[j, :] = groupids
                total_acc += float(np.sum(pred_sem==sem))/pred_sem.shape[0]
                total_seen += 1

            # Flatten all blocks of the room into one point list.
            group_pred = group_output.reshape(-1)
            seg_pred = cur_pred_sem.reshape(-1)
            seg_pred_softmax = cur_pred_sem_softmax.reshape([-1, NUM_CLASSES])
            pts = cur_data.reshape([-1, 9])

            # filtering: snap each point's instance id to the merged voxel id
            # (columns 6:9 hold normalised room coordinates).
            x = (pts[:, 6] / gap).astype(np.int32)
            y = (pts[:, 7] / gap).astype(np.int32)
            z = (pts[:, 8] / gap).astype(np.int32)
            for i in range(group_pred.shape[0]):
                if volume[x[i], y[i], z[i]] != -1:
                    group_pred[i] = volume[x[i], y[i], z[i]]

            seg_gt = cur_sem.reshape(-1)
            un = np.unique(group_pred)
            pts_in_pred = [[] for itmp in range(NUM_CLASSES)]
            group_pred_final = -1 * np.ones_like(group_pred)
            grouppred_cnt = 0
            for ig, g in enumerate(un): #each object in prediction
                if g == -1:
                    continue
                tmp = (group_pred == g)
                sem_seg_g = int(stats.mode(seg_pred[tmp])[0])
                # keep only instances at least a quarter of the mean
                # training-set instance size for that class
                #if np.sum(tmp) > 500:
                if np.sum(tmp) > 0.25 * mean_num_pts_in_group[sem_seg_g]:
                    group_pred_final[tmp] = grouppred_cnt
                    pts_in_pred[sem_seg_g] += [tmp]
                    grouppred_cnt += 1

            if output_verbose:
                #output_color_point_cloud(pts[:, 6:], group_pred_final.astype(np.int32),
                #                         os.path.join(OUTPUT_DIR, '%d_grouppred.obj' % (shape_idx)))
                # De-normalise coordinates and colours before dumping.
                pts[:, 6] *= max_room_x
                pts[:, 7] *= max_room_y
                pts[:, 8] *= max_room_z
                pts[:, 3:6] *= 255.0
                ins = group_pred_final.astype(np.int32)
                sem = seg_pred.astype(np.int32)
                sem_softmax = seg_pred_softmax
                sem_gt = seg_gt
                ins_gt = cur_group.reshape(-1)
                for i in range(pts.shape[0]):
                    fout_data_label.write('%f %f %f %d %d %d %f %d %d\n' % (
                        pts[i, 6], pts[i, 7], pts[i, 8], pts[i, 3], pts[i, 4], pts[i, 5], sem_softmax[i, sem[i]], sem[i], ins[i]))
                    fout_gt_label.write('%d %d\n' % (sem_gt[i], ins_gt[i]))

            fout_data_label.close()
            fout_gt_label.close()

        fout_out_filelist.close()
fout_out_filelist.close()
if __name__ == "__main__":
    test()
    # Close the module-level log file opened at import time.
    LOG_FOUT.close()
| StarcoderdataPython |
3398328 | # scope
# Demonstration of Python name scoping: global vs local vs enclosing scope.
gl = 1  # module-level (global) binding

def f(x):
    """Mutate the module global ``gl`` and return a closure over f's locals."""
    global gl    # rebinding gl inside f targets the module global
    gl += 2
    lo1 = 3      # ordinary locals of f
    lo2 = 4
    lo3 = 5
    def f2(x, y):
        global gl        # reads the module global, not anything in f
        nonlocal lo3     # rebinds f's lo3 instead of creating a new local
        lo3 = 5
        lo4 = gl + lo2 + lo3   # lo2 is read from the enclosing scope (closure)
    return f2
| StarcoderdataPython |
3530260 | <reponame>MrTanoshii/PyWeek-33-Metro<filename>src/save_data.py
import json
import src.const as C
from src.tracker import Tracker
from src.view.map import MapView
class GameData:
    """Persistent game state backed by ``src/resources/gamedata.json``.

    All state lives in class attributes and every accessor is a classmethod,
    so the class acts as a process-wide singleton.  Each mutator writes the
    whole file back via :meth:`write_data`.
    """

    # In-memory copies of the persisted sections.
    gold = None
    level_data = {}
    loadout = {}
    story = {}

    def __init__(self):
        pass

    # Load game data
    @classmethod
    def read_data(cls):
        """Load all sections from disk, regenerating any missing/broken one.

        Each section that is absent (KeyError) or null is reset to defaults
        and the file is re-read recursively.
        """
        try:
            with open("src/resources/gamedata.json", "r", encoding="utf-8") as file:
                _data = json.load(file)
        except FileNotFoundError:
            print("ERROR 0: Regenerating game data")
            cls.reset_data(gold=True, level=True, loadout=True, story=True)
            _data = False
        if _data:
            try:
                cls.gold = _data["coins"]
                if cls.gold is None:
                    print("ERROR 2: Regenerating level data")
                    cls.reset_data(gold=True)
                    cls.read_data()
            except KeyError:
                print("ERROR 1: Regenerating level data")
                cls.reset_data(gold=True)
                cls.read_data()
            try:
                cls.level_data = _data["leveldata"]
                if cls.level_data is None:
                    print("ERROR 2: Regenerating level data")
                    cls.reset_data(level=True)
                    cls.read_data()
            except KeyError:
                print("ERROR 1: Regenerating level data")
                cls.reset_data(level=True)
                cls.read_data()
            try:
                cls.loadout = _data["loadout"]
                if cls.loadout is None:
                    print("ERROR 2: Regenerating loadout")
                    cls.reset_data(loadout=True)
                    cls.read_data()
            except KeyError:
                print("ERROR 1: Regenerating loadout")
                cls.reset_data(loadout=True)
                cls.read_data()
            try:
                cls.story = _data["story"]
                if cls.story is None:
                    print("ERROR 2: Regenerating story")
                    cls.reset_data(story=True)
                    cls.read_data()
            except KeyError:
                print("ERROR 1: Regenerating story")
                cls.reset_data(story=True)
                cls.read_data()
        else:
            # File was just regenerated by reset_data; load the fresh copy.
            cls.read_data()

    # Save game data
    @classmethod
    def write_data(cls):
        """Serialise the current in-memory state back to the JSON file."""
        with open("src/resources/gamedata.json", "w", encoding="utf-8") as file:
            _data = json.dumps({
                "coins": cls.gold,
                "leveldata": cls.level_data,
                "loadout": cls.loadout,
                "story": cls.story})
            file.write(_data)

    @classmethod
    def reset_data(cls, gold=False, level=False, loadout=False, story=False):
        """Reset the selected sections to their defaults and persist them."""
        # Reset all data
        if gold:
            cls.gold = 100
        if level:
            # score / passed-flag / locked-flag per level id
            cls.level_data = {
                1: {"score": 0, "passed": 0, "locked": 1},
                2: {"score": 0, "passed": 0, "locked": 1},
                3: {"score": 0, "passed": 0, "locked": 1},
                4: {"score": 0, "passed": 0, "locked": 1},
                5: {"score": 0, "passed": 0, "locked": 1},
                6: {"score": 0, "passed": 0, "locked": 1},
            }
        if loadout:
            # Weapon levels; 0 means not yet owned.
            cls.loadout = {
                "Revolver": {
                    "lvl": 1
                },
                "Rifle": {
                    "lvl": 0
                },
                "Shotgun": {
                    "lvl": 0
                },
                "RPG": {
                    "lvl": 0
                }
            }
        if story:
            cls.story = {"0": 1, "1": 0, "2": 0,
                         "3": 0, "4": 0, "5": 0, "6": 0}

        # Write changes
        cls.write_data()

    @classmethod
    def update_gold(cls, amount):
        """Overwrite the persisted gold amount with *amount*."""
        cls.gold = amount

        # Write changes
        cls.write_data()

    @classmethod
    def deposit_gold(cls):
        """Move the gold collected in the run tracker into persistent gold."""
        current_gold = cls.gold
        # NOTE(review): deposits whenever the tracked amount differs from the
        # saved amount — presumably a guard against a no-op write; confirm.
        if current_gold != Tracker.gold:
            cls.gold += Tracker.gold
            Tracker.gold = 0

            # Write changes
            cls.write_data()

    @classmethod
    def update_highscore(cls, level):
        """Record a new highscore for *level*; unlock the next level and the
        next story step when the pass threshold (100 * level) is exceeded."""
        current_highscore = cls.level_data[str(level)]["score"]
        if Tracker.score > current_highscore:
            cls.level_data[str(level)]["score"] = Tracker.score
        if cls.level_data[str(level)]["score"] > 100 * level:
            cls.level_data[str(level)]["passed"] = 1
            cls.story[str(level)] = 1
            if level < len(C.MAP_MONUMENTS_LIST):
                cls.level_data[str(level+1)]["locked"] = 0
                # NOTE(review): duplicate of the assignment above.
                cls.story[str(level)] = 1
                # Update map icons
                MapView.update_monument_list()
                MapView.update_step_list()
        # Write changes
        cls.write_data()

    @classmethod
    def update_loadout(cls, name, lvl):
        """Set the level of the weapon called *name* and persist it."""
        for saved_weapon in cls.loadout:
            if saved_weapon == name:
                cls.loadout[saved_weapon]["lvl"] = lvl
                # Write changes
                cls.write_data()
                break

    @classmethod
    def update_steps(cls, story_id: str, status: int):
        """Set story step *story_id* to *status* and unlock its level."""
        current_story_status = cls.story[story_id]
        if current_story_status != status:
            cls.story[story_id] = status
            # update levels
            cls.update_levels(int(story_id)+1)
            # Write changes
            cls.write_data()

    @classmethod
    def update_levels(cls, level: int):
        """Unlock *level* (caps at the 6 defined levels) and persist."""
        # player passed the level
        if level <= 6:
            cls.level_data[str(level)]["locked"] = 0
            # Write changes
            cls.write_data()
| StarcoderdataPython |
11270241 | <reponame>Anand1310/Benevolent-Bonobos
from typing import Callable, List
from blessed import Terminal
class Render(object):
    """Render class to put things on the screen.

    Implemented as a Borg/monostate: every instance shares the state of the
    first one created, so it can be instantiated anywhere.

    Example:
    ```
    from maze_gitb.core.render import Render
    render = Render()
    render("this text", col="black", bg_col="lightskyblue1")
    ```
    """

    # Shared __dict__ of the first instance (Borg pattern).
    __monostate = None

    def __init__(self, col: str = "black", bg_col: str = "lightskyblue1"):
        if not Render.__monostate:
            # First instance: become the shared state.
            Render.__monostate = self.__dict__
            self.frames: List[str] = [""]   # pending frames, joined by screen()
            self.term = Terminal()
            self.col = col
            self.bg_col = bg_col
        else:
            # Later instances alias the shared state; their col/bg_col
            # arguments are ignored.
            self.__dict__ = Render.__monostate

    def __call__(self, frame: str, col: str = None, bg_col: str = None) -> None:
        """Queue *frame* painted with the given (or default) colours."""
        if col is None:
            col = self.col
        if bg_col is None:
            bg_col = self.bg_col
        paint: Callable[[str], str] = getattr(self.term, f"{col}_on_{bg_col}")
        # NOTE(review): the trailing background uses self.bg_col, not the
        # bg_col argument — presumably to restore the default background
        # after the painted text; confirm this is intentional.
        bg = getattr(self.term, f"on_{self.bg_col}")
        frame = self.term.home + paint(frame) + bg
        self.frames.append(frame)

    def screen(self) -> str:
        """Return all queued frames joined together and reset the queue."""
        frame = "".join(self.frames) + self.term.home
        self.frames = [""]
        return frame
| StarcoderdataPython |
11294627 | import sqlite3 as sql
import queries as qrs
import pandas as pd
# assignment 1
def connect_db(db='../rpg_db.sqlite3'):
    """Open (creating it if necessary) and return a SQLite connection to *db*."""
    connection = sql.connect(db)
    return connection
def exec(conn, query):
    """Execute *query* on *conn* and return all result rows as a list of tuples.

    NOTE: the name shadows the ``exec`` builtin; it is kept unchanged because
    existing callers in this module use it.

    Fix: the cursor is now closed deterministically instead of being leaked
    until garbage collection.
    """
    curs = conn.cursor()
    try:
        curs.execute(query)
        return curs.fetchall()
    finally:
        curs.close()
# assignment 2
# Load the BuddyMove review dataset and inspect its shape/null counts.
df = pd.DataFrame(pd.read_csv('../buddymove_holidayiq.csv'))
print(df.shape)
print(df.isnull().count())

conn = sql.connect('../buddymove_holidayiq.sqlite3')
# df.to_sql('review', conn)  # one-time import of the CSV into the 'review' table

# how many rows
row_count = 'SELECT COUNT(*) FROM review'
# how many users who reviewed at least 100 'Nature' and at least 100 in 'Shopping'
nature_and_shopping = 'SELECT COUNT(*) FROM review WHERE Nature >= 100 AND Shopping >= 100'
print(exec(conn, row_count))
print(exec(conn, nature_and_shopping))
| StarcoderdataPython |
13421 | <filename>packages/utils/propagate_license.py
#!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# propagate_license.py
# Created by <NAME> on 2014-05-16.
# Email: <EMAIL>
__license_header__ = """
{} Copyright 2014 Open Connectome Project (http://openconnecto.me)
{}
{} Licensed under the Apache License, Version 2.0 (the "License");
{} you may not use this file except in compliance with the License.
{} You may obtain a copy of the License at
{}
{} http://www.apache.org/licenses/LICENSE-2.0
{}
{} Unless required by applicable law or agreed to in writing, software
{} distributed under the License is distributed on an "AS IS" BASIS,
{} WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
{} See the License for the specific language governing permissions and
{} limitations under the License.
{}
"""
COMM_COUNT = 14
comm = {".py":"#", ".pyx":"#", "": "#", ".html":"", ".sh":"#", ".r":"#", ".m":"%", ".c":"//",
".c++":"//", ".java":"//", ".js":"//"}
import argparse
import os
def add(files):
    """Insert the Apache license header into each file in *files*.

    Files with a shebang get the header after line 0 (replacing an old
    dated copyright if present); all other files get it at the top, wrapped
    in an HTML comment for .html files.

    NOTE(review): the output file handle opened at the end is never
    explicitly closed.
    """
    global __license_header__
    for full_fn in files:
        license_header = __license_header__
        print "Processing file: %s ..." % full_fn
        script = open(full_fn, "rb")
        lines = script.read().splitlines()
        script.close()

        # Exception for html
        comment_style = comm[os.path.splitext(full_fn)[1].lower()]
        if lines[0].startswith("#!/usr/bin"):
            if lines[5].startswith("# Copyright"): # get rid of copyright year
                del lines[5], lines[1]
            lines.insert(1, license_header.format(*([comment_style]*COMM_COUNT)))
        else:
            #license_header += "{} Created by <NAME>\n{} Email: <EMAIL>".format(*([comment_style]*2))
            if os.path.splitext(full_fn)[1].lower().strip() == ".html":
                # HTML has no line comments: wrap the whole header instead.
                license_header = "<!-- " + license_header + " -->"
            lines.insert(0, license_header.format(*([comment_style]*COMM_COUNT)))

        script = open(full_fn, "wb")
        script.write("\n".join(lines))
def hidden(path):
    """Return True if any '/'-separated component of *path* is hidden
    (i.e. starts with a dot), else False.

    Rewritten with ``any()`` over the manual loop — same behavior, idiomatic
    short-circuiting form.
    """
    return any(part.startswith(".") for part in path.split("/"))
def rm(dirname):
    """Remove license headers under *dirname*.

    NOTE: not implemented — the --remove CLI flag is currently a no-op.
    """
    pass
def main():
    """CLI entry point: add license headers to explicit files (-f) or to
    every matching, non-hidden file under a directory tree (-d)."""
    parser = argparse.ArgumentParser(description="Add or Update license headers to code")
    parser.add_argument("-r", "--remove", action="store_true", help="Remove the license")
    parser.add_argument("-d", "--dirname", action="store", default=".", help="Directory where to start walk")
    parser.add_argument("-f", "--files", action="store", nargs="*", help="Files you want license added to")
    parser.add_argument("-e", "--file_exts", nargs="*", action="store", \
                        default=[".py", ".pyx", ".html", ".sh", ".R", ".m", ""], \
                        help="File extensions to add to the files altered")
    parser.add_argument("-i", "--ignore", nargs="*", action="store", \
                        default=["README", "__init__.py", "TODO", __file__], \
                        help="Files to ignore")
    result = parser.parse_args()

    if result.files:
        print "Licensing individual files ..."
        add(result.files)
        exit(1)
    else:
        print "Licensing a directory of files ..."
        files = []
        # Collect every regular, non-hidden, non-ignored file whose
        # extension is in the allowed list.
        for root, dirnames, filenames in os.walk(os.path.abspath(result.dirname)):
            for filename in filenames:
                full_fn = os.path.join(root, filename)
                if os.path.isfile(full_fn) and not hidden(full_fn) \
                        and not os.path.basename(full_fn) in result.ignore \
                        and ( os.path.splitext(full_fn)[-1].lower().strip() in result.file_exts ):
                    files.append(full_fn)
        add(files)

if __name__ == "__main__":
    main()
| StarcoderdataPython |
1711175 | import os
import numpy as np
import pandas as pd
import pickle
import statsmodels.api as sm
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import lightgbm as lgb
from sklearn.linear_model import LogisticRegression, LinearRegression
# Machine-specific absolute paths (NOTE(review): not portable — consider
# deriving from the repository root instead of hard-coding a home directory).
input_path = "/Users/christianhilscher/Desktop/dynsim/input/"
model_path = "/Users/christianhilscher/desktop/dynsim/src/estimation/models/"

# chdir so that the project-local 'standard' module below is importable.
os.chdir("/Users/christianhilscher/desktop/dynsim/src/estimation/")
from standard import getdf, get_dependent_var
###############################################################################
def data_general(dataf, dep_var, estimate=1):
    """Prepare a feature frame for estimating *dep_var*.

    With estimate=1 (training) the 'dep_var' target column produced by
    get_dependent_var() is kept; otherwise it is removed along with
    'personweight'.  Identifier/leakage columns are always dropped.

    NOTE(review): indentation reconstruction — the 'personweight' drop is
    placed in the else branch because the downstream _prepare_* functions
    read X_train['personweight'] on the estimate==1 path; confirm against
    the original file.
    """
    dataf = dataf.copy()

    if estimate == 1:
        dataf = get_dependent_var(dataf, dep_var)
    else:
        dataf = get_dependent_var(dataf, dep_var)
        dataf.drop('dep_var', axis=1, inplace=True)
        dataf.drop('personweight', axis=1, inplace=True)

    # Identifiers and future-looking columns that must not enter the model.
    vars_drop = ["pid",
                 "hid",
                 "orighid",
                 "age_max",
                 "predicted",
                 "lfs",
                 "working",
                 "fulltime",
                 "lfs_t1",
                 "working_t1",
                 "fulltime_t1"]

    for var in vars_drop:
        if var in dataf.columns.tolist():
            dataf.drop(var, axis=1, inplace=True)
        else:
            pass
    return dataf
def _prepare_classifier(dataf):
    """Split, weight and scale *dataf* for a LightGBM classifier.

    Expects a 'dep_var' target column and a 'personweight' sample-weight
    column.  Returns a dict with scaled train/test arrays, LightGBM
    Datasets, feature names and training weights.

    NOTE(review): train and test are scaled with *separately fitted*
    StandardScalers — the test set should normally be transformed with the
    scaler fitted on the training set; left unchanged to preserve behavior.
    """
    dataf = dataf.copy()

    y = dataf['dep_var']
    X = dataf.drop('dep_var', axis=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.05)

    # Making weights
    weights_train = X_train['personweight']
    X_train.drop('personweight', axis=1, inplace=True)

    weights_test = X_test['personweight']
    X_test.drop('personweight', axis=1, inplace=True)

    if "personweight_interacted" in X.columns.tolist():
        X_train.drop('personweight_interacted', axis=1, inplace=True)
        X_test.drop('personweight_interacted', axis=1, inplace=True)
    else:
        pass

    # Scaling
    X_train_scaled = StandardScaler().fit_transform(np.asarray(X_train))
    X_test_scaled = StandardScaler().fit_transform(np.asarray(X_test))

    # Coeffs feature_names
    feature_names = X_train.columns.tolist()

    # For Standard Part:
    X_train = sm.add_constant(X_train)
    X_test = sm.add_constant(X_test)

    # For ML part:
    lgb_train = lgb.Dataset(X_train_scaled, y_train,
                            weight = weights_train)
    lgb_test = lgb.Dataset(X_test_scaled, y_test,
                           weight = weights_test)

    out_dici = {'X_train': X_train_scaled,
                'X_test': X_test_scaled,
                'y_train': y_train,
                'y_test': y_test,
                'lgb_train': lgb_train,
                'lgb_test': lgb_test,
                'features': feature_names,
                'weights': weights_train}
    return out_dici
def _prepare_regressor(dataf):
    """Split, weight and scale *dataf* for a LightGBM regressor.

    Like _prepare_classifier, but also standardises the target and returns
    the test-target scaler so predictions can be transformed back.

    NOTE(review): as in _prepare_classifier, train/test (and the two y
    splits) are scaled with independently fitted scalers; also note the
    returned 'X_test' is the unscaled statsmodels frame while 'X_train' is
    the scaled array — asymmetry kept as-is.
    """
    dataf = dataf.copy()

    y = dataf['dep_var']
    X = dataf.drop('dep_var', axis=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.05)

    # Making weights
    weights_train = X_train['personweight']
    X_train.drop('personweight', axis=1, inplace=True)

    weights_test = X_test['personweight']
    X_test.drop('personweight', axis=1, inplace=True)

    # Scaling
    X_train_scaled = StandardScaler().fit_transform(np.asarray(X_train))
    X_test_scaled = StandardScaler().fit_transform(np.asarray(X_test))
    y_train_scaled = StandardScaler().fit_transform(np.asarray(y_train).reshape(-1,1))

    # Saving the scaler of the test data to convert the predicted values again
    y_test_scaler = StandardScaler().fit(np.asarray(y_test).reshape(-1,1))
    y_test_scaled = y_test_scaler.transform(np.asarray(y_test).reshape(-1,1))

    feature_names = X_train.columns.tolist()
    y_test_scaled = np.ravel(y_test_scaled)
    y_train_scaled = np.ravel(y_train_scaled)

    # For Standard Part:
    X_train = sm.add_constant(X_train)
    X_test = sm.add_constant(X_test)

    # For ML part:
    lgb_train = lgb.Dataset(X_train_scaled, y_train,
                            weight = weights_train)
    lgb_test = lgb.Dataset(X_test_scaled, y_test,
                           weight = weights_test)

    out_dici = {'X_train': X_train_scaled,
                'X_test': X_test,
                'y_train': y_train_scaled,
                'y_test': y_test,
                'scaler': y_test_scaler,
                'lgb_train': lgb_train,
                'lgb_test': lgb_test,
                'features': feature_names,
                'weights': weights_train}
    return out_dici
def _estimate(dataf, dep_var, type):
    """Train a LightGBM model for *dep_var* and save it to the models dir.

    *type* selects the objective: 'regression' (l2), 'binary', or anything
    else for multiclass (class count inferred from the training labels).
    The model is written to ``<model_path><dep_var>_extended.txt``.

    NOTE(review): the parameter 'type' and the local 'dict' shadow builtins;
    names kept unchanged for interface stability.
    """
    dataf = dataf.copy()
    dataf = data_general(dataf, dep_var)
    dataf.dropna(inplace=True)

    if type == 'regression':
        dict = _prepare_regressor(dataf)
        params = {'boosting_type' : 'gbdt',
                  'n_estimators': 350,
                  'objective' : 'l2',
                  'metric' : 'l2',
                  'num_leaves' : 31,
                  'learning_rate' : 0.15,
                  'feature_fraction': [0.9],
                  'bagging_fraction': [0.8],
                  'bagging_freq': [5],
                  'verbose' : 5,
                  'early_stopping_rounds': 5}
    elif type == 'binary':
        dict = _prepare_classifier(dataf)
        params = {'task' : 'train',
                  'boosting_type' : 'gbdt',
                  'n_estimators': 350,
                  'objective': 'binary',
                  'eval_metric': 'logloss',
                  'learning_rate': 0.05,
                  'feature_fraction': [0.9],
                  'num_leaves': 31,
                  'verbose': 0,
                  'early_stopping_rounds': 5}
    else:
        dict = _prepare_classifier(dataf)
        params = {'task' : 'train',
                  'boosting_type' : 'gbdt',
                  'n_estimators': 350,
                  'objective': 'multiclass',
                  'num_class': len(dict['y_train'].unique()),
                  'eval_metric': 'multi_logloss',
                  'learning_rate': 0.05,
                  'feature_fraction': [0.9],
                  'num_leaves': 31,
                  'verbose': 0,
                  'early_stopping_rounds': 5}

    modl = lgb.train(params,
                     train_set = dict['lgb_train'],
                     valid_sets = dict['lgb_test'],
                     feature_name = dict['features'])

    modl.save_model(model_path + dep_var + "_extended.txt")
#
# df = pd.read_pickle(input_path + 'illmitz10_reduced').dropna()
# df1 = getdf(df)
#
# _estimate(df1, "employment_status", "multiclass")
# _estimate(df1, "hours", "regression")
# _estimate(df1, "gross_earnings", "regression")
| StarcoderdataPython |
3463843 | from django.urls import path
from todos.views import TodoListCreateAPIView, TodoDetailAPIView
# Namespace for URL reversing, e.g. reverse('todos:list').
app_name = 'todos'

urlpatterns = [
    # as_view() converts the class-based view into the plain callable that
    # Django's URL resolver expects; a fresh instance is created per request,
    # so no state is shared between requests.
    path('', TodoListCreateAPIView.as_view(), name="list"),
    # '<int:pk>/' captures one URL segment with the `int` converter (zero or
    # any positive integer) and passes it to the view as the `pk` kwarg.
    # Without a converter any non-empty string would match.
    path('<int:pk>/', TodoDetailAPIView.as_view(), name="detail"),
]
6553300 | import argparse
import platform
import time
import screen_brightness_control as SBC
def get_monitors(args):
    """Yield an ``SBC.Monitor`` for every display matching the CLI filters."""
    for info in SBC.filter_monitors(display=args.display, method=args.method):
        yield SBC.Monitor(info)
if __name__ == '__main__':
    # Command-line front-end for screen_brightness_control: get/set/fade
    # brightness or list detected monitors.
    parser = argparse.ArgumentParser(prog='screen_brightness_control')
    parser.add_argument('-d', '--display', help='the display to be used')
    parser.add_argument('-s', '--set', type=int, help='set the brightness to this value', metavar='VALUE')
    parser.add_argument('-g', '--get', action='store_true', help='get the current screen brightness')
    parser.add_argument('-f', '--fade', type=int, help='fade the brightness to this value', metavar='VALUE')

    # Platform-specific list of valid backends, shown in the -m help text.
    if platform.system() == 'Windows':
        mthd = ('wmi', 'vcp')
    elif platform.system() == 'Linux':
        mthd = ('xrandr', 'ddcutil', 'light', 'xbacklight')
    else:
        # BUGFIX: `mthd` used to be left undefined on any other platform
        # (e.g. macOS), so building the f-string below raised NameError.
        mthd = ()
    parser.add_argument('-m', '--method', type=str, help=f'specify which method to use ({" or ".join(mthd)})')
    parser.add_argument('-l', '--list', action='store_true', help='list all monitors')
    parser.add_argument('-v', '--verbose', action='store_true', help='some messages will be more detailed')
    parser.add_argument('-V', '--version', action='store_true', help='print the current version')
    args = parser.parse_args()

    # Normalize --display: digit strings become integer display indices.
    if args.display is not None:
        if type(args.display) not in (str, int):
            raise TypeError('display arg must be str or int')
        if type(args.display) is str and args.display.isdigit():
            args.display = int(args.display)

    if (args.get, args.set) != (False, None):
        # Per-monitor get/set; falls back to the module-level functions
        # if anything in the per-monitor path raises.
        try:
            if args.get:
                arrow = ':'
            else:
                arrow = ' ->'
            for monitor in get_monitors(args):
                name = monitor.name
                if args.verbose:
                    name += f' ({monitor.serial}) [{monitor.method.__name__}]'
                try:
                    if args.get:
                        ret_val = monitor.get_brightness()
                    else:
                        ret_val = monitor.set_brightness(args.set)
                    if ret_val is None:
                        raise Exception
                    print(f'{name}{arrow} {ret_val}%')
                except Exception as e:
                    if args.verbose:
                        print(f'{name}{arrow} Failed: {e}')
                    else:
                        print(f'{name}{arrow} Failed')
        except Exception:
            kw = {'display': args.display, 'method': args.method, 'verbose_error': args.verbose}
            if args.get:
                print(SBC.get_brightness(**kw))
            else:
                print(SBC.set_brightness(args.set, **kw))
    elif args.fade is not None:
        try:
            # Kick off a non-blocking fade per monitor, then poll until all
            # fade threads have finished, reporting each as it completes.
            monitors = list(get_monitors(args))
            for monitor in monitors:
                monitor.initial_brightness = monitor.get_brightness()
                monitor.fade_thread = monitor.fade_brightness(
                    args.fade,
                    blocking=False,
                    start=monitor.initial_brightness
                )
            while True:
                done = []
                for monitor in monitors:
                    if not monitor.fade_thread.is_alive():
                        name = monitor.name
                        if args.verbose:
                            name += f' ({monitor.serial}) [{monitor.method.__name__}]'
                        print(f'{name}: {monitor.initial_brightness}% -> {monitor.get_brightness()}%')
                        done.append(monitor)
                monitors = [i for i in monitors if i not in done]
                if monitors == []:
                    break
                time.sleep(0.1)
        except Exception:
            # Fallback: blocking module-level fade.
            print(
                SBC.fade_brightness(
                    args.fade,
                    display=args.display,
                    method=args.method,
                    verbose_error=args.verbose
                )
            )
    elif args.version:
        print(SBC.__version__)
    elif args.list:
        if args.verbose:
            monitors = SBC.list_monitors_info(method=args.method)
        else:
            monitors = SBC.list_monitors(method=args.method)
        if len(monitors) == 0:
            print('No monitors detected')
        else:
            for i in range(len(monitors)):
                if type(monitors[i]) is str:
                    print(f'Display {i}: {monitors[i]}')
                else:
                    msg = (
                        'Display {}:\n\t'
                        'Name: {}\n\t'
                        'Model: {}\n\t'
                        'Manufacturer: {}\n\t'
                        'Manufacturer ID: {}\n\t'
                        'Serial: {}\n\t'
                        'Method: {}\n\tEDID:'
                    )
                    msg = msg.format(
                        i,
                        monitors[i]['name'],
                        monitors[i]['model'],
                        monitors[i]['manufacturer'],
                        monitors[i]['manufacturer_id'],
                        monitors[i]['serial'],
                        monitors[i]['method'].__name__
                    )
                    # format the edid string
                    if monitors[i]['edid'] is not None:
                        # split str into pairs of characters
                        edid = [monitors[i]['edid'][j:j + 2] for j in range(0, len(monitors[i]['edid']), 2)]
                        # make the characters form 16 pair long lines
                        msg += '\n\t\t'
                        msg += '\n\t\t'.join([' '.join(edid[j:j + 16]) for j in range(0, len(edid), 16)])
                    else:
                        msg += ' None'
                    print(msg)
    else:
        print("No valid arguments")
| StarcoderdataPython |
11326077 | <filename>mpi_util.py
#!/usr/bin/python
import numpy as np
import ctypes
import os
import sys
from time import time, sleep, clock
from functools import reduce, partial, update_wrapper #wraps
from pyscf.lib.numpy_helper import ddot
from multiprocessing import Pool
# 1: map numpy dot, omp=cpu_per_task, 1
# 2: map pyscf ddot, omp=cpu_per_task, 1
# 3: pmmul use ddot, omp=cpu_per_task, 1
# 4: lOMPdgemm, omp=cpu_per_task, 1
# 5: ldgemm, omp=cpu_per_task, 1
# 6: lNPdgemm, omp=cpu_per_task, 1
# 7: map cdgemm, omp=cpu_per_task, 1
# 8: map cddot, omp=cpu_per_task, 1
mpi_util = ctypes.CDLL(r'/home/zhyou/mpi/mpi_util.so')
raw_partial = partial

def named_partial(raw_func, *args, **kwargs):
    """functools.partial that also carries over the wrapped function's metadata."""
    bound = raw_partial(raw_func, *args, **kwargs)
    update_wrapper(bound, raw_func)
    return bound

# Shadow `partial` module-wide so the rest of the file gets the named variant.
partial = named_partial
def test_timer(func, r=1, i=1):
ct_l = np.zeros(r, dtype=np.float64)
wt_l = np.zeros(r, dtype=np.float64)
result = None
ct, wt = clock(), time()
for i in range(r):
ct_l[i], wt_l[i] = clock(), time()
if result == None:
result = func()
else:
func()
ct_l[i], wt_l[i] = clock() - ct_l[i], time() - wt_l[i]
ct, wt = clock() - ct, time() - wt
print "Using %s with ont: %2s, CPU time: %10.6f, wall time: %10.6f" % (func.__name__, os.environ['OMP_NUM_THREADS'], ct, wt)
print "CPU time average: %10.6f, wall time average: %10.6f" % (np.mean(ct_l), np.mean(wt_l))
print "CPU time stddevi: %10.6f, wall time stddevi: %10.6f" % (np.std(ct_l), np.std(wt_l))
print ""
sleep(i)
return result, ct_l, wt_l
def test_wrapper(func, matrix1=None, matrix2=None, a_T=0, b_T=0, rep=1, inter=1, ont=None):
    """Run test_timer for `func` once per OMP thread count in `ont`.

    Returns a list of dicts: result matrices, CPU/wall time arrays, the
    function name and the thread count used.

    Fixes:
      * `rep` used to be ignored -- the call passed the module-level global
        `repeats` instead (NameError when imported as a library).
      * `ont` had a mutable default list; now defaults to [1] via None.
    """
    if ont is None:
        ont = [1]
    resultlist = []
    for t in ont:
        # Set the OpenMP thread count both in the C helper and for children.
        mpi_util.mpi_setONT(ctypes.c_int(t))
        os.environ['OMP_NUM_THREADS'] = str(t)
        result = test_timer(partial(func, matrix1, matrix2, a_T, b_T), r=rep)
        resultlist.append({'result': result[0], 'ctlist': result[1], 'wtlist': result[2], 'name': func.__name__, 'ont': t})
        sleep(inter)
    return resultlist
def mmul(matrix):
    """Multiply an (A, B) pair: A @ B."""
    return ddot(matrix[0], matrix[1])

def mTmul(matrix):
    """Multiply an (A, B) pair: A.T @ B."""
    return ddot(matrix[0].T, matrix[1])

def mmTul(matrix):
    """Multiply an (A, B) pair: A @ B.T."""
    return ddot(matrix[0], matrix[1].T)

def mTmTul(matrix):
    """Multiply an (A, B) pair: A.T @ B.T."""
    return ddot(matrix[0].T, matrix[1].T)
def pmmul(matrix1,matrix2,trans_a=0,trans_b=0):
    """Pairwise matrix products of two matrix lists in a multiprocessing Pool.

    trans_a / trans_b select which operand of each pair is transposed.
    Returns the list of products in input order.
    """
    # NOTE(review): assumes SLURM_CPUS_PER_TASK is set -- raises KeyError
    # outside a SLURM allocation; confirm intended usage.
    nprocs = int(int(os.environ["SLURM_CPUS_PER_TASK"])/2)
    P = Pool(processes=nprocs,maxtasksperchild=int(len(matrix1)/nprocs+1))
    # Dispatch to the matching module-level helper (Pool workers need a
    # picklable, top-level callable).
    if(trans_a==1 and trans_b==0):
        result = P.map(mTmul,zip(matrix1,matrix2))
    elif(trans_a==1 and trans_b==1):
        result = P.map(mTmTul,zip(matrix1,matrix2))
    elif(trans_a==0 and trans_b==0):
        result = P.map(mmul,zip(matrix1,matrix2))
    else:
        result = P.map(mmTul,zip(matrix1,matrix2))
    P.close()
    P.join()
    return result
def dotbypmmul(matrix1, matrix2, trans_a=0, trans_b=0):
    """TODO: unimplemented placeholder for a dot product built on pmmul."""
    pass
def mpi_init():
    """Initialize MPI through the C helper; non-root ranks exit immediately.

    After this call only rank 0 continues running the Python script.
    NOTE(review): reads PMI_RANK from the environment -- raises KeyError
    outside an MPI launch; confirm that is intended.
    """
    mpi_util.mpi_init()
    if int(os.environ['PMI_RANK']) != 0:
        exit(0)

def mpi_final():
    """Finalize MPI through the C helper."""
    mpi_util.mpi_final()
def cddot(a, b, alpha=1, c=None, beta=0, ):
    '''Matrix-matrix multiplication for double precision arrays.

    Computes c = alpha * (a @ b) + beta * c through the C helper's
    NPdgemm, which takes explicit transpose flags and leading dimensions.
    Non-contiguous inputs are copied to C order; Fortran-ordered inputs are
    handled by flipping the transpose flag instead of copying.
    '''
    m = a.shape[0]
    k = a.shape[1]
    n = b.shape[1]
    if a.flags.c_contiguous:
        trans_a = 'N'
    elif a.flags.f_contiguous:
        trans_a = 'T'
        a = a.T  # view as the C-ordered transpose; the flag undoes it
    else:
        a = np.asarray(a, order='C')
        trans_a = 'N'
        #raise ValueError('a.flags: %s' % str(a.flags))
    assert(k == b.shape[0])
    if b.flags.c_contiguous:
        trans_b = 'N'
    elif b.flags.f_contiguous:
        trans_b = 'T'
        b = b.T
    else:
        b = np.asarray(b, order='C')
        trans_b = 'N'
        #raise ValueError('b.flags: %s' % str(b.flags))
    if c is None:
        c = np.empty((m,n))
        beta = 0  # freshly allocated output: ignore any caller-supplied beta
    else:
        assert(c.shape == (m,n))
    offseta=0
    offsetb=0
    offsetc=0
    # Degenerate (empty) operands: the product is all zeros.
    if a.size == 0 or b.size == 0:
        if beta == 0:
            c[:] = 0
        else:
            c[:] *= beta
        return c
    assert(a.flags.c_contiguous)
    assert(b.flags.c_contiguous)
    assert(c.flags.c_contiguous)
    # Arguments are swapped (b before a) because the C routine works on
    # column-major data: computing b.T @ a.T column-major equals a @ b
    # row-major -- same convention as pyscf's NPdgemm wrapper.
    mpi_util.NPdgemm(ctypes.c_char(trans_b.encode('ascii')),
                     ctypes.c_char(trans_a.encode('ascii')),
                     ctypes.c_int(n), ctypes.c_int(m), ctypes.c_int(k),
                     ctypes.c_int(b.shape[1]), ctypes.c_int(a.shape[1]),
                     ctypes.c_int(c.shape[1]),
                     ctypes.c_int(offsetb), ctypes.c_int(offseta),
                     ctypes.c_int(offsetc),
                     b.ctypes.data_as(ctypes.c_void_p),
                     a.ctypes.data_as(ctypes.c_void_p),
                     c.ctypes.data_as(ctypes.c_void_p),
                     ctypes.c_double(alpha), ctypes.c_double(beta))
    return c
def cdgemm(a, b, alpha=1, c=None, beta=0, ):
    '''Matrix-matrix multiplication for double precision arrays.

    Same contract as cddot (c = alpha * (a @ b) + beta * c), but drives the
    C helper's plain `dgemm` entry point, which takes no offset arguments.
    '''
    m = a.shape[0]
    k = a.shape[1]
    n = b.shape[1]
    if a.flags.c_contiguous:
        trans_a = 'N'
    elif a.flags.f_contiguous:
        trans_a = 'T'
        a = a.T  # view as the C-ordered transpose; the flag undoes it
    else:
        a = np.asarray(a, order='C')
        trans_a = 'N'
        #raise ValueError('a.flags: %s' % str(a.flags))
    assert(k == b.shape[0])
    if b.flags.c_contiguous:
        trans_b = 'N'
    elif b.flags.f_contiguous:
        trans_b = 'T'
        b = b.T
    else:
        b = np.asarray(b, order='C')
        trans_b = 'N'
        #raise ValueError('b.flags: %s' % str(b.flags))
    if c is None:
        c = np.empty((m,n))
        beta = 0  # freshly allocated output: ignore any caller-supplied beta
    else:
        assert(c.shape == (m,n))
    # Offsets are computed for symmetry with cddot but unused by dgemm.
    offseta=0
    offsetb=0
    offsetc=0
    # Degenerate (empty) operands: the product is all zeros.
    if a.size == 0 or b.size == 0:
        if beta == 0:
            c[:] = 0
        else:
            c[:] *= beta
        return c
    assert(a.flags.c_contiguous)
    assert(b.flags.c_contiguous)
    assert(c.flags.c_contiguous)
    # b/a are swapped for the column-major C routine (see cddot).
    mpi_util.dgemm(ctypes.c_char(trans_b.encode('ascii')),
                   ctypes.c_char(trans_a.encode('ascii')),
                   ctypes.c_int(n), ctypes.c_int(m), ctypes.c_int(k),
                   ctypes.c_int(b.shape[1]), ctypes.c_int(a.shape[1]),
                   ctypes.c_int(c.shape[1]),
                   b.ctypes.data_as(ctypes.c_void_p),
                   a.ctypes.data_as(ctypes.c_void_p),
                   c.ctypes.data_as(ctypes.c_void_p),
                   ctypes.c_double(alpha), ctypes.c_double(beta))
    return c
def imple_protocol(protocol, real_func):
    """Bind real_func as the `func` argument of protocol, keeping its name."""
    bound = partial(protocol, func=real_func)
    bound.__name__ = real_func.__name__
    return bound
def map_dot_protocol(a, b, a_T=0, b_T=0, func=np.dot):
    """Apply `func` pairwise over matrix lists a and b.

    a_T / b_T transpose the left / right operand of every pair.
    Returns the list of products in input order.
    """
    pairs = zip(a, b)
    if a_T and b_T:
        return [func(x.T, y.T) for x, y in pairs]
    if a_T:
        return [func(x.T, y) for x, y in pairs]
    if b_T:
        return [func(x, y.T) for x, y in pairs]
    return [func(x, y) for x, y in pairs]
def mpi_dot_protocol(a, b, a_T=0, b_T=0, func=mpi_util.mpi_lNPdgemm):
    """Batched pairwise matrix products through one call into the C helper.

    Marshals every (a[i], b[i]) pair into parallel ctypes arrays (pointers,
    dimensions, leading dims, transpose flags) and hands the whole batch to
    `func` in a single call. Returns the list of freshly allocated C-ordered
    result matrices.
    """
    assert(len(a) == len(b))
    matrix_num = len(a)
    c = []
    #a_transposed = []
    #b_transposed = []
    # Per-matrix argument arrays, one slot per pair, passed to C by pointer.
    ptr2a = np.empty(matrix_num, dtype=ctypes.c_void_p)
    ptr2b = np.empty(matrix_num, dtype=ctypes.c_void_p)
    ptr2c = np.empty(matrix_num, dtype=ctypes.c_void_p)
    m_arr = np.empty(matrix_num, dtype=np.int32)
    k_arr = np.empty(matrix_num, dtype=np.int32)
    n_arr = np.empty(matrix_num, dtype=np.int32)
    lda_arr = np.empty(matrix_num, dtype=np.int32)
    ldb_arr = np.empty(matrix_num, dtype=np.int32)
    ldc_arr = np.empty(matrix_num, dtype=np.int32)
    tra_a = np.empty(matrix_num, dtype=bytes)
    tra_b = np.empty(matrix_num, dtype=bytes)
    # completed[i] == 1 tells the C side to skip a degenerate (empty) pair.
    completed = np.zeros(matrix_num, dtype=np.int32)
    offfseta = np.zeros(matrix_num, dtype=np.int32)
    offfsetb = np.zeros(matrix_num, dtype=np.int32)
    offfsetc = np.zeros(matrix_num, dtype=np.int32)
    alpha = np.ones(matrix_num, dtype=np.float64)
    beta = np.zeros(matrix_num, dtype=np.float64)
    for i in range(matrix_num):
        # Resolve a[i]'s effective dims, transpose flag and leading dim,
        # flipping the flag (not copying) for Fortran-ordered inputs.
        if a_T == 0:
            m_arr[i] = a[i].shape[0]
            k_arr[i] = a[i].shape[1]
            if a[i].flags.c_contiguous:
                tra_a[i] = 'N'.encode('ascii')
                lda_arr[i] = a[i].shape[1]
            elif a[i].flags.f_contiguous:
                tra_a[i] = 'T'.encode('ascii')
                lda_arr[i] = a[i].shape[0]
                #a[i] = a[i].T
                #a_transposed.append(i)
            else:
                a[i] = np.asarray(a[i], order='C')
                tra_a[i] = 'N'.encode('ascii')
                lda_arr[i] = a[i].shape[1]
                #raise ValueError('a.flags: %s' % str(a.flags))
        else:
            m_arr[i] = a[i].shape[1]
            k_arr[i] = a[i].shape[0]
            if a[i].flags.c_contiguous:
                tra_a[i] = 'T'.encode('ascii')
                lda_arr[i] = a[i].shape[1]
            elif a[i].flags.f_contiguous:
                tra_a[i] = 'N'.encode('ascii')
                lda_arr[i] = a[i].shape[0]
                #a[i] = a[i].T
                #a_transposed.append(i)
            else:
                a[i] = np.asarray(a[i], order='C')
                tra_a[i] = 'T'.encode('ascii')
                lda_arr[i] = a[i].shape[1]
                #raise ValueError('a.flags: %s' % str(a.flags))
        # Same resolution for b[i], checking the shared dimension k.
        if b_T == 0:
            assert(k_arr[i] == b[i].shape[0])
            n_arr[i] = b[i].shape[1]
            if b[i].flags.c_contiguous:
                tra_b[i] = 'N'.encode('ascii')
                ldb_arr[i] = b[i].shape[1]
            elif b[i].flags.f_contiguous:
                tra_b[i] = 'T'.encode('ascii')
                ldb_arr[i] = b[i].shape[0]
                #b[i] = b[i].T
                #b_transposed.append(i)
            else:
                b[i] = np.asarray(b[i], order='C')
                tra_b[i] = 'N'.encode('ascii')
                ldb_arr[i] = b[i].shape[1]
                #raise ValueError('b.flags: %s' % str(b.flags))
        else:
            assert(k_arr[i] == b[i].shape[1])
            n_arr[i] = b[i].shape[0]
            if b[i].flags.c_contiguous:
                tra_b[i] = 'T'.encode('ascii')
                ldb_arr[i] = b[i].shape[1]
            elif b[i].flags.f_contiguous:
                tra_b[i] = 'N'.encode('ascii')
                ldb_arr[i] = b[i].shape[0]
                #b[i] = b[i].T
                #b_transposed.append(i)
            else:
                b[i] = np.asarray(b[i], order='C')
                tra_b[i] = 'T'.encode('ascii')
                ldb_arr[i] = b[i].shape[1]
                #raise ValueError('b.flags: %s' % str(b.flags))
        c.append(np.empty((m_arr[i], n_arr[i]), dtype=np.float64, order='C'))
        # Empty operands: fill the result in Python and mark the slot done.
        if a[i].size == 0 or b[i].size == 0:
            if beta[i] == 0:
                c[i][:] = 0
            else:
                # NOTE(review): this multiplies by the whole `beta` vector,
                # not beta[i] -- looks wrong, but is unreachable here since
                # beta is initialized to all zeros; confirm before changing.
                c[i][:] *= beta
            completed[i] = 1
            #print "ERROR!!"
        #assert(a[i].flags.c_contiguous)
        #assert(b[i].flags.c_contiguous)
        ptr2a[i] = a[i].ctypes.data
        ptr2b[i] = b[i].ctypes.data
        ptr2c[i] = c[i].ctypes.data
        #lda_arr[i] = a[i].shape[1]
        #ldb_arr[i] = b[i].shape[1]
        ldc_arr[i] = c[i].shape[1]
    #END FOR
    # b/a argument order is swapped for the column-major C routine,
    # mirroring the single-matrix cddot wrapper above.
    func(tra_b.ctypes.data_as(ctypes.c_void_p),
         tra_a.ctypes.data_as(ctypes.c_void_p),
         n_arr.ctypes.data_as(ctypes.c_void_p),
         m_arr.ctypes.data_as(ctypes.c_void_p),
         k_arr.ctypes.data_as(ctypes.c_void_p),
         ldb_arr.ctypes.data_as(ctypes.c_void_p),
         lda_arr.ctypes.data_as(ctypes.c_void_p),
         ldc_arr.ctypes.data_as(ctypes.c_void_p),
         offfsetb.ctypes.data_as(ctypes.c_void_p),
         offfseta.ctypes.data_as(ctypes.c_void_p),
         offfsetc.ctypes.data_as(ctypes.c_void_p),
         ptr2b.ctypes.data_as(ctypes.c_void_p),
         ptr2a.ctypes.data_as(ctypes.c_void_p),
         ptr2c.ctypes.data_as(ctypes.c_void_p),
         alpha.ctypes.data_as(ctypes.c_void_p),
         beta.ctypes.data_as(ctypes.c_void_p),
         ctypes.c_int(matrix_num),
         completed.ctypes.data_as(ctypes.c_void_p))
    return c
def mpi_print(l, a):
    # Hand array `a` (length l) to the C helper's MPI print routine.
    # NOTE(review): element type expected by the C side is not visible
    # here -- presumably float64; confirm against the C signature.
    mpi_util.mpi_print(ctypes.c_int(l), a.ctypes.data_as(ctypes.c_void_p))

def omp_print(l, a):
    # Same as mpi_print, but through the helper's OpenMP print routine.
    mpi_util.omp_print(ctypes.c_int(l), a.ctypes.data_as(ctypes.c_void_p))
def test_checker(resultlist, threshold=1e-3):
    """Cross-check every run's result matrices against the first run's.

    Each entry of resultlist is a dict produced by test_wrapper ('result',
    'name', 'ont', ...). Matrices differing from the base run by more than
    `threshold` anywhere are printed for inspection. (Python 2 module.)
    """
    if len(resultlist) < 2:
        print "Only one result, skip checking!"
    else:
        base = resultlist[0]
        print "Start checking!"
        print "Using function", base['name'], "with OMP_NUM_THREADS =", base['ont'], "as base"
        for i in resultlist:
            if not i is base:
                print "Checking", i['name'], "against base:"
                for index in range(len(base['result'])):
                    # element-wise absolute comparison against the base run
                    if not (np.abs(i['result'][index] - base['result'][index]) < threshold).all():
                        print "Not equal at index", index  #, "Summed abs difference:"
                        print base['result'][index]
                        print '@@@@@@@@@@@@@@@@@@@@@@@'
                        print i['result'][index]
                        #print np.abs(i['result'][index] - base['result'][index]).sum()
                        print "------------------------------------"
                print "Finish checking", i['name']
        print "End of checking!"
if __name__ == '__main__':
    # Benchmark driver: build random square matrix pairs, multiply them with
    # several backends, cross-check the results and print timing statistics.
    # (Python 2 script: print statements, xrange.)
    np.random.seed(0)
    mpi_init()
    #np.set_printoptions(threshold=np.nan)
    l = 1            # number of matrix pairs
    low = 1000       # lower bound for random dimensions
    m = 0
    k = 0
    n = 0
    #p = 0
    hig = low + 1    # exclusive upper bound -> dimensions are exactly `low`
    interval = 0     # sleep between thread-count runs
    repeats = 7      # timing repetitions per backend
    a = []
    b = []
    c = []
    T_cddot = np.zeros(repeats, dtype=np.float64)
    T_lddot = np.zeros(repeats, dtype=np.float64)
    m = np.random.randint(low, high=hig)
    k = np.random.randint(low, high=hig)
    n = np.random.randint(low, high=hig)
    dummy = 0
    print "Start to generate ramdom square matrix list"
    T_gen = time()
    for i in range(l):
        # m = np.random.randint(low, high=hig)
        # k = np.random.randint(low, high=hig)
        # n = np.random.randint(low, high=hig)
        #p = np.random.randint(low, high=hig)
        # temp = np.random.rand(m,k) * 2
        # #temp2 = np.random.rand(,)
        # a.append()
        # b.append(temp)
        # c.append(np.random.rand(n,p) * 20)
        #b[i] = b[i].T
        # a.append(np.array(np.ones((m,m)), dtype=np.float64))
        # b.append(np.array(np.ones((m,m)), dtype=np.float64))
        # a.append(np.array(np.zeros((m,m)), dtype=np.float64))
        # b.append(np.array(np.zeros((m,m)), dtype=np.float64))
        a.append(np.array(np.random.rand(m,m), dtype=np.float64))
        b.append(np.array(np.random.rand(m,m), dtype=np.float64))
        # a.append(np.array(np.arange(m * k), dtype=np.float64).reshape(m, k) + 2 * i)
        # b.append(np.array(np.arange(n * k), dtype=np.float64).reshape(k, n) + 2 * i + 1)
    # print a[50]
    # print "============"
    # print b[50]
    print "Finish generating the random matrix of length:", l, "and size:",m, k, n#, p
    print "Time used:", time() - T_gen
    print "Running test cases for", repeats, "cycles"
    # All backends share the same matrices and run configuration.
    wrapper = partial(test_wrapper, matrix1=a, matrix2=b, a_T = 0, b_T = 0, rep=repeats, inter=interval)
    funclist = [imple_protocol(map_dot_protocol, np.dot),
                imple_protocol(map_dot_protocol, ddot),
                #pmmul,
                #imple_protocol(mpi_dot_protocol, mpi_util.mpi_ldgemm),
                #imple_protocol(mpi_dot_protocol, mpi_util.mpi_lOMPdgemm),
                imple_protocol(mpi_dot_protocol, mpi_util.mpi_lNPdgemm),
                #imple_protocol(map_dot_protocol, cdgemm),
                #imple_protocol(map_dot_protocol, cddot),
                ]
    resultlist = list(map(wrapper, funclist))
    # Flatten the per-backend result lists into one list of run dicts.
    result = []
    #print len(resultlist[0])
    for i in resultlist:
        result +=i
    #print len(result)
    test_checker(result)
    # 1: map numpy dot, omp=cpu_per_task, 1
    # 2: map pyscf ddot, omp=cpu_per_task, 1
    # 3: pmmul use ddot, omp=cpu_per_task, 1
    # 4: lOMPdgemm, omp=cpu_per_task, 1
    # 5: ldgemm, omp=cpu_per_task, 1
    # 6: lNPdgemm, omp=cpu_per_task, 1
    # 7: map cdgemm, omp=cpu_per_task, 1
    # 8: map cddot, omp=cpu_per_task, 1
    # Mean and stddev of wall times, 5 values per output row.
    print 'time'
    for i in xrange(len(result)):
        sys.stdout.write(str(np.mean(result[i]['wtlist'])) + ' ')
        if (i + 1) % 5 == 0:
            print ''
    print ''
    print 'stdev'
    for i in xrange(len(result)):
        sys.stdout.write(str(np.std(result[i]['wtlist'])) + ' ')
        if (i + 1) % 5 == 0:
            print ''
    mpi_final()
| StarcoderdataPython |
1760611 | from .models import Feedback
class FeedbackDAO:
    """Data-access helpers for the Feedback model.

    All methods were defined without ``self`` and without ``@staticmethod``,
    which only worked when called on the class itself; decorators added so
    instance-based calls work too (backward compatible).
    """

    @staticmethod
    def save_feedback(is_anonymous, user_id, title, content, feedbackid):
        """Create and persist a new Feedback row."""
        new_feedback = Feedback(feedbackid=feedbackid, anonimity=is_anonymous, title=title, content=content, userid=user_id)
        new_feedback.save()

    @staticmethod
    def getFeedbacks():
        """Return a queryset of all Feedback rows."""
        return Feedback.objects.all()

    @staticmethod
    def getFeedback(feedbackid):
        """Return the Feedback row with the given id.

        Propagates Django's ``DoesNotExist`` when no row matches.
        """
        return Feedback.objects.get(feedbackid=feedbackid)
| StarcoderdataPython |
class SymbolTable(object):
    """Simple symbol table mapping identifiers to [type, scope] entries."""

    def __init__(self):
        self._symbols = {}

    def __str__(self):
        # Render a small report: header plus one "name: [type, scope]" line
        # per declared symbol.
        symtab_header = 'Tabela de Simbolos'
        lines = ['\n', symtab_header, '_' * len(symtab_header)]
        lines.extend(
            ('%7s: %r' % (key, value))
            for key, value in self._symbols.items()
        )
        return '\n'.join(lines)

    def insert(self, symbol, type, scope):
        """Declare `symbol` with `type` and `scope`; raise on redeclaration.

        (`type` shadows the builtin; the name is kept for interface
        compatibility with existing callers.)
        """
        print('Insert: ' + symbol + ' with type ' + type + ' with scope ' + str(scope) + '\n')
        entry = self.find(symbol)
        if entry is None:  # only insert if not already declared
            self._symbols[symbol] = [type, scope]
        else:
            # BUGFIX: `entry` is a [type, scope] list; the old code
            # concatenated str + list, raising TypeError instead of the
            # intended duplicate-declaration error. Report the stored type.
            raise Exception(
                'Símbolo ' + symbol + ' já declarado como ' + entry[0]
            )

    def find(self, name):
        """Return the [type, scope] entry for `name`, or None if undeclared."""
        return self._symbols.get(name)
77983 | <filename>seleniumbase/translate/japanese.py
# Japanese Language Translations - Python 3 Only!
from seleniumbase import BaseCase
class セレンテストケース(BaseCase):  # noqa
    """Japanese-language wrappers around SeleniumBase BaseCase methods.

    BUGFIX: every wrapper used to discard the wrapped call's return value,
    so getter-style methods (e.g. テキストを取得 / get_text, 現在のURLを取得 /
    get_current_url, 要素は表示されますか / is_element_visible) always yielded
    None. Each wrapper now returns the underlying result; action methods
    return None either way, so their behavior is unchanged.
    """

    def URLを開く(self, *args, **kwargs):
        # open(url)
        return self.open(*args, **kwargs)

    def クリックして(self, *args, **kwargs):
        # click(selector)
        return self.click(*args, **kwargs)

    def ダブルクリックして(self, *args, **kwargs):
        # double_click(selector)
        return self.double_click(*args, **kwargs)

    def ゆっくりクリックして(self, *args, **kwargs):
        # slow_click(selector)
        return self.slow_click(*args, **kwargs)

    def リンクテキストをクリックします(self, *args, **kwargs):
        # click_link_text(link_text)
        return self.click_link_text(*args, **kwargs)

    def テキストを更新(self, *args, **kwargs):
        # update_text(selector, new_value)
        return self.update_text(*args, **kwargs)

    def テキストを追加(self, *args, **kwargs):
        # add_text(selector, new_value)
        return self.add_text(*args, **kwargs)

    def テキストを取得(self, *args, **kwargs):
        # get_text(selector, new_value)
        return self.get_text(*args, **kwargs)

    def テキストを確認する(self, *args, **kwargs):
        # assert_text(text, selector)
        return self.assert_text(*args, **kwargs)

    def 正確なテキストを確認する(self, *args, **kwargs):
        # assert_exact_text(text, selector)
        return self.assert_exact_text(*args, **kwargs)

    def 要素を確認する(self, *args, **kwargs):
        # assert_element(selector)
        return self.assert_element(*args, **kwargs)

    def タイトルを確認(self, *args, **kwargs):
        # assert_title(title)
        return self.assert_title(*args, **kwargs)

    def 検証が正しい(self, *args, **kwargs):
        # assert_true(expr)
        return self.assert_true(*args, **kwargs)

    def 検証は偽です(self, *args, **kwargs):
        # assert_false(expr)
        return self.assert_false(*args, **kwargs)

    def 検証が等しい(self, *args, **kwargs):
        # assert_equal(first, second)
        return self.assert_equal(*args, **kwargs)

    def 検証が等しくない(self, *args, **kwargs):
        # assert_not_equal(first, second)
        return self.assert_not_equal(*args, **kwargs)

    def ページを更新する(self, *args, **kwargs):
        # refresh_page()
        return self.refresh_page(*args, **kwargs)

    def 現在のURLを取得(self, *args, **kwargs):
        # get_current_url()
        return self.get_current_url(*args, **kwargs)

    def ページのソースコードを取得する(self, *args, **kwargs):
        # get_page_source()
        return self.get_page_source(*args, **kwargs)

    def 戻る(self, *args, **kwargs):
        # go_back()
        return self.go_back(*args, **kwargs)

    def 進む(self, *args, **kwargs):
        # go_forward()
        return self.go_forward(*args, **kwargs)

    def テキストが表示されています(self, *args, **kwargs):
        # is_text_visible(text, selector="html")
        return self.is_text_visible(*args, **kwargs)

    def 要素は表示されますか(self, *args, **kwargs):
        # is_element_visible(selector)
        return self.is_element_visible(*args, **kwargs)

    def 要素が存在するかどうか(self, *args, **kwargs):
        # is_element_present(selector)
        return self.is_element_present(*args, **kwargs)

    def テキストを待つ(self, *args, **kwargs):
        # wait_for_text(text, selector)
        return self.wait_for_text(*args, **kwargs)

    def 要素を待つ(self, *args, **kwargs):
        # wait_for_element(selector)
        return self.wait_for_element(*args, **kwargs)

    def 眠る(self, *args, **kwargs):
        # sleep(seconds)
        return self.sleep(*args, **kwargs)

    def を提出す(self, *args, **kwargs):
        # submit(selector)
        return self.submit(*args, **kwargs)

    def JSクリックして(self, *args, **kwargs):
        # js_click(selector)
        return self.js_click(*args, **kwargs)

    def htmlをチェック(self, *args, **kwargs):
        # inspect_html()
        return self.inspect_html(*args, **kwargs)

    def スクリーンショットを保存(self, *args, **kwargs):
        # save_screenshot(name)
        return self.save_screenshot(*args, **kwargs)

    def ファイルを選択(self, *args, **kwargs):
        # choose_file(selector, file_path)
        return self.choose_file(*args, **kwargs)

    def スクリプトを実行する(self, *args, **kwargs):
        # execute_script(script)
        return self.execute_script(*args, **kwargs)

    def 広告ブロック(self, *args, **kwargs):
        # ad_block()
        return self.ad_block(*args, **kwargs)

    def スキップする(self, *args, **kwargs):
        # skip(reason="")
        return self.skip(*args, **kwargs)

    def リンク切れを確認する(self, *args, **kwargs):
        # assert_no_404_errors()
        return self.assert_no_404_errors(*args, **kwargs)

    def JSエラーを確認する(self, *args, **kwargs):
        # assert_no_js_errors()
        return self.assert_no_js_errors(*args, **kwargs)

    def フレームに切り替え(self, *args, **kwargs):
        # switch_to_frame(frame)
        return self.switch_to_frame(*args, **kwargs)

    def デフォルトのコンテンツに切り替える(self, *args, **kwargs):
        # switch_to_default_content()
        return self.switch_to_default_content(*args, **kwargs)

    def 新しいウィンドウを開く(self, *args, **kwargs):
        # open_new_window()
        return self.open_new_window(*args, **kwargs)

    def ウィンドウに切り替え(self, *args, **kwargs):
        # switch_to_window(window)
        return self.switch_to_window(*args, **kwargs)

    def デフォルトのウィンドウに切り替える(self, *args, **kwargs):
        # switch_to_default_window()
        return self.switch_to_default_window(*args, **kwargs)

    def ハイライト(self, *args, **kwargs):
        # highlight(selector)
        return self.highlight(*args, **kwargs)

    def ハイライトしてクリックして(self, *args, **kwargs):
        # highlight_click(selector)
        return self.highlight_click(*args, **kwargs)

    def スクロールして(self, *args, **kwargs):
        # scroll_to(selector)
        return self.scroll_to(*args, **kwargs)

    def 一番上までスクロール(self, *args, **kwargs):
        # scroll_to_top()
        return self.scroll_to_top(*args, **kwargs)

    def 一番下までスクロール(self, *args, **kwargs):
        # scroll_to_bottom()
        return self.scroll_to_bottom(*args, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.