hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72ff241d91ef455622d3abfa9df1af912c8d07d | 6,207 | py | Python | pyqtgraph/console/template_pyqt5.py | StSav012/pyqtgraph | 65e17c4e3707eb3bd4d91cdc13504d9b150f4360 | [
"MIT"
] | 1 | 2022-01-30T20:04:51.000Z | 2022-01-30T20:04:51.000Z | pyqtgraph/console/template_pyqt5.py | StSav012/pyqtgraph | 65e17c4e3707eb3bd4d91cdc13504d9b150f4360 | [
"MIT"
] | null | null | null | pyqtgraph/console/template_pyqt5.py | StSav012/pyqtgraph | 65e17c4e3707eb3bd4d91cdc13504d9b150f4360 | [
"MIT"
] | null | null | null |
# Form implementation generated from reading ui file 'pyqtgraph/console/template.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """Auto-generated (pyuic5) layout class for the pyqtgraph console widget.

    Builds: an output pane with a command-input row, a command-history
    list, and an exception-handling group box, stacked in a vertical
    splitter.  Do not hand-edit widget construction here -- regenerate
    from ``template.ui`` instead; comments/docstrings will be lost on
    regeneration.
    """
    def setupUi(self, Form):
        """Instantiate and lay out every child widget on ``Form``."""
        Form.setObjectName("Form")
        Form.resize(739, 497)
        self.gridLayout = QtWidgets.QGridLayout(Form)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setObjectName("gridLayout")
        # Vertical splitter holds: console area, history list, exception group
        self.splitter = QtWidgets.QSplitter(Form)
        self.splitter.setOrientation(QtCore.Qt.Vertical)
        self.splitter.setObjectName("splitter")
        self.layoutWidget = QtWidgets.QWidget(self.splitter)
        self.layoutWidget.setObjectName("layoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
        self.verticalLayout.setObjectName("verticalLayout")
        # Read-only monospace output pane
        self.output = QtWidgets.QPlainTextEdit(self.layoutWidget)
        font = QtGui.QFont()
        font.setFamily("Monospace")
        self.output.setFont(font)
        self.output.setReadOnly(True)
        self.output.setObjectName("output")
        self.verticalLayout.addWidget(self.output)
        # Input row: CmdInput line edit plus History/Exceptions toggle buttons
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.input = CmdInput(self.layoutWidget)
        self.input.setObjectName("input")
        self.horizontalLayout.addWidget(self.input)
        self.historyBtn = QtWidgets.QPushButton(self.layoutWidget)
        self.historyBtn.setCheckable(True)
        self.historyBtn.setObjectName("historyBtn")
        self.horizontalLayout.addWidget(self.historyBtn)
        self.exceptionBtn = QtWidgets.QPushButton(self.layoutWidget)
        self.exceptionBtn.setCheckable(True)
        self.exceptionBtn.setObjectName("exceptionBtn")
        self.horizontalLayout.addWidget(self.exceptionBtn)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.historyList = QtWidgets.QListWidget(self.splitter)
        font = QtGui.QFont()
        font.setFamily("Monospace")
        self.historyList.setFont(font)
        self.historyList.setObjectName("historyList")
        # Exception-handling controls (shown when the Exceptions button is toggled)
        self.exceptionGroup = QtWidgets.QGroupBox(self.splitter)
        self.exceptionGroup.setObjectName("exceptionGroup")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.exceptionGroup)
        self.gridLayout_2.setContentsMargins(-1, 0, -1, 0)
        self.gridLayout_2.setHorizontalSpacing(2)
        self.gridLayout_2.setVerticalSpacing(0)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.clearExceptionBtn = QtWidgets.QPushButton(self.exceptionGroup)
        self.clearExceptionBtn.setEnabled(False)
        self.clearExceptionBtn.setObjectName("clearExceptionBtn")
        self.gridLayout_2.addWidget(self.clearExceptionBtn, 0, 6, 1, 1)
        self.catchAllExceptionsBtn = QtWidgets.QPushButton(self.exceptionGroup)
        self.catchAllExceptionsBtn.setCheckable(True)
        self.catchAllExceptionsBtn.setObjectName("catchAllExceptionsBtn")
        self.gridLayout_2.addWidget(self.catchAllExceptionsBtn, 0, 1, 1, 1)
        self.catchNextExceptionBtn = QtWidgets.QPushButton(self.exceptionGroup)
        self.catchNextExceptionBtn.setCheckable(True)
        self.catchNextExceptionBtn.setObjectName("catchNextExceptionBtn")
        self.gridLayout_2.addWidget(self.catchNextExceptionBtn, 0, 0, 1, 1)
        self.onlyUncaughtCheck = QtWidgets.QCheckBox(self.exceptionGroup)
        self.onlyUncaughtCheck.setChecked(True)
        self.onlyUncaughtCheck.setObjectName("onlyUncaughtCheck")
        self.gridLayout_2.addWidget(self.onlyUncaughtCheck, 0, 4, 1, 1)
        self.exceptionStackList = QtWidgets.QListWidget(self.exceptionGroup)
        self.exceptionStackList.setAlternatingRowColors(True)
        self.exceptionStackList.setObjectName("exceptionStackList")
        self.gridLayout_2.addWidget(self.exceptionStackList, 2, 0, 1, 7)
        self.runSelectedFrameCheck = QtWidgets.QCheckBox(self.exceptionGroup)
        self.runSelectedFrameCheck.setChecked(True)
        self.runSelectedFrameCheck.setObjectName("runSelectedFrameCheck")
        self.gridLayout_2.addWidget(self.runSelectedFrameCheck, 3, 0, 1, 7)
        self.exceptionInfoLabel = QtWidgets.QLabel(self.exceptionGroup)
        self.exceptionInfoLabel.setWordWrap(True)
        self.exceptionInfoLabel.setObjectName("exceptionInfoLabel")
        self.gridLayout_2.addWidget(self.exceptionInfoLabel, 1, 0, 1, 7)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem, 0, 5, 1, 1)
        self.label = QtWidgets.QLabel(self.exceptionGroup)
        self.label.setObjectName("label")
        self.gridLayout_2.addWidget(self.label, 0, 2, 1, 1)
        self.filterText = QtWidgets.QLineEdit(self.exceptionGroup)
        self.filterText.setObjectName("filterText")
        self.gridLayout_2.addWidget(self.filterText, 0, 3, 1, 1)
        self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Assign all user-visible strings through Qt's translation hook."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Console"))
        self.historyBtn.setText(_translate("Form", "History.."))
        self.exceptionBtn.setText(_translate("Form", "Exceptions.."))
        self.exceptionGroup.setTitle(_translate("Form", "Exception Handling"))
        self.clearExceptionBtn.setText(_translate("Form", "Clear Stack"))
        self.catchAllExceptionsBtn.setText(_translate("Form", "Show All Exceptions"))
        self.catchNextExceptionBtn.setText(_translate("Form", "Show Next Exception"))
        self.onlyUncaughtCheck.setText(_translate("Form", "Only Uncaught Exceptions"))
        self.runSelectedFrameCheck.setText(_translate("Form", "Run commands in selected stack frame"))
        self.exceptionInfoLabel.setText(_translate("Form", "Stack Trace"))
        self.label.setText(_translate("Form", "Filter (regex):"))
from .CmdInput import CmdInput
| 53.973913 | 114 | 0.72241 |
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(739, 497)
self.gridLayout = QtWidgets.QGridLayout(Form)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.splitter = QtWidgets.QSplitter(Form)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName("splitter")
self.layoutWidget = QtWidgets.QWidget(self.splitter)
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setObjectName("verticalLayout")
self.output = QtWidgets.QPlainTextEdit(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("Monospace")
self.output.setFont(font)
self.output.setReadOnly(True)
self.output.setObjectName("output")
self.verticalLayout.addWidget(self.output)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.input = CmdInput(self.layoutWidget)
self.input.setObjectName("input")
self.horizontalLayout.addWidget(self.input)
self.historyBtn = QtWidgets.QPushButton(self.layoutWidget)
self.historyBtn.setCheckable(True)
self.historyBtn.setObjectName("historyBtn")
self.horizontalLayout.addWidget(self.historyBtn)
self.exceptionBtn = QtWidgets.QPushButton(self.layoutWidget)
self.exceptionBtn.setCheckable(True)
self.exceptionBtn.setObjectName("exceptionBtn")
self.horizontalLayout.addWidget(self.exceptionBtn)
self.verticalLayout.addLayout(self.horizontalLayout)
self.historyList = QtWidgets.QListWidget(self.splitter)
font = QtGui.QFont()
font.setFamily("Monospace")
self.historyList.setFont(font)
self.historyList.setObjectName("historyList")
self.exceptionGroup = QtWidgets.QGroupBox(self.splitter)
self.exceptionGroup.setObjectName("exceptionGroup")
self.gridLayout_2 = QtWidgets.QGridLayout(self.exceptionGroup)
self.gridLayout_2.setContentsMargins(-1, 0, -1, 0)
self.gridLayout_2.setHorizontalSpacing(2)
self.gridLayout_2.setVerticalSpacing(0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.clearExceptionBtn = QtWidgets.QPushButton(self.exceptionGroup)
self.clearExceptionBtn.setEnabled(False)
self.clearExceptionBtn.setObjectName("clearExceptionBtn")
self.gridLayout_2.addWidget(self.clearExceptionBtn, 0, 6, 1, 1)
self.catchAllExceptionsBtn = QtWidgets.QPushButton(self.exceptionGroup)
self.catchAllExceptionsBtn.setCheckable(True)
self.catchAllExceptionsBtn.setObjectName("catchAllExceptionsBtn")
self.gridLayout_2.addWidget(self.catchAllExceptionsBtn, 0, 1, 1, 1)
self.catchNextExceptionBtn = QtWidgets.QPushButton(self.exceptionGroup)
self.catchNextExceptionBtn.setCheckable(True)
self.catchNextExceptionBtn.setObjectName("catchNextExceptionBtn")
self.gridLayout_2.addWidget(self.catchNextExceptionBtn, 0, 0, 1, 1)
self.onlyUncaughtCheck = QtWidgets.QCheckBox(self.exceptionGroup)
self.onlyUncaughtCheck.setChecked(True)
self.onlyUncaughtCheck.setObjectName("onlyUncaughtCheck")
self.gridLayout_2.addWidget(self.onlyUncaughtCheck, 0, 4, 1, 1)
self.exceptionStackList = QtWidgets.QListWidget(self.exceptionGroup)
self.exceptionStackList.setAlternatingRowColors(True)
self.exceptionStackList.setObjectName("exceptionStackList")
self.gridLayout_2.addWidget(self.exceptionStackList, 2, 0, 1, 7)
self.runSelectedFrameCheck = QtWidgets.QCheckBox(self.exceptionGroup)
self.runSelectedFrameCheck.setChecked(True)
self.runSelectedFrameCheck.setObjectName("runSelectedFrameCheck")
self.gridLayout_2.addWidget(self.runSelectedFrameCheck, 3, 0, 1, 7)
self.exceptionInfoLabel = QtWidgets.QLabel(self.exceptionGroup)
self.exceptionInfoLabel.setWordWrap(True)
self.exceptionInfoLabel.setObjectName("exceptionInfoLabel")
self.gridLayout_2.addWidget(self.exceptionInfoLabel, 1, 0, 1, 7)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem, 0, 5, 1, 1)
self.label = QtWidgets.QLabel(self.exceptionGroup)
self.label.setObjectName("label")
self.gridLayout_2.addWidget(self.label, 0, 2, 1, 1)
self.filterText = QtWidgets.QLineEdit(self.exceptionGroup)
self.filterText.setObjectName("filterText")
self.gridLayout_2.addWidget(self.filterText, 0, 3, 1, 1)
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Console"))
self.historyBtn.setText(_translate("Form", "History.."))
self.exceptionBtn.setText(_translate("Form", "Exceptions.."))
self.exceptionGroup.setTitle(_translate("Form", "Exception Handling"))
self.clearExceptionBtn.setText(_translate("Form", "Clear Stack"))
self.catchAllExceptionsBtn.setText(_translate("Form", "Show All Exceptions"))
self.catchNextExceptionBtn.setText(_translate("Form", "Show Next Exception"))
self.onlyUncaughtCheck.setText(_translate("Form", "Only Uncaught Exceptions"))
self.runSelectedFrameCheck.setText(_translate("Form", "Run commands in selected stack frame"))
self.exceptionInfoLabel.setText(_translate("Form", "Stack Trace"))
self.label.setText(_translate("Form", "Filter (regex):"))
from .CmdInput import CmdInput
| true | true |
f72ff2fe325c5f00daf0160abd5517c0408afd68 | 9,889 | py | Python | helper/phillips_hue_wrapper.py | andrewtatham/enviroplus-python | 213eee4ab7c72cafd4d5fc5a33eb24397b665822 | [
"MIT"
] | null | null | null | helper/phillips_hue_wrapper.py | andrewtatham/enviroplus-python | 213eee4ab7c72cafd4d5fc5a33eb24397b665822 | [
"MIT"
] | null | null | null | helper/phillips_hue_wrapper.py | andrewtatham/enviroplus-python | 213eee4ab7c72cafd4d5fc5a33eb24397b665822 | [
"MIT"
] | null | null | null | import datetime
import pprint
import random
import time
from itertools import cycle
from phue import Bridge
from helper import colour_helper
class HueWrapper(object):
    """Convenience wrapper around a Philips Hue bridge (via ``phue``).

    Holds a list of light configs (name + whether the light is a colour
    bulb), a cycle of lighting "profiles" (bright white / normal /
    colour), and bulk operations that apply a setting to every connected
    light at once.

    NOTE(review): the default profiles (``self.bright_white_mode`` etc.)
    are only created when ``profiles`` is None, but ``do_whatever``
    references them unconditionally -- passing a custom ``profiles``
    list would break ``do_whatever``; confirm intended usage.
    """
    def __init__(self, bridge_ip='192.168.1.73', light_configs=None, profiles=None):
        """Build light configs and profile cycle; does NOT contact the bridge.

        Parameters
        ----------
        bridge_ip : str
            IP address of the Hue bridge on the local network.
        light_configs : list of dict or None
            Each dict has 'name' and 'is_colour' keys; defaults to the
            author's home setup.
        profiles : list of dict or None
            Lighting profiles to cycle through; defaults to
            bright-white / normal / colour modes.
        """
        if not light_configs:
            light_configs = [
                {'name': 'Hue color spot 1', 'is_colour': True},
                {'name': 'Hue color spot 2', 'is_colour': True},
                {'name': 'Hue color spot 3', 'is_colour': True},
                {'name': 'DEATH STAR', 'is_colour': True},
                {'name': 'Right Colour Strip', 'is_colour': True},
                {'name': 'Right White Strip', 'is_colour': False},
                {'name': 'Left Colour Strip', 'is_colour': True},
                {'name': 'Left White Strip', 'is_colour': False},
            ]
        if not profiles:
            # Every light on, driven as warm/cool white.
            self.bright_white_mode = {
                'name': 'bright white',
                'profile_state': {},
                'lights': {
                    'Hue color spot 1': {'is_on': True, 'light_state': {}, 'func': self._normal_func},
                    'Hue color spot 2': {'is_on': True, 'light_state': {}, 'func': self._normal_func},
                    'Hue color spot 3': {'is_on': True, 'light_state': {}, 'func': self._normal_func},
                    'DEATH STAR': {'is_on': True, 'light_state': {}, 'func': self._normal_func},
                    'Right Colour Strip': {'is_on': False, 'light_state': {}, 'func': None},
                    'Right White Strip': {'is_on': True, 'light_state': {}, 'func': self._normal_func},
                    'Left Colour Strip': {'is_on': False, 'light_state': {}, 'func': None},
                    'Left White Strip': {'is_on': True, 'light_state': {}, 'func': self._normal_func},
                }
            }
            # Only the white strips on.
            self.normal_mode = {
                'name': 'normal',
                'profile_state': {},
                'lights': {
                    'Hue color spot 1': {'is_on': False, 'light_state': {}, 'func': None},
                    'Hue color spot 2': {'is_on': False, 'light_state': {}, 'func': None},
                    'Hue color spot 3': {'is_on': False, 'light_state': {}, 'func': None},
                    'DEATH STAR': {'is_on': False, 'light_state': {}, 'func': None},
                    'Right Colour Strip': {'is_on': False, 'light_state': {}, 'func': None},
                    'Right White Strip': {'is_on': True, 'light_state': {}, 'func': self._normal_func},
                    'Left Colour Strip': {'is_on': False, 'light_state': {}, 'func': None},
                    'Left White Strip': {'is_on': True, 'light_state': {}, 'func': self._normal_func},
                }
            }
            # Colour-capable lights on, driven through the hue wheel.
            self.colour_mode = {
                'name': 'colour',
                'profile_state': {},
                'lights': {
                    'Hue color spot 1': {'is_on': True, 'light_state': {}, 'func': self._colour_func},
                    'Hue color spot 2': {'is_on': True, 'light_state': {}, 'func': self._colour_func},
                    'Hue color spot 3': {'is_on': True, 'light_state': {}, 'func': self._colour_func},
                    'DEATH STAR': {'is_on': True, 'light_state': {}, 'func': self._colour_func},
                    'Right Colour Strip': {'is_on': True, 'light_state': {}, 'func': self._colour_func},
                    'Right White Strip': {'is_on': False, 'light_state': {}, 'func': None},
                    'Left Colour Strip': {'is_on': True, 'light_state': {}, 'func': self._colour_func},
                    'Left White Strip': {'is_on': False, 'light_state': {}, 'func': None},
                }
            }
            profiles = [
                self.bright_white_mode,
                self.normal_mode,
                self.colour_mode,
            ]
        self.light_configs = light_configs
        self.profiles = cycle(profiles)  # endless round-robin of profiles
        self.profile = next(self.profiles)
        self.bridge_ip = bridge_ip
        self.b = None
        self.lights = []
    def connect(self):
        """Connect to the bridge and collect the Light objects we manage.

        Marks each configured light with an ``is_colour`` attribute taken
        from ``light_configs``; lights not in the config are skipped.
        """
        self.b = Bridge(self.bridge_ip)
        self.b.connect()
        pprint.pprint(self.b.get_api())
        for actual_light in self.b.lights:
            name = actual_light.name
            for light_config in self.light_configs:
                if light_config['name'] == name:
                    name += " *"
                    actual_light.is_colour = light_config['is_colour']
                    self.lights.append(actual_light)
            print(name)
        if self.lights:
            print("connected")
            for actual_light in self.lights:
                pprint.pprint(actual_light.__dict__)
    def on(self):
        """Turn every managed light on."""
        for light in self.lights:
            light.on = True
    def colour_temperature(self, temp):
        """Set white colour temperature on all lights.

        (white only) 154 is the coolest, 500 is the warmest.
        """
        for light in self.lights:
            light.colortemp = temp
    def xy(self, x, y):
        """Set (x, y) CIE 1931 colour coordinates on colour lights only."""
        for light in self.lights:
            if light.is_colour:
                light.xy = (x, y)
    def random_colour(self):
        """Set each colour light to a random point in CIE space."""
        for light in self.lights:
            if light.is_colour:
                light.xy = [random.random(), random.random()]
    def hue(self, hue, sat=254):
        """Set hue (0-65535, ~182 counts per degree) and saturation (0-254)."""
        for light in self.lights:
            light.hue = hue
            light.saturation = sat
    def brightness(self, bright):
        """Set brightness (0-254; NB 0 is dim, not off) on all lights."""
        for light in self.lights:
            # BUGFIX: was ``light.bri`` -- phue Lights expose ``brightness``
            # (as used by _normal_func/_colour_func); ``bri`` just created a
            # dead Python attribute and never reached the bulb.
            light.brightness = bright
    def colour_loop_off(self):
        """Stop the colour-loop effect on colour lights."""
        for light in self.lights:
            if light.is_colour:
                light.effect = "none"
    def colour_loop_on(self):
        """Start the built-in colour-loop effect on colour lights."""
        for light in self.lights:
            if light.is_colour:
                light.effect = "colorloop"
    def flash_once(self):
        """Flash every light a single time."""
        for light in self.lights:
            light.alert = "select"
    def flash_multiple(self):
        """Flash every light repeatedly (~15 s per the Hue API)."""
        for light in self.lights:
            light.alert = "lselect"
    def flash_off(self):
        """Cancel any active flash alert."""
        for light in self.lights:
            light.alert = None
    def off(self):
        """Turn every managed light off."""
        for light in self.lights:
            light.on = False
    @property
    def is_on(self):
        """True if any managed light reports being on."""
        return any(light.on for light in self.lights)
    @property
    def is_off(self):
        """True if no managed light reports being on."""
        return not self.is_on
    def set_hsv(self, h, s, v):
        """Set colour lights from normalized HSV (each component in [0, 1])."""
        h = int(h * 65535)
        s = int(s * 255)
        v = int(v * 255)
        print((h, s, v))
        for light in self.lights:
            if light.is_colour:
                light.hue = h
                # BUGFIX: was ``light.sat`` / ``light.bri`` -- use the real
                # phue attributes (saturation/brightness) so the command is
                # actually sent to the bulb, consistent with hue() above.
                light.saturation = s
                light.brightness = v
    def quick_transitions(self):
        """Make subsequent state changes take effect instantly."""
        for light in self.lights:
            light.transitiontime = 0
    def sleep(self, seconds):
        """Blocking sleep helper (kept here so demos read fluently)."""
        time.sleep(seconds)
    def next_profile(self):
        """Advance to the next lighting profile in the cycle."""
        self.profile = next(self.profiles)
    def do_whatever(self):
        """Pick a profile from wall-clock time and apply it to lit lights.

        Work hours (Mon-Fri, 8-18) -> bright white; weekend evenings ->
        colour; otherwise normal.  Daytime uses full brightness, night a
        dim level.  Only acts when at least one light is already on.
        """
        now = datetime.datetime.now()
        weekday = now.weekday()
        hour = now.hour
        monday = 0
        friday = 4
        saturday = 5
        sunday = 6
        is_daytime = 8 <= hour <= 18
        in_work_hours = monday <= weekday <= friday and is_daytime
        is_weekend = saturday <= weekday <= sunday
        if in_work_hours:
            self.profile = self.bright_white_mode
        else:
            if is_weekend:
                self.profile = self.colour_mode
            else:
                self.profile = self.normal_mode
        if is_daytime:
            bright = 254
        else:
            bright = 8
        if self.is_on:
            for light in self.lights:
                # NOTE(review): KeyError if a light's name is missing from
                # the profile -- assumes configs and profiles stay in sync.
                light_profile = self.profile['lights'][light.name]
                profile_state = self.profile['profile_state']
                if light_profile:
                    if light_profile['is_on'] != light.on:
                        light.on = light_profile['is_on']
                    light_func = light_profile['func']
                    light_state = light_profile['light_state']
                    if light_profile['is_on'] and light_func:
                        light_func(light=light, light_state=light_state, profile_state=profile_state, bright=bright)
    def _normal_func(self, light, **kwargs):
        """Drive one light as white: temperature/brightness track daytime."""
        # (white only) 154 is the coolest, 500 is the warmest
        ct = 500 + int(colour_helper.day_factor * (154 - 500))
        if "bright" in kwargs and kwargs["bright"]:
            brightness = kwargs["bright"]
        else:
            # // brightness between 0-254 (NB 0 is not off!)
            brightness = int(colour_helper.day_factor * 254)
        light.colortemp = ct
        light.brightness = brightness
    def _colour_func(self, light, **kwargs):
        """Drive one light through the hue wheel once per hour."""
        # hue' parameter has the range 0-65535 so represents approximately 182*degrees
        minute = datetime.datetime.now().minute
        hue = int(minute/59 * 65535)
        sat = 254
        if "bright" in kwargs and kwargs["bright"]:
            brightness = kwargs["bright"]
        else:
            # // brightness between 0-254 (NB 0 is not off!)
            brightness = int(colour_helper.day_factor * 254)
        light.hue = hue
        light.saturation = sat
        light.brightness = brightness
def _demo():
    """Manual smoke test: connect to the bridge and sweep colour temperature."""
    wrapper = HueWrapper()
    wrapper.connect()
    wrapper.on()
    wrapper.brightness(254)
    # Cool -> warm -> cool, pausing between each step.
    for temp in (154, 500, 154):
        wrapper.colour_temperature(temp)
        wrapper.sleep(5)
    wrapper.off()
if __name__ == '__main__':
    _demo()
| 35.317857 | 116 | 0.522095 | import datetime
import pprint
import random
import time
from itertools import cycle
from phue import Bridge
from helper import colour_helper
class HueWrapper(object):
def __init__(self, bridge_ip='192.168.1.73', light_configs=None, profiles=None):
if not light_configs:
light_configs = [
{'name': 'Hue color spot 1', 'is_colour': True},
{'name': 'Hue color spot 2', 'is_colour': True},
{'name': 'Hue color spot 3', 'is_colour': True},
{'name': 'DEATH STAR', 'is_colour': True},
{'name': 'Right Colour Strip', 'is_colour': True},
{'name': 'Right White Strip', 'is_colour': False},
{'name': 'Left Colour Strip', 'is_colour': True},
{'name': 'Left White Strip', 'is_colour': False},
]
if not profiles:
self.bright_white_mode = {
'name': 'bright white',
'profile_state': {},
'lights': {
'Hue color spot 1': {'is_on': True, 'light_state': {}, 'func': self._normal_func},
'Hue color spot 2': {'is_on': True, 'light_state': {}, 'func': self._normal_func},
'Hue color spot 3': {'is_on': True, 'light_state': {}, 'func': self._normal_func},
'DEATH STAR': {'is_on': True, 'light_state': {}, 'func': self._normal_func},
'Right Colour Strip': {'is_on': False, 'light_state': {}, 'func': None},
'Right White Strip': {'is_on': True, 'light_state': {}, 'func': self._normal_func},
'Left Colour Strip': {'is_on': False, 'light_state': {}, 'func': None},
'Left White Strip': {'is_on': True, 'light_state': {}, 'func': self._normal_func},
}
}
self.normal_mode = {
'name': 'normal',
'profile_state': {},
'lights': {
'Hue color spot 1': {'is_on': False, 'light_state': {}, 'func': None},
'Hue color spot 2': {'is_on': False, 'light_state': {}, 'func': None},
'Hue color spot 3': {'is_on': False, 'light_state': {}, 'func': None},
'DEATH STAR': {'is_on': False, 'light_state': {}, 'func': None},
'Right Colour Strip': {'is_on': False, 'light_state': {}, 'func': None},
'Right White Strip': {'is_on': True, 'light_state': {}, 'func': self._normal_func},
'Left Colour Strip': {'is_on': False, 'light_state': {}, 'func': None},
'Left White Strip': {'is_on': True, 'light_state': {}, 'func': self._normal_func},
}
}
self.colour_mode = {
'name': 'colour',
'profile_state': {},
'lights': {
'Hue color spot 1': {'is_on': True, 'light_state': {}, 'func': self._colour_func},
'Hue color spot 2': {'is_on': True, 'light_state': {}, 'func': self._colour_func},
'Hue color spot 3': {'is_on': True, 'light_state': {}, 'func': self._colour_func},
'DEATH STAR': {'is_on': True, 'light_state': {}, 'func': self._colour_func},
'Right Colour Strip': {'is_on': True, 'light_state': {}, 'func': self._colour_func},
'Right White Strip': {'is_on': False, 'light_state': {}, 'func': None},
'Left Colour Strip': {'is_on': True, 'light_state': {}, 'func': self._colour_func},
'Left White Strip': {'is_on': False, 'light_state': {}, 'func': None},
}
}
profiles = [
self.bright_white_mode,
self.normal_mode,
self.colour_mode,
]
self.light_configs = light_configs
self.profiles = cycle(profiles)
self.profile = next(self.profiles)
self.bridge_ip = bridge_ip
self.b = None
self.lights = []
def connect(self):
self.b = Bridge(self.bridge_ip)
self.b.connect()
pprint.pprint(self.b.get_api())
for actual_light in self.b.lights:
name = actual_light.name
for light_config in self.light_configs:
if light_config['name'] == name:
name += " *"
actual_light.is_colour = light_config['is_colour']
self.lights.append(actual_light)
print(name)
if self.lights:
print("connected")
for actual_light in self.lights:
pprint.pprint(actual_light.__dict__)
def on(self):
for light in self.lights:
light.on = True
def colour_temperature(self, temp):
for light in self.lights:
light.colortemp = temp
def xy(self, x, y):
for light in self.lights:
if light.is_colour:
light.xy = (x, y)
def random_colour(self):
for light in self.lights:
if light.is_colour:
light.xy = [random.random(), random.random()]
def hue(self, hue, sat=254):
# sat is 0-254?
for light in self.lights:
light.hue = hue
light.saturation = sat
def brightness(self, bright):
# // brightness between 0-254 (NB 0 is not off!)
for light in self.lights:
light.bri = bright
def colour_loop_off(self):
for light in self.lights:
if light.is_colour:
light.effect = "none"
def colour_loop_on(self):
for light in self.lights:
if light.is_colour:
light.effect = "colorloop"
def flash_once(self):
for light in self.lights:
light.alert = "select"
def flash_multiple(self):
for light in self.lights:
light.alert = "lselect"
def flash_off(self):
for light in self.lights:
light.alert = None
def off(self):
for light in self.lights:
light.on = False
@property
def is_on(self):
on = False
for light in self.lights:
on = on or light.on
return on
@property
def is_off(self):
return not self.is_on
def set_hsv(self, h, s, v):
h = int(h * 65535)
s = int(s * 255)
v = int(v * 255)
print((h, s, v))
for light in self.lights:
if light.is_colour:
light.hue = h
light.sat = s
light.bri = v
def quick_transitions(self):
for light in self.lights:
light.transitiontime = 0
def sleep(self, seconds):
time.sleep(seconds)
def next_profile(self):
self.profile = next(self.profiles)
def do_whatever(self):
now = datetime.datetime.now()
weekday = now.weekday()
hour = now.hour
monday = 0
friday = 4
saturday = 5
sunday = 6
is_daytime = 8 <= hour <= 18
in_work_hours = monday <= weekday <= friday and is_daytime
is_weekend = saturday <= weekday <= sunday
if in_work_hours:
self.profile = self.bright_white_mode
else:
if is_weekend:
self.profile = self.colour_mode
else:
self.profile = self.normal_mode
if is_daytime:
bright = 254
else:
bright = 8
if self.is_on:
for light in self.lights:
light_profile = self.profile['lights'][light.name]
profile_state = self.profile['profile_state']
if light_profile:
if light_profile['is_on'] != light.on:
light.on = light_profile['is_on']
light_func = light_profile['func']
light_state = light_profile['light_state']
if light_profile['is_on'] and light_func:
light_func(light=light, light_state=light_state, profile_state=profile_state, bright=bright)
def _normal_func(self, light, **kwargs):
# (white only) 154 is the coolest, 500 is the warmest
ct = 500 + int(colour_helper.day_factor * (154 - 500))
if "bright" in kwargs and kwargs["bright"]:
brightness = kwargs["bright"]
else:
# // brightness between 0-254 (NB 0 is not off!)
brightness = int(colour_helper.day_factor * 254)
light.colortemp = ct
light.brightness = brightness
pass
def _colour_func(self, light, **kwargs):
# hue' parameter has the range 0-65535 so represents approximately 182*degrees
minute = datetime.datetime.now().minute
hue = int(minute/59 * 65535)
sat = 254
if "bright" in kwargs and kwargs["bright"]:
brightness = kwargs["bright"]
else:
brightness = int(colour_helper.day_factor * 254)
light.hue = hue
light.saturation = sat
light.brightness = brightness
if __name__ == '__main__':
hue = HueWrapper()
hue.connect()
hue.on()
hue.brightness(254)
hue.colour_temperature(154)
hue.sleep(5)
hue.colour_temperature(500)
hue.sleep(5)
hue.colour_temperature(154)
hue.sleep(5)
hue.off()
| true | true |
f72ff313165970077760e6b80119c882d0e4e3b3 | 57,975 | py | Python | openconcept/analysis/performance/solver_phases.py | kanekosh/openconcept | f4646d583ba1840540e648601c963adab13cdccf | [
"MIT"
] | null | null | null | openconcept/analysis/performance/solver_phases.py | kanekosh/openconcept | f4646d583ba1840540e648601c963adab13cdccf | [
"MIT"
] | 1 | 2022-01-18T17:02:23.000Z | 2022-01-19T19:33:34.000Z | openconcept/analysis/performance/solver_phases.py | eytanadler/openconcept | 7878e5725eed78a023136b58250361531c7c7654 | [
"MIT"
] | 1 | 2021-11-13T22:40:31.000Z | 2021-11-13T22:40:31.000Z | from __future__ import division
from openmdao.api import Group, ExplicitComponent, IndepVarComp, BalanceComp, ImplicitComponent
import openconcept.api as oc
from openconcept.analysis.atmospherics.compute_atmos_props import ComputeAtmosphericProperties
from openconcept.analysis.aerodynamics import Lift, StallSpeed
from openconcept.utilities.math import ElementMultiplyDivideComp, AddSubtractComp
from openconcept.utilities.math.integrals import Integrator
from openconcept.utilities.linearinterp import LinearInterpolator
from openconcept.utilities.math.integrals import Integrator
import numpy as np
import copy
class ClimbAngleComp(ExplicitComponent):
    """
    Steady climb angle from the excess-thrust relation
    sin(gamma) = (T - D) / (W * g).

    Helper component; not intended to be instantiated directly in the
    top-level model.

    Inputs
    ------
    drag : float
        Aircraft drag at v2 (climb out) flight condition (scalar, N)
    weight : float
        Takeoff weight (scalar, kg)
    thrust : float
        Thrust at the v2 (climb out) flight condition (scalar, N)

    Outputs
    -------
    gamma : float
        Climb out flight path angle (scalar, rad)

    Options
    -------
    num_nodes : int
        Number of points to run
    """
    def initialize(self):
        self.options.declare('num_nodes', default=1)

    def setup(self):
        nn = self.options['num_nodes']
        self.add_input('drag', units='N', shape=(nn,))
        self.add_input('weight', units='kg', shape=(nn,))
        self.add_input('thrust', units='N', shape=(nn,))
        self.add_output('gamma', units='rad', shape=(nn,))
        # Each node's gamma depends only on that node's inputs: diagonal Jacobian
        ar = np.arange(0, nn)
        self.declare_partials(['gamma'], ['weight', 'thrust', 'drag'], rows=ar, cols=ar)

    def compute(self, inputs, outputs):
        g = 9.80665  # m/s^2
        excess_thrust = inputs['thrust'] - inputs['drag']
        outputs['gamma'] = np.arcsin(excess_thrust / inputs['weight'] / g)

    def compute_partials(self, inputs, J):
        g = 9.80665  # m/s^2
        sin_gamma = (inputs['thrust'] - inputs['drag']) / inputs['weight'] / g
        # chain rule through arcsin: d(arcsin u)/du = 1 / sqrt(1 - u^2)
        d_arcsin = 1 / np.sqrt(1 - sin_gamma ** 2)
        J['gamma', 'thrust'] = d_arcsin / inputs['weight'] / g
        J['gamma', 'drag'] = -d_arcsin / inputs['weight'] / g
        J['gamma', 'weight'] = -d_arcsin * (inputs['thrust'] - inputs['drag']) / inputs['weight'] ** 2 / g
class FlipVectorComp(ExplicitComponent):
    """
    Emits the input vector in reversed order, optionally negated.

    Helper component; not intended to be instantiated directly in the
    top-level model.

    Inputs
    ------
    vec_in : float
        Incoming vector in forward order

    Outputs
    -------
    vec_out : float
        ``vec_in`` reversed (and sign-flipped when ``negative`` is True)

    Options
    -------
    num_nodes : int
        Number of points to run
    negative : boolean
        Whether to apply a negative scaler. Default False preserves vector values.
        True returns all values with negative sign.
    units : string or None
        Units for vec_in and vec_out (Default None)
        Specify as an OpenMDAO unit string (e.g. 'kg')
    """
    def initialize(self):
        self.options.declare('num_nodes', default=1)
        self.options.declare('negative', default=False)
        self.options.declare('units', default=None)

    def setup(self):
        nn = self.options['num_nodes']
        units = self.options['units']
        self.add_input('vec_in', units=units, shape=(nn,))
        self.add_output('vec_out', units=units, shape=(nn,))
        sign = -1 if self.options['negative'] else 1
        # Constant anti-diagonal Jacobian: output i depends on input (nn-1-i)
        self.declare_partials(['vec_out'], ['vec_in'],
                              rows=np.arange(nn - 1, -1, -1),
                              cols=np.arange(0, nn, 1),
                              val=sign * np.ones((nn,)))

    def compute(self, inputs, outputs):
        sign = -1 if self.options['negative'] else 1
        outputs['vec_out'] = sign * inputs['vec_in'][::-1]
class BFLImplicitSolve(ImplicitComponent):
    """
    Computes a residual equation so Newton solver can set v1 to analyze balanced field length

    This residual is equal to zero if:
        - The rejected takeoff and engine-out takeoff distances are equal, or:
        - V1 is equal to VR and the engine out takeoff distance is longer than the RTO distance

    Since this is a discontinous function, the partial derivatives are written in a special way
    to 'coax' the V1 value into the right setting with a Newton step. It's kind of a hack.

    Inputs
    ------
    distance_continue : float
        Engine-out takeoff distance (scalar, m)
    distance_abort : float
        Distance to full-stop when takeoff is rejected at V1 (scalar, m)
    takeoff|vr : float
        Rotation speed (scalar, m/s)

    Outputs
    -------
    takeoff|v1 : float
        Decision speed (scalar, m/s)

    """
    def setup(self):
        self.add_input('distance_continue', units='m')
        self.add_input('distance_abort', units='m')
        self.add_input('takeoff|vr', units='m/s')
        # v1 is bounded [10, 150] m/s so the Newton iterates stay physical
        self.add_output('takeoff|v1', units='m/s',val=20,lower=10,upper=150)
        self.declare_partials('takeoff|v1',['distance_continue','distance_abort','takeoff|v1','takeoff|vr'])

    def apply_nonlinear(self, inputs, outputs, residuals):
        # speedtol: how close v1 must be to vr before we switch regimes;
        # disttol: slack allowed between abort and continue distances
        speedtol = 1e-1
        disttol = 0
        if inputs['takeoff|vr'] < outputs['takeoff|v1'] + speedtol:
            # v1 has reached (or exceeded) vr: pin v1 to vr instead of
            # balancing distances (can't decide to abort after rotation)
            residuals['takeoff|v1'] = inputs['takeoff|vr'] - outputs['takeoff|v1']
        else:
            # normal balanced-field condition: drive the engine-out
            # continue distance equal to the rejected-takeoff distance
            residuals['takeoff|v1'] = inputs['distance_continue'] - inputs['distance_abort']
        # if you are within speedtol on the correct side of vr but the abort
        # distance is still longer, fall back to the distance-balancing mode
        # so Newton can keep refining v1
        if inputs['takeoff|vr'] >= outputs['takeoff|v1'] and inputs['takeoff|vr'] - outputs['takeoff|v1'] < speedtol and (inputs['distance_abort'] - inputs['distance_continue']) > disttol:
            residuals['takeoff|v1'] = inputs['distance_continue'] - inputs['distance_abort']

    def linearize(self, inputs, outputs, partials):
        # Partials mirror the branch taken in apply_nonlinear; the hard
        # switch between regimes is what 'coaxes' the Newton step toward
        # either v1 == vr or the balanced-distance solution
        speedtol = 1e-1
        disttol = 0

        if inputs['takeoff|vr'] < outputs['takeoff|v1'] + speedtol:
            # residual = vr - v1
            partials['takeoff|v1','distance_continue'] = 0
            partials['takeoff|v1','distance_abort'] = 0
            partials['takeoff|v1','takeoff|vr'] = 1
            partials['takeoff|v1','takeoff|v1'] = -1
        else:
            # residual = distance_continue - distance_abort
            partials['takeoff|v1','distance_continue'] = 1
            partials['takeoff|v1','distance_abort'] = -1
            partials['takeoff|v1','takeoff|vr'] = 0
            partials['takeoff|v1','takeoff|v1'] = 0

        # same override as apply_nonlinear: near vr with abort still longer,
        # use the distance-balancing derivatives
        if inputs['takeoff|vr'] >= outputs['takeoff|v1'] and inputs['takeoff|vr'] - outputs['takeoff|v1'] < speedtol and (inputs['distance_abort'] - inputs['distance_continue']) > disttol:
            partials['takeoff|v1','distance_continue'] = 1
            partials['takeoff|v1','distance_abort'] = -1
            partials['takeoff|v1','takeoff|vr'] = 0
            partials['takeoff|v1','takeoff|v1'] = 0
class Groundspeeds(ExplicitComponent):
    """
    Computes groundspeed for vectorial true airspeed and true vertical speed.
    This is a helper function for the main mission analysis routines
    and shouldn't be instantiated directly.
    Inputs
    ------
    fltcond|vs : float
        Vertical speed for all mission phases (vector, m/s)
    fltcond|Utrue : float
        True airspeed for all mission phases (vector, m/s)
    Outputs
    -------
    fltcond|groundspeed : float
        True groundspeed for all mission phases (vector, m/s)
    fltcond|cosgamma : float
        Cosine of the flight path angle for all mission phases (vector, dimensionless)
    fltcond|singamma : float
        Sine of the flight path angle for all mission phases (vector, dimensionless)
    Options
    -------
    num_nodes : int
        Number of points to run
    """
    def initialize(self):
        self.options.declare('num_nodes',default=1,desc="Number of Simpson intervals to use per seg (eg. climb, cruise, descend). Number of analysis points is 2N+1")
    def setup(self):
        nn = self.options['num_nodes']
        self.add_input('fltcond|vs', units='m/s',shape=(nn,))
        self.add_input('fltcond|Utrue', units='m/s',shape=(nn,))
        self.add_output('fltcond|groundspeed', units='m/s',shape=(nn,))
        self.add_output('fltcond|cosgamma', shape=(nn,), desc='Cosine of the flight path angle')
        self.add_output('fltcond|singamma', shape=(nn,), desc='sin of the flight path angle' )
        self.declare_partials(['fltcond|groundspeed','fltcond|cosgamma','fltcond|singamma'], ['fltcond|vs','fltcond|Utrue'], rows=range(nn), cols=range(nn))
    def compute(self, inputs, outputs):
        # groundspeed^2 = Utrue^2 - vs^2; this can go negative transiently
        # during solver iterations, so clip to a small positive number before
        # the sqrt. (Previously the sqrt was taken first and the resulting
        # NaNs were patched afterwards, which emitted RuntimeWarnings.)
        inside = inputs['fltcond|Utrue']**2-inputs['fltcond|vs']**2
        invalid = np.less(inside, 0.0)
        groundspeed_fixed = np.sqrt(np.where(invalid, 0.01, inside))
        outputs['fltcond|groundspeed'] = groundspeed_fixed
        # in the nonphysical region (|vs| > Utrue) pin singamma to 1
        outputs['fltcond|singamma'] = np.where(invalid, 1, inputs['fltcond|vs'] / inputs['fltcond|Utrue'])
        outputs['fltcond|cosgamma'] = groundspeed_fixed / inputs['fltcond|Utrue']
    def compute_partials(self, inputs, J):
        inside = inputs['fltcond|Utrue']**2-inputs['fltcond|vs']**2
        invalid = np.less(inside, 0.0)
        groundspeed_fixed = np.sqrt(np.where(invalid, 0.01, inside))
        # zero out the derivatives in the clipped region, matching compute
        J['fltcond|groundspeed','fltcond|vs'] = np.where(invalid, 0, (1/2) / groundspeed_fixed * (-2) * inputs['fltcond|vs'])
        J['fltcond|groundspeed','fltcond|Utrue'] = np.where(invalid, 0, (1/2) / groundspeed_fixed * 2 * inputs['fltcond|Utrue'])
        J['fltcond|singamma','fltcond|vs'] = np.where(invalid, 0, 1 / inputs['fltcond|Utrue'])
        J['fltcond|singamma','fltcond|Utrue'] = np.where(invalid, 0, - inputs['fltcond|vs'] / inputs['fltcond|Utrue'] ** 2)
        J['fltcond|cosgamma','fltcond|vs'] = J['fltcond|groundspeed','fltcond|vs'] / inputs['fltcond|Utrue']
        J['fltcond|cosgamma','fltcond|Utrue'] = (J['fltcond|groundspeed','fltcond|Utrue'] * inputs['fltcond|Utrue'] - groundspeed_fixed) / inputs['fltcond|Utrue']**2
class HorizontalAcceleration(ExplicitComponent):
    """
    Computes the along-track acceleration during the takeoff run, which
    effectively forms the thrust-minus-drag residual.

    Inputs
    ------
    weight : float
        Aircraft weight (scalar, kg)
    drag : float
        Aircraft drag at each analysis point (vector, N)
    lift : float
        Aircraft lift at each analysis point (vector, N)
    thrust : float
        Thrust at each TO analysis point (vector, N)
    fltcond|singamma : float
        Sine of the flight path angle gamma (vector, dimensionless)
    braking : float
        Effective rolling friction multiplier at each point (vector, dimensionless)

    Outputs
    -------
    accel_horiz : float
        Aircraft horizontal acceleration (vector, m/s**2)

    Options
    -------
    num_nodes : int
        Number of analysis points to run
    """
    def initialize(self):
        self.options.declare('num_nodes', default=1)

    def setup(self):
        nn = self.options['num_nodes']
        g = 9.80665  # m/s^2
        for var_name, var_units in (('weight', 'kg'), ('drag', 'N'),
                                    ('lift', 'N'), ('thrust', 'N')):
            self.add_input(var_name, units=var_units, shape=(nn,))
        self.add_input('fltcond|singamma', shape=(nn,))
        self.add_input('braking', shape=(nn,))
        self.add_output('accel_horiz', units='m/s**2', shape=(nn,))
        diag = np.arange(nn)
        self.declare_partials(['accel_horiz'],
                              ['weight', 'drag', 'lift', 'thrust', 'braking'],
                              rows=diag, cols=diag)
        # d(accel)/d(singamma) = -g is constant, so declare it with a fixed value
        self.declare_partials(['accel_horiz'], ['fltcond|singamma'],
                              rows=diag, cols=diag, val=-g*np.ones((nn,)))

    def compute(self, inputs, outputs):
        g = 9.80665  # m/s^2
        mass = inputs['weight']
        # rolling friction only acts while the wheels carry load (lift/m < g)
        wheels_loaded = np.where(np.less((g-inputs['lift']/mass), 0.0), 0.0, 1.0)
        friction = wheels_loaded*inputs['braking']*(g-inputs['lift']/mass)
        outputs['accel_horiz'] = inputs['thrust']/mass - inputs['drag']/mass - friction - g*inputs['fltcond|singamma']

    def compute_partials(self, inputs, J):
        g = 9.80665  # m/s^2
        mass = inputs['weight']
        wheels_loaded = np.where(np.less((g-inputs['lift']/mass), 0.0), 0.0, 1.0)
        J['accel_horiz','thrust'] = 1/mass
        J['accel_horiz','drag'] = -1/mass
        J['accel_horiz','braking'] = -wheels_loaded*(g-inputs['lift']/mass)
        J['accel_horiz','lift'] = wheels_loaded*inputs['braking']/mass
        J['accel_horiz','weight'] = (inputs['drag']-inputs['thrust']-wheels_loaded*inputs['braking']*inputs['lift'])/mass**2
class VerticalAcceleration(ExplicitComponent):
    """
    Computes the vertical acceleration during the takeoff run.

    Only used during full unsteady takeoff performance analysis due to
    stability issues.

    Inputs
    ------
    weight : float
        Aircraft weight (scalar, kg)
    drag : float
        Aircraft drag at each analysis point (vector, N)
    lift : float
        Aircraft lift at each analysis point (vector, N)
    thrust : float
        Thrust at each TO analysis point (vector, N)
    fltcond|singamma : float
        Sine of the flight path angle gamma (vector, dimensionless)
    fltcond|cosgamma : float
        Cosine of the flight path angle gamma (vector, dimensionless)

    Outputs
    -------
    accel_vert : float
        Aircraft vertical acceleration (vector, m/s**2)

    Options
    -------
    num_nodes : int
        Number of analysis points to run
    """
    def initialize(self):
        self.options.declare('num_nodes', default=1)

    def setup(self):
        nn = self.options['num_nodes']
        g = 9.80665  # m/s^2
        for var_name, var_units in (('weight', 'kg'), ('drag', 'N'),
                                    ('lift', 'N'), ('thrust', 'N')):
            self.add_input(var_name, units=var_units, shape=(nn,))
        self.add_input('fltcond|singamma', shape=(nn,))
        self.add_input('fltcond|cosgamma', shape=(nn,))
        # output bounded to a plausible load-factor band (-1 g to +2.5 g)
        self.add_output('accel_vert', units='m/s**2', shape=(nn,), upper=2.5*g, lower=-1*g)
        diag = np.arange(nn)
        self.declare_partials(['accel_vert'],
                              ['weight', 'drag', 'lift', 'thrust',
                               'fltcond|singamma', 'fltcond|cosgamma'],
                              rows=diag, cols=diag)

    def compute(self, inputs, outputs):
        g = 9.80665  # m/s^2
        cosg = inputs['fltcond|cosgamma']
        sing = inputs['fltcond|singamma']
        # net vertical force divided by mass, then clipped to -1 g .. +2.5 g
        raw_accel = (inputs['lift']*cosg + (inputs['thrust']-inputs['drag'])*sing - g*inputs['weight'])/inputs['weight']
        outputs['accel_vert'] = np.clip(raw_accel, -g, 2.5*g)

    def compute_partials(self, inputs, J):
        # NOTE(review): these are the derivatives of the unclipped expression;
        # the clip applied in compute is not reflected here.
        g = 9.80665  # m/s^2
        m = inputs['weight']
        cosg = inputs['fltcond|cosgamma']
        sing = inputs['fltcond|singamma']
        J['accel_vert','thrust'] = sing / m
        J['accel_vert','drag'] = -sing / m
        J['accel_vert','lift'] = cosg / m
        J['accel_vert','fltcond|singamma'] = (inputs['thrust']-inputs['drag']) / m
        J['accel_vert','fltcond|cosgamma'] = inputs['lift'] / m
        J['accel_vert','weight'] = -(inputs['lift']*cosg + (inputs['thrust']-inputs['drag'])*sing)/m**2
class SteadyFlightCL(ExplicitComponent):
    """
    Computes the lift coefficient required for steady flight at each
    analysis point.

    Helper component for the main mission analysis routine; not meant to
    be instantiated directly.

    Inputs
    ------
    weight : float
        Aircraft weight at each analysis point (vector, kg)
    fltcond|q : float
        Dynamic pressure at each analysis point (vector, Pascal)
    ac|geom|wing|S_ref : float
        Reference wing area (scalar, m**2)
    fltcond|cosgamma : float
        Cosine of the flight path angle for all mission phases (vector, dimensionless)

    Outputs
    -------
    fltcond|CL : float
        Lift coefficient (vector, dimensionless)

    Options
    -------
    num_nodes : int
        Number of analysis nodes to run
    mission_segments : list
        The list of mission segments to track
    """
    def initialize(self):
        self.options.declare('num_nodes',default=5,desc="Number of Simpson intervals to use per seg (eg. climb, cruise, descend). Number of analysis points is 2N+1")
        self.options.declare('mission_segments',default=['climb','cruise','descent'])

    def setup(self):
        nn = self.options['num_nodes']
        diag = np.arange(nn)
        self.add_input('weight', units='kg', shape=(nn,))
        self.add_input('fltcond|q', units='N * m**-2', shape=(nn,))
        self.add_input('ac|geom|wing|S_ref', units='m **2')
        self.add_input('fltcond|cosgamma', val=1.0, shape=(nn,))
        self.add_output('fltcond|CL',shape=(nn,))
        self.declare_partials(['fltcond|CL'], ['weight','fltcond|q',"fltcond|cosgamma"], rows=diag, cols=diag)
        # the wing area is a scalar broadcast against every analysis point
        self.declare_partials(['fltcond|CL'], ['ac|geom|wing|S_ref'], rows=diag, cols=np.zeros(nn))

    def compute(self, inputs, outputs):
        g = 9.80665  # m/s^2
        # steady flight: L = W*cos(gamma), so CL = W*cos(gamma) / (q * S)
        outputs['fltcond|CL'] = inputs['fltcond|cosgamma']*g*inputs['weight']/inputs['fltcond|q']/inputs['ac|geom|wing|S_ref']

    def compute_partials(self, inputs, J):
        g = 9.80665  # m/s^2
        J['fltcond|CL','weight'] = inputs['fltcond|cosgamma']*g/inputs['fltcond|q']/inputs['ac|geom|wing|S_ref']
        J['fltcond|CL','fltcond|q'] = - inputs['fltcond|cosgamma']*g*inputs['weight'] / inputs['fltcond|q']**2 / inputs['ac|geom|wing|S_ref']
        J['fltcond|CL','ac|geom|wing|S_ref'] = - inputs['fltcond|cosgamma']*g*inputs['weight'] / inputs['fltcond|q'] / inputs['ac|geom|wing|S_ref']**2
        J['fltcond|CL','fltcond|cosgamma'] = g*inputs['weight']/inputs['fltcond|q']/inputs['ac|geom|wing|S_ref']
class GroundRollPhase(oc.PhaseGroup):
    """
    This component group models the ground roll phase of a takeoff (acceleration before flight)
    User-settable parameters include:
    throttle (default 100 percent)
    rolling friction coeff (default 0.03 for accelerating segments and 0.4 for braking)
    propulsor_active (default 1 for v0 to v1, 0 for v1 to vr and braking) to model engine failure
    altitude (fltcond|h)
    The BaseAircraftGroup object is passed in.
    The BaseAircraftGroup should be built to accept the following inputs
    and return the following outputs.
    The outputs should be promoted to the top level in the component.
    Inputs
    ------
    range : float
        Total distance travelled (vector, m)
    fltcond|h : float
        Altitude (vector, m)
    fltcond|vs : float
        Vertical speed (vector, m/s)
    fltcond|Ueas : float
        Equivalent airspeed (vector, m/s)
    fltcond|Utrue : float
        True airspeed (vector, m/s)
    fltcond|p : float
        Pressure (vector, Pa)
    fltcond|rho : float
        Density (vector, kg/m3)
    fltcond|T : float
        Temperature (vector, K)
    fltcond|q : float
        Dynamic pressure (vector, Pa)
    fltcond|CL : float
        Lift coefficient (vector, dimensionless)
    throttle : float
        Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)
    propulsor_active : float
        If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.
        It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)
    braking : float
        Brake friction coefficient (default 0.4 for dry runway braking, 0.03 for resistance unbraked)
        Should not be applied in the air or nonphysical effects will result (vector, dimensionless)
    lift : float
        Lift force (vector, N)
    Outputs
    -------
    thrust : float
        Total thrust force produced by all propulsors (vector, N)
    drag : float
        Total drag force in the airplane axis produced by all sources of drag (vector, N)
    weight : float
        Weight (mass, really) of the airplane at each point in time. (vector, kg)
    ac|geom|wing|S_ref
        Wing reference area (scalar, m**2)
    ac|aero|CLmax_TO
        CLmax with flaps in max takeoff position (scalar, dimensionless)
    ac|weights|MTOW
        Maximum takeoff weight (scalar, kg)
    """
    def initialize(self):
        self.options.declare('num_nodes',default=1)
        self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. v0v1, cruise')
        self.options.declare('aircraft_model',default=None)
    def setup(self):
        nn = self.options['num_nodes']
        ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=["*"])
        # set CL = 0.1 for the ground roll per Raymer's book
        ivcomp.add_output('fltcond|CL', val=np.ones((nn,))*0.1)
        ivcomp.add_output('vr_vstall_mult',val=1.1)
        ivcomp.add_output('fltcond|h',val=np.zeros((nn,)),units='m')
        ivcomp.add_output('fltcond|vs',val=np.zeros((nn,)),units='m/s')
        ivcomp.add_output('zero_speed',val=2,units='m/s')

        flight_phase = self.options['flight_phase']
        if flight_phase == 'v0v1':
            # all engines accelerating from standstill to decision speed
            ivcomp.add_output('braking',val=np.ones((nn,))*0.03)
            ivcomp.add_output('propulsor_active',val=np.ones((nn,)))
            ivcomp.add_output('throttle',val=np.ones((nn,)))
            zero_start = True
        elif flight_phase == 'v1vr':
            # engine-out continued acceleration from V1 to rotation speed
            ivcomp.add_output('braking',val=np.ones((nn,))*0.03)
            ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))
            ivcomp.add_output('throttle',val=np.ones((nn,)))
            zero_start = False
        elif flight_phase == 'v1v0':
            # rejected takeoff: idle throttle, heavy braking, decelerating to stop
            ivcomp.add_output('braking',val=0.4*np.ones((nn,)))
            ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))
            ivcomp.add_output('throttle',val=np.zeros((nn,)))
            zero_start=False
        else:
            # Previously an unrecognized phase fell through this chain and
            # raised a confusing NameError on zero_start below; fail fast
            # with a clear message instead.
            raise ValueError("GroundRollPhase flight_phase must be 'v0v1', 'v1vr', or 'v1v0'; got " + repr(flight_phase))

        self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=True), promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('gs',Groundspeeds(num_nodes=nn),promotes_inputs=['*'],promotes_outputs=['*'])
        # add the user-defined aircraft model
        self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn,flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('lift',Lift(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('stall',StallSpeed(),promotes_inputs=[('CLmax','ac|aero|CLmax_TO'),('weight','ac|weights|MTOW'),'ac|geom|wing|S_ref'],promotes_outputs=['*'])
        self.add_subsystem('vrspeed',ElementMultiplyDivideComp(output_name='takeoff|vr',input_names=['Vstall_eas','vr_vstall_mult'],input_units=['m/s',None]),promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('haccel',HorizontalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])

        if flight_phase == 'v1v0':
            #unfortunately need to shoot backwards to avoid negative airspeeds
            #reverse the order of the accelerations so the last one is first (and make them negative)
            self.add_subsystem('flipaccel', FlipVectorComp(num_nodes=nn, units='m/s**2', negative=True), promotes_inputs=[('vec_in','accel_horiz')])
            #integrate the timesteps in reverse from near zero speed.
            ode_integ = self.add_subsystem('ode_integ', Integrator(num_nodes=nn, method='simpson', diff_units='s',time_setup='duration'), promotes_inputs=['*'], promotes_outputs=['*'])
            ode_integ.add_integrand('vel_q', units='m/s', rate_name='vel_dqdt', start_name='zero_speed', end_name='fltcond|Utrue_initial', lower=1.5)
            self.connect('flipaccel.vec_out','vel_dqdt')
            #flip the result of the reverse integration again so the flight condition is forward and consistent with everything else
            self.add_subsystem('flipvel', FlipVectorComp(num_nodes=nn, units='m/s', negative=False), promotes_outputs=[('vec_out','fltcond|Utrue')])
            self.connect('vel_q','flipvel.vec_in')
            # now set the time step so that backwards shooting results in the correct 'initial' segment airspeed
            self.add_subsystem('v0constraint',BalanceComp(name='duration',units='s',eq_units='m/s',rhs_name='fltcond|Utrue_initial',lhs_name='takeoff|v1',val=10.,upper=100.,lower=1.),
                                promotes_inputs=['*'],promotes_outputs=['duration'])
        else:
            # forward shooting for these acceleration segments
            ode_integ = self.add_subsystem('ode_integ', Integrator(num_nodes=nn, method='simpson', diff_units='s',time_setup='duration'), promotes_inputs=['*'], promotes_outputs=['*'])
            ode_integ.add_integrand('fltcond|Utrue', units='m/s', rate_name='accel_horiz', start_name='fltcond|Utrue_initial', end_name='fltcond|Utrue_final', lower=1.5)
            if flight_phase == 'v0v1':
                self.connect('zero_speed','fltcond|Utrue_initial')
                self.add_subsystem('v1constraint',BalanceComp(name='duration',units='s',eq_units='m/s',rhs_name='fltcond|Utrue_final',lhs_name='takeoff|v1',val=10.,upper=100.,lower=1.),
                                    promotes_inputs=['*'],promotes_outputs=['duration'])
            elif flight_phase == 'v1vr':
                self.add_subsystem('vrconstraint',BalanceComp(name='duration',units='s',eq_units='m/s',rhs_name='fltcond|Utrue_final',lhs_name='takeoff|vr',val=5.,upper=12.,lower=0.0),
                                    promotes_inputs=['*'],promotes_outputs=['duration'])

        if zero_start:
            ode_integ.add_integrand('range', rate_name='fltcond|groundspeed', units='m', zero_start=True)
        else:
            ode_integ.add_integrand('range', rate_name='fltcond|groundspeed', units='m')
class RotationPhase(oc.PhaseGroup):
    """
    This group models the transition from ground roll to climb out during a takeoff
    using force balance in the vertical and horizontal directions.
    User-settable parameters include:
    throttle (default 100 percent)
    rolling friction coeff (default 0.03 for accelerating segments and 0.4 for braking)
    propulsor_active (default 1 for v0 to v1, 0 for v1 to vr and braking) to model engine failure
    altitude (fltcond|h)
    obstacle clearance hight (h_obs) default 35 feet per FAR 25
    Rotation CL/CLmax ratio (default 0.83)
    The BaseAircraftGroup object is passed in.
    The BaseAircraftGroup should be built to accept the following inputs
    and return the following outputs.
    The outputs should be promoted to the top level in the component.
    Inputs
    ------
    range : float
        Total distance travelled (vector, m)
    fltcond|h : float
        Altitude (vector, m)
    fltcond|vs : float
        Vertical speed (vector, m/s)
    fltcond|Ueas : float
        Equivalent airspeed (vector, m/s)
    fltcond|Utrue : float
        True airspeed (vector, m/s)
    fltcond|p : float
        Pressure (vector, Pa)
    fltcond|rho : float
        Density (vector, kg/m3)
    fltcond|T : float
        Temperature (vector, K)
    fltcond|q : float
        Dynamic pressure (vector, Pa)
    fltcond|CL : float
        Lift coefficient (vector, dimensionless)
    throttle : float
        Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)
    propulsor_active : float
        If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.
        It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)
    braking : float
        Percentage brakes applied, from 0 to 1. Should not be applied in the air or nonphysical effects will result (vector, dimensionless)
    lift : float
        Lift force (vector, N)
    Outputs
    -------
    thrust : float
        Total thrust force produced by all propulsors (vector, N)
    drag : float
        Total drag force in the airplane axis produced by all sources of drag (vector, N)
    weight : float
        Weight (mass, really) of the airplane at each point in time. Generally will need to be integrated by Dymos as a state with a rate source (vector, kg)
    ac|geom|wing|S_ref
        Wing reference area (scalar, m**2)
    ac|aero|CLmax_TO
        CLmax with flaps in max takeoff position (scalar, dimensionless)
    ac|weights|MTOW
        Maximum takeoff weight (scalar, kg)
    """
    def initialize(self):
        self.options.declare('num_nodes',default=1)
        self.options.declare('flight_phase',default=None)
        self.options.declare('aircraft_model',default=None)
    def setup(self):
        nn = self.options['num_nodes']
        ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=["*"])
        # climb-out CL is held at a fixed fraction (default 0.83) of CLmax_TO
        ivcomp.add_output('CL_rotate_mult', val=np.ones((nn,))*0.83)
        # obstacle clearance height: 35 ft per FAR 25 (see class docstring)
        ivcomp.add_output('h_obs', val=35, units='ft')
        flight_phase = self.options['flight_phase']
        if flight_phase == 'rotate':
            # airborne: no brakes; engine-out condition at full throttle
            ivcomp.add_output('braking',val=np.zeros((nn,)))
            ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))
            ivcomp.add_output('throttle',val=np.ones((nn,)))
        self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=True), promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('gs',Groundspeeds(num_nodes=nn),promotes_inputs=['*'],promotes_outputs=['*'])
        # fltcond|CL = CL_rotate_mult * ac|aero|CLmax_TO (scalar CLmax broadcast over nn nodes)
        clcomp = self.add_subsystem('clcomp',ElementMultiplyDivideComp(output_name='fltcond|CL', input_names=['CL_rotate_mult','ac|aero|CLmax_TO'],
                                                                       vec_size=[nn,1], length=1),
                                    promotes_inputs=['*'], promotes_outputs=['*'])
        # user-defined aircraft model provides thrust, drag, weight
        self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn,flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('lift',Lift(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
        # 2D force balance: accelerations in both the horizontal and vertical planes
        self.add_subsystem('haccel',HorizontalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('vaccel',VerticalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
        # TODO always starts from zero altitude
        # implicitly solve for the segment duration that ends exactly at the obstacle height
        self.add_subsystem('clear_obstacle',BalanceComp(name='duration',units='s',val=1,eq_units='m',rhs_name='fltcond|h_final',lhs_name='h_obs',lower=0.1,upper=15),
                            promotes_inputs=['*'],promotes_outputs=['duration'])
        # chained integrations: accel -> airspeed, groundspeed -> range,
        # vertical accel -> vertical speed, vertical speed -> altitude
        int1 = self.add_subsystem('intvelocity', Integrator(num_nodes=nn, method='simpson',diff_units='s',time_setup='duration'), promotes_outputs=['*'], promotes_inputs=['*'])
        int1.add_integrand('fltcond|Utrue', rate_name='accel_horiz', units='m/s', lower=0.1)
        int2 = self.add_subsystem('intrange', Integrator(num_nodes=nn, method='simpson',diff_units='s',time_setup='duration'), promotes_outputs=['*'], promotes_inputs=['*'])
        int2.add_integrand('range', rate_name='fltcond|groundspeed', units='m')
        int3 = self.add_subsystem('intvs', Integrator(num_nodes=nn, method='simpson',diff_units='s',time_setup='duration'), promotes_outputs=['*'], promotes_inputs=['*'])
        int3.add_integrand('fltcond|vs', rate_name='accel_vert', units='m/s', zero_start=True)
        int4 = self.add_subsystem('inth', Integrator(num_nodes=nn, method='simpson',diff_units='s',time_setup='duration'), promotes_outputs=['*'], promotes_inputs=['*'])
        int4.add_integrand('fltcond|h', rate_name='fltcond|vs', units='m', zero_start=True)
class SteadyFlightPhase(oc.PhaseGroup):
    """
    This component group models steady flight conditions.
    Settable mission parameters include:
    Airspeed (fltcond|Ueas)
    Vertical speed (fltcond|vs)
    Duration of the segment (duration)
    Throttle is set automatically to ensure steady flight
    The BaseAircraftGroup object is passed in.
    The BaseAircraftGroup should be built to accept the following inputs
    and return the following outputs.
    The outputs should be promoted to the top level in the component.
    Inputs
    ------
    range : float
        Total distance travelled (vector, m)
    fltcond|h : float
        Altitude (vector, m)
    fltcond|vs : float
        Vertical speed (vector, m/s)
    fltcond|Ueas : float
        Equivalent airspeed (vector, m/s)
    fltcond|Utrue : float
        True airspeed (vector, m/s)
    fltcond|p : float
        Pressure (vector, Pa)
    fltcond|rho : float
        Density (vector, kg/m3)
    fltcond|T : float
        Temperature (vector, K)
    fltcond|q : float
        Dynamic pressure (vector, Pa)
    fltcond|CL : float
        Lift coefficient (vector, dimensionless)
    throttle : float
        Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)
    propulsor_active : float
        If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.
        It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)
    braking : float
        Brake friction coefficient (default 0.4 for dry runway braking, 0.03 for resistance unbraked)
        Should not be applied in the air or nonphysical effects will result (vector, dimensionless)
    lift : float
        Lift force (vector, N)
    Outputs
    -------
    thrust : float
        Total thrust force produced by all propulsors (vector, N)
    drag : float
        Total drag force in the airplane axis produced by all sources of drag (vector, N)
    weight : float
        Weight (mass, really) of the airplane at each point in time. (vector, kg)
    ac|geom|wing|S_ref
        Wing reference area (scalar, m**2)
    ac|aero|CLmax_TO
        CLmax with flaps in max takeoff position (scalar, dimensionless)
    ac|weights|MTOW
        Maximum takeoff weight (scalar, kg)
    """
    def initialize(self):
        self.options.declare('num_nodes',default=1)
        self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. v0v1, cruise')
        self.options.declare('aircraft_model',default=None)
    def setup(self):
        nn = self.options['num_nodes']
        ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=["*"])
        # defaults: all propulsors running, no braking in flight
        ivcomp.add_output('propulsor_active', val=np.ones(nn))
        ivcomp.add_output('braking', val=np.zeros(nn))
        # nominal airspeed / climb rate; typically overridden by the mission definition
        ivcomp.add_output('fltcond|Ueas',val=np.ones((nn,))*90, units='m/s')
        ivcomp.add_output('fltcond|vs',val=np.ones((nn,))*1, units='m/s')
        # constant zero used as the LHS of the steady-flight (accel == 0) balance below
        ivcomp.add_output('zero_accel',val=np.zeros((nn,)),units='m/s**2')

        # single integrator: vertical speed -> altitude and (added below) groundspeed -> range
        integ = self.add_subsystem('ode_integ', Integrator(num_nodes=nn, diff_units='s', time_setup='duration', method='simpson'), promotes_inputs=['fltcond|vs', 'fltcond|groundspeed'], promotes_outputs=['fltcond|h', 'range'])
        integ.add_integrand('fltcond|h', rate_name='fltcond|vs', val=1.0, units='m')
        self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=False), promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('gs',Groundspeeds(num_nodes=nn),promotes_inputs=['*'],promotes_outputs=['*'])
        # add the user-defined aircraft model
        self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn, flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('clcomp',SteadyFlightCL(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('lift',Lift(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('haccel',HorizontalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
        integ.add_integrand('range', rate_name='fltcond|groundspeed', val=1.0, units='m')
        # implicitly solve for the throttle that drives the horizontal acceleration to zero (steady flight)
        self.add_subsystem('steadyflt',BalanceComp(name='throttle',val=np.ones((nn,))*0.5,lower=0.01,upper=2.0,units=None,normalize=False,eq_units='m/s**2',rhs_name='accel_horiz',lhs_name='zero_accel',rhs_val=np.zeros((nn,))),
                            promotes_inputs=['accel_horiz','zero_accel'],promotes_outputs=['throttle'])
# class OldSteadyFlightPhase(Group):
# """
# This component group models steady flight conditions.
# Settable mission parameters include:
# Airspeed (fltcond|Ueas)
# Vertical speed (fltcond|vs)
# Duration of the segment (duration)
# Throttle is set automatically to ensure steady flight
# The BaseAircraftGroup object is passed in.
# The BaseAircraftGroup should be built to accept the following inputs
# and return the following outputs.
# The outputs should be promoted to the top level in the component.
# Inputs
# ------
# range : float
# Total distance travelled (vector, m)
# fltcond|h : float
# Altitude (vector, m)
# fltcond|vs : float
# Vertical speed (vector, m/s)
# fltcond|Ueas : float
# Equivalent airspeed (vector, m/s)
# fltcond|Utrue : float
# True airspeed (vector, m/s)
# fltcond|p : float
# Pressure (vector, Pa)
# fltcond|rho : float
# Density (vector, kg/m3)
# fltcond|T : float
# Temperature (vector, K)
# fltcond|q : float
# Dynamic pressure (vector, Pa)
# fltcond|CL : float
# Lift coefficient (vector, dimensionless)
# throttle : float
# Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)
# propulsor_active : float
# If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.
# It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)
# braking : float
# Brake friction coefficient (default 0.4 for dry runway braking, 0.03 for resistance unbraked)
# Should not be applied in the air or nonphysical effects will result (vector, dimensionless)
# lift : float
# Lift force (vector, N)
# Outputs
# -------
# thrust : float
# Total thrust force produced by all propulsors (vector, N)
# drag : float
# Total drag force in the airplane axis produced by all sources of drag (vector, N)
# weight : float
# Weight (mass, really) of the airplane at each point in time. (vector, kg)
# ac|geom|wing|S_ref
# Wing reference area (scalar, m**2)
# ac|aero|CLmax_TO
# CLmax with flaps in max takeoff position (scalar, dimensionless)
# ac|weights|MTOW
# Maximum takeoff weight (scalar, kg)
# """
# def initialize(self):
# self.options.declare('num_nodes',default=1)
# self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. v0v1, cruise')
# self.options.declare('aircraft_model',default=None)
# def setup(self):
# nn = self.options['num_nodes']
# ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=["*"])
# ivcomp.add_output('propulsor_active', val=np.ones(nn))
# ivcomp.add_output('braking', val=np.zeros(nn))
# ivcomp.add_output('fltcond|Ueas',val=np.ones((nn,))*90, units='m/s')
# ivcomp.add_output('fltcond|vs',val=np.ones((nn,))*1, units='m/s')
# ivcomp.add_output('zero_accel',val=np.zeros((nn,)),units='m/s**2')
# self.add_subsystem('inth',Integrator(num_nodes=nn, method='simpson', quantity_units='m', diff_units='s', time_setup='duration'),
# promotes_inputs=[('dqdt','fltcond|vs'),'duration',('q_initial','fltcond|h_initial')],promotes_outputs=[('q','fltcond|h'),('q_final','fltcond|h_final')])
# self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=False), promotes_inputs=['*'], promotes_outputs=['*'])
# self.add_subsystem('gs',Groundspeeds(num_nodes=nn),promotes_inputs=['*'],promotes_outputs=['*'])
# # add the user-defined aircraft model
# self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn, flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])
# self.add_subsystem('clcomp',SteadyFlightCL(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
# self.add_subsystem('lift',Lift(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
# self.add_subsystem('haccel',HorizontalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
# self.add_subsystem('intrange',Integrator(num_nodes=nn, method='simpson', quantity_units='m', diff_units='s', time_setup='duration'),
# promotes_inputs=[('dqdt','fltcond|groundspeed'),'duration',('q_initial','range_initial')],promotes_outputs=[('q','range'),('q_final','range_final')])
# self.add_subsystem('steadyflt',BalanceComp(name='throttle',val=np.ones((nn,))*0.5,lower=0.01,upper=2.0,units=None,normalize=False,eq_units='m/s**2',rhs_name='accel_horiz',lhs_name='zero_accel',rhs_val=np.zeros((nn,))),
# promotes_inputs=['accel_horiz','zero_accel'],promotes_outputs=['throttle'])
class ClimbAnglePhase(Group):
    """
    This component checks the climb angle for a
    single flight condition at the V2 speed. No integration is performed.
    User settable parameter includes the V2/Vstall multiple (default 1.2)
    Useful for ensuring all-engine climb gradients in optimization.
    Choose flight_phase = AllEngineClimbAngle or EngineOutClimbAngle
    to set the propulsor_active property correctly.
    Inputs
    ------
    range : float
        Total distance travelled (vector, m)
    fltcond|h : float
        Altitude (vector, m)
    fltcond|vs : float
        Vertical speed (vector, m/s)
    fltcond|Ueas : float
        Equivalent airspeed (vector, m/s)
    fltcond|Utrue : float
        True airspeed (vector, m/s)
    fltcond|p : float
        Pressure (vector, Pa)
    fltcond|rho : float
        Density (vector, kg/m3)
    fltcond|T : float
        Temperature (vector, K)
    fltcond|q : float
        Dynamic pressure (vector, Pa)
    fltcond|CL : float
        Lift coefficient (vector, dimensionless)
    throttle : float
        Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)
    propulsor_active : float
        If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.
        It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)
    lift : float
        Lift force (vector, N)
    Outputs
    -------
    thrust : float
        Total thrust force produced by all propulsors (vector, N)
    drag : float
        Total drag force in the airplane axis produced by all sources of drag (vector, N)
    weight : float
        Weight (mass, really) of the airplane at each point in time. Generally will need to be integrated by Dymos as a state with a rate source (vector, kg)
    ac|geom|wing|S_ref
        Wing reference area (scalar, m**2)
    ac|aero|CLmax_TO
        CLmax with flaps in max takeoff position (scalar, dimensionless)
    ac|weights|MTOW
        Maximum takeoff weight (scalar, kg)
    """
    def initialize(self):
        self.options.declare('num_nodes',default=1)
        self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. v0v1, cruise')
        self.options.declare('aircraft_model',default=None)
    def setup(self):
        nn = self.options['num_nodes']
        # fixed settings: V2 = 1.2 * Vstall by default, sea-level altitude, level-flight cosgamma
        ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=["*"])
        ivcomp.add_output('v2_vstall_mult',val=1.2)
        ivcomp.add_output('fltcond|h',val=np.zeros((nn,)),units='m')
        ivcomp.add_output('fltcond|cosgamma', val=np.ones((nn,)))
        flight_phase = self.options['flight_phase']
        if flight_phase == 'AllEngineClimbAngle':
            # all propulsors operating at full throttle
            ivcomp.add_output('propulsor_active',val=np.ones((nn,)))
            ivcomp.add_output('throttle',val=np.ones((nn,)))
        elif flight_phase == 'EngineOutClimbAngle':
            # engine-out case: propulsor_active = 0 disables the failed propulsor(s)
            ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))
            ivcomp.add_output('throttle',val=np.ones((nn,)))
        # stall speed from MTOW / CLmax, then scale up to V2 and fly at that speed
        self.add_subsystem('stall',StallSpeed(),promotes_inputs=[('CLmax','ac|aero|CLmax_TO'),('weight','ac|weights|MTOW'),'ac|geom|wing|S_ref'],promotes_outputs=['*'])
        self.add_subsystem('vrspeed',ElementMultiplyDivideComp(output_name='takeoff|v2',input_names=['Vstall_eas','v2_vstall_mult'],input_units=['m/s',None]),promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=False), promotes_inputs=['*'], promotes_outputs=['*'])
        # steady-flight CL at MTOW feeds the aircraft model's drag estimate
        self.add_subsystem('clcomp',SteadyFlightCL(num_nodes=nn), promotes_inputs=[('weight','ac|weights|MTOW'),'fltcond|*','ac|*'],promotes_outputs=['*'])
        self.connect('takeoff|v2','fltcond|Ueas')
        # the aircraft model needs to provide thrust and drag
        self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn,flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])
        # gamma = arcsin((thrust - drag) / weight / g) evaluated at the V2 condition
        self.add_subsystem('climbangle',ClimbAngleComp(num_nodes=nn),promotes_inputs=['drag',('weight','ac|weights|MTOW'),'thrust'],promotes_outputs=['gamma'])
class TakeoffTransition(ExplicitComponent):
    """
    Computes distance and altitude at end of circular transition.
    Based on TO distance analysis method in Raymer book.
    Obstacle clearance height set for GA / Part 23 aircraft
    Override for analyzing Part 25 aircraft
    Inputs
    ------
    fltcond|Utrue
        Transition true airspeed (generally avg of vr and v2) (scalar, m/s)
    gamma : float
        Climb out flight path angle (scalar, rad)
    Outputs
    -------
    s_transition : float
        Horizontal distance during transition to v2 climb out (scalar, m)
    h_transition : float
        Altitude at transition point (scalar, m)
    t_transition : float
        Elapsed time in transition (scalar, s)
    Options
    -------
    h_obstacle : float
        Obstacle height to clear (in **meters**) (default 10.66, equiv. 35 ft)
    load_factor : float
        Load factor during rotation and transition (default 1.2 from Raymer book)
    """
    def initialize(self):
        self.options.declare('h_obstacle',default=10.66,desc='Obstacle clearance height in m')
        self.options.declare('load_factor', default=1.2, desc='Load factor during circular arc transition')
    def setup(self):
        # src_indices=0: the transition is a point analysis, so take only the
        # first node of the (possibly vector) upstream connections
        self.add_input('fltcond|Utrue', units='m/s', src_indices=0)
        self.add_input('gamma', units='rad', src_indices=0)
        self.add_output('s_transition', units='m')
        self.add_output('h_transition', units='m')
        self.add_output('t_transition',units='s')
        self.declare_partials(['s_transition','h_transition','t_transition'], ['fltcond|Utrue','gamma'])
    def compute(self, inputs, outputs):
        hobs = self.options['h_obstacle']
        # (n - 1): only the load factor in excess of 1 g curves the flight path
        nfactor = self.options['load_factor'] - 1
        g = 9.80665 #m/s^2
        gam = inputs['gamma']
        ut = inputs['fltcond|Utrue']
        # radius of the circular pull-up arc: R = V^2 / ((n - 1) g)
        R = ut**2/nfactor/g
        st = R*np.sin(gam)
        ht = R*(1-np.cos(gam))
        #alternate formula if the obstacle is cleared during transition
        if ht > hobs:
            st = np.sqrt(R**2-(R-hobs)**2)
            ht = hobs
        outputs['s_transition'] = st
        outputs['h_transition'] = ht
        # time to traverse the arc, approximated as distance / airspeed
        outputs['t_transition'] = st / ut
    def compute_partials(self, inputs, J):
        hobs = self.options['h_obstacle']
        nfactor = self.options['load_factor'] - 1
        g = 9.80665 #m/s^2
        gam = inputs['gamma']
        ut = inputs['fltcond|Utrue']
        R = ut**2/nfactor/g
        dRdut = 2*ut/nfactor/g
        st = R*np.sin(gam)
        ht = R*(1-np.cos(gam))
        #alternate formula if the obstacle is cleared during transition
        # (branch condition must mirror compute() so derivatives match the active formula)
        if ht > hobs:
            st = np.sqrt(R**2-(R-hobs)**2)
            dstdut = 1/2/np.sqrt(R**2-(R-hobs)**2) * (2*R*dRdut - 2*(R-hobs)*dRdut)
            dstdgam = 0
            dhtdut = 0
            dhtdgam = 0
        else:
            dhtdut = dRdut*(1-np.cos(gam))
            dhtdgam = R*np.sin(gam)
            dstdut = dRdut*np.sin(gam)
            dstdgam = R*np.cos(gam)
        J['s_transition','gamma'] = dstdgam
        J['s_transition','fltcond|Utrue'] = dstdut
        J['h_transition','gamma'] = dhtdgam
        J['h_transition','fltcond|Utrue'] = dhtdut
        # quotient rule for t = s / ut
        J['t_transition','gamma'] = dstdgam / ut
        J['t_transition','fltcond|Utrue'] = (dstdut * ut - st) / ut ** 2
class TakeoffClimb(ExplicitComponent):
    """
    Computes ground distance from end of transition until obstacle is cleared.
    Analysis based on Raymer book.
    Inputs
    ------
    gamma : float
        Climb out flight path angle (scalar, rad)
    h_transition : float
        Altitude at transition point (scalar, m)
    fltcond|Utrue : float
        True airspeed during climb out (scalar, m/s); used for the time output only
    Outputs
    -------
    s_climb : float
        Horizontal distance from end of transition until obstacle is cleared (scalar, m)
    t_climb : float
        Elapsed time in the climb-out segment (scalar, s)
    Options
    -------
    h_obstacle : float
        Obstacle height to clear (in **meters**) (default 10.66, equiv. 35 ft)
    """
    def initialize(self):
        self.options.declare('h_obstacle',default=10.66,desc='Obstacle clearance height in m')
    def setup(self):
        # src_indices=-1: take the last node of the upstream vectors (end of the phase)
        self.add_input('h_transition', units='m')
        self.add_input('gamma', units='rad',src_indices=-1)
        self.add_input('fltcond|Utrue', units='m/s',src_indices=-1)
        self.add_output('s_climb', units='m')
        self.add_output('t_climb', units='s')
        # s_climb does not depend on airspeed, so 'fltcond|Utrue' is omitted here
        self.declare_partials(['s_climb'], ['h_transition','gamma'])
        self.declare_partials(['t_climb'], ['h_transition','gamma','fltcond|Utrue'])
    def compute(self, inputs, outputs):
        hobs = self.options['h_obstacle']
        gam = inputs['gamma']
        ht = inputs['h_transition']
        ut = inputs['fltcond|Utrue']
        # straight climb at angle gamma covers the remaining height to the obstacle
        sc = (hobs-ht)/np.tan(gam)
        outputs['s_climb'] = sc
        outputs['t_climb'] = sc / ut
    def compute_partials(self, inputs, J):
        hobs = self.options['h_obstacle']
        gam = inputs['gamma']
        ht = inputs['h_transition']
        ut = inputs['fltcond|Utrue']
        sc = (hobs-ht)/np.tan(gam)
        # d/dgam of cot(gam) = -csc^2(gam); written as -1/tan^2 * sec^2
        J['s_climb','gamma'] = -(hobs-ht)/np.tan(gam)**2 * (1/np.cos(gam))**2
        J['s_climb','h_transition'] = -1/np.tan(gam)
        J['t_climb','gamma'] = J['s_climb','gamma'] / ut
        J['t_climb','h_transition'] = J['s_climb','h_transition'] / ut
        J['t_climb','fltcond|Utrue'] = - sc / ut ** 2
class RobustRotationPhase(oc.PhaseGroup):
    """
    This adds general mission analysis capabilities to an existing airplane model.
    The BaseAircraftGroup object is passed in. It should be built to accept the following inputs and return the following outputs.
    The outputs should be promoted to the top level in the component.
    Inputs
    ------
    range : float
        Total distance travelled (vector, m)
    fltcond|h : float
        Altitude (vector, m)
    fltcond|vs : float
        Vertical speed (vector, m/s)
    fltcond|Ueas : float
        Equivalent airspeed (vector, m/s)
    fltcond|Utrue : float
        True airspeed (vector, m/s)
    fltcond|p : float
        Pressure (vector, Pa)
    fltcond|rho : float
        Density (vector, kg/m3)
    fltcond|T : float
        Temperature (vector, K)
    fltcond|q : float
        Dynamic pressure (vector, Pa)
    fltcond|CL : float
        Lift coefficient (vector, dimensionless)
    throttle : float
        Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)
    propulsor_active : float
        If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.
        It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)
    braking : float
        Percentage brakes applied, from 0 to 1. Should not be applied in the air or nonphysical effects will result (vector, dimensionless)
    lift : float
        Lift force (vector, N)
    Outputs
    -------
    thrust : float
        Total thrust force produced by all propulsors (vector, N)
    drag : float
        Total drag force in the airplane axis produced by all sources of drag (vector, N)
    weight : float
        Weight (mass, really) of the airplane at each point in time. Generally will need to be integrated by Dymos as a state with a rate source (vector, kg)
    ac|geom|wing|S_ref
        Wing reference area (scalar, m**2)
    ac|aero|CLmax_TO
        CLmax with flaps in max takeoff position (scalar, dimensionless)
    ac|weights|MTOW
        Maximum takeoff weight (scalar, kg)
    """
    def initialize(self):
        self.options.declare('num_nodes',default=1)
        self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. v0v1, cruise')
        self.options.declare('aircraft_model',default=None)
        self.options.declare('h_obstacle',default=10.66, )
    def setup(self):
        nn = self.options['num_nodes']
        ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=["*"])
        flight_phase = self.options['flight_phase']
        if flight_phase == 'rotate':
            # rotation: no braking, engine-out (propulsor_active = 0), full throttle
            ivcomp.add_output('braking',val=np.zeros((nn,)))
            ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))
            ivcomp.add_output('throttle',val=np.ones((nn,)))
        # flight conditions are sea level takeoff, transition speed
        # split off a single node to compute climb angle
        # compute the transition distance and add it to range_initial
        # compute the transition time as a function of the groundspeed
        # provide transition time as duration
        ivcomp.add_output('v2_vstall_mult',val=1.2)
        ivcomp.add_output('vr_vstall_mult',val=1.1)
        ivcomp.add_output('fltcond|vs', val=np.zeros((nn,)),units='m/s')
        ivcomp.add_output('fltcond|cosgamma', val=np.ones((nn,)),units=None)
        ivcomp.add_output('h_obstacle',val=35,units='ft')
        # linearly interpolate altitude from the phase start up to the obstacle height
        self.add_subsystem('altitudes',LinearInterpolator(num_nodes=nn, units='m'),promotes_inputs=[('start_val','h_initial')],promotes_outputs=[('vec','fltcond|h')])
        self.connect('h_obstacle','altitudes.end_val')
        # stall speed from MTOW / CLmax, then Vr and V2 as multiples of Vstall
        self.add_subsystem('stall',StallSpeed(),promotes_inputs=[('CLmax','ac|aero|CLmax_TO'),('weight','ac|weights|MTOW'),'ac|geom|wing|S_ref'],promotes_outputs=['*'])
        self.add_subsystem('vrspeed',ElementMultiplyDivideComp(output_name='takeoff|vr',input_names=['Vstall_eas','vr_vstall_mult'],input_units=['m/s',None]),promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('v2speed',ElementMultiplyDivideComp(output_name='takeoff|v2',input_names=['Vstall_eas','v2_vstall_mult'],input_units=['m/s',None]),promotes_inputs=['*'],promotes_outputs=['*'])
        # airspeed ramps linearly from Vr at the start of the phase to V2 at the end
        self.add_subsystem('speeds',LinearInterpolator(num_nodes=nn,units='kn'),promotes_inputs=[('start_val','takeoff|vr'),('end_val','takeoff|v2')],promotes_outputs=[('vec','fltcond|Ueas')])
        self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=False), promotes_inputs=['*'], promotes_outputs=['*'])
        # pretty confident there's a simpler closed form multiple for CL at v2
        self.add_subsystem('clcomp',SteadyFlightCL(num_nodes=nn), promotes_inputs=['weight','fltcond|*','ac|*'],promotes_outputs=['*'])
        # the aircraft model needs to provide thrust and drag
        self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn,flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])
        # climb angle -> circular-arc transition -> straight climb to the obstacle
        self.add_subsystem('climbangle',ClimbAngleComp(num_nodes=nn),promotes_inputs=['drag','weight','thrust'],promotes_outputs=['gamma'])
        self.add_subsystem('transition',TakeoffTransition(),promotes_inputs=['fltcond|Utrue','gamma'],promotes_outputs=['h_transition','s_transition','t_transition'])
        self.add_subsystem('v2climb',TakeoffClimb(),promotes_inputs=['h_transition','gamma','fltcond|Utrue'],promotes_outputs=['s_climb','t_climb'])
        # total range, duration, and final altitude for this phase
        self.add_subsystem('tod_final',AddSubtractComp(output_name='range_final',input_names=['range_initial','s_transition','s_climb'],units='m'),promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('duration',AddSubtractComp(output_name='duration',input_names=['t_transition','t_climb'],units='s'),promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('h_final',AddSubtractComp(output_name='fltcond|h_final',input_names=['h_obstacle'],units='m'),promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('ranges',LinearInterpolator(num_nodes=nn,units='m'),promotes_inputs=[('start_val','range_initial'),('end_val','range_final')],promotes_outputs=[('vec','range')])
from __future__ import division
from openmdao.api import Group, ExplicitComponent, IndepVarComp, BalanceComp, ImplicitComponent
import openconcept.api as oc
from openconcept.analysis.atmospherics.compute_atmos_props import ComputeAtmosphericProperties
from openconcept.analysis.aerodynamics import Lift, StallSpeed
from openconcept.utilities.math import ElementMultiplyDivideComp, AddSubtractComp
from openconcept.utilities.math.integrals import Integrator
from openconcept.utilities.linearinterp import LinearInterpolator
from openconcept.utilities.math.integrals import Integrator
import numpy as np
import copy
class ClimbAngleComp(ExplicitComponent):
    """
    Computes the steady climb angle from the thrust / drag / weight balance:
    gamma = arcsin((thrust - drag) / (weight * g)).

    Inputs: drag (N), weight (kg), thrust (N) — all vectors of length num_nodes.
    Output: gamma (rad), same shape. The Jacobian is diagonal (node i depends
    only on node i), so partials are declared in sparse row/col form.
    """
    def initialize(self):
        self.options.declare('num_nodes', default=1)
    def setup(self):
        nn = self.options['num_nodes']
        diag = np.arange(0, nn)
        self.add_input('drag', units='N', shape=(nn,))
        self.add_input('weight', units='kg', shape=(nn,))
        self.add_input('thrust', units='N', shape=(nn,))
        self.add_output('gamma', units='rad', shape=(nn,))
        self.declare_partials(['gamma'], ['weight', 'thrust', 'drag'], cols=diag, rows=diag)
    def compute(self, inputs, outputs):
        g = 9.80665
        excess = inputs['thrust'] - inputs['drag']
        outputs['gamma'] = np.arcsin(excess / inputs['weight'] / g)
    def compute_partials(self, inputs, J):
        g = 9.80665
        w = inputs['weight']
        excess = inputs['thrust'] - inputs['drag']
        sin_gamma = excess / w / g
        # d(arcsin x)/dx = 1 / sqrt(1 - x^2), then chain rule per input
        d_arcsin = 1 / np.sqrt(1 - sin_gamma ** 2)
        J['gamma', 'thrust'] = d_arcsin / w / g
        J['gamma', 'drag'] = -d_arcsin / w / g
        J['gamma', 'weight'] = -d_arcsin * excess / w ** 2 / g
class FlipVectorComp(ExplicitComponent):
    """
    Reverses the order of a vector, optionally negating every element.

    Options
    -------
    num_nodes : int
        Length of the vector (default 1)
    negative : bool
        If True, the reversed vector is multiplied by -1 (default False)
    units : str or None
        Units applied to both 'vec_in' and 'vec_out' (default None)
    """
    def initialize(self):
        self.options.declare('num_nodes', default=1)
        self.options.declare('negative', default=False)
        self.options.declare('units', default=None)
    def setup(self):
        nn = self.options['num_nodes']
        units = self.options['units']
        sign = -1 if self.options['negative'] else 1
        self.add_input('vec_in', units=units, shape=(nn,))
        self.add_output('vec_out', units=units, shape=(nn,))
        # constant anti-diagonal Jacobian: output element (nn-1-i) <- input element i
        self.declare_partials(['vec_out'], ['vec_in'],
                              rows=np.arange(nn - 1, -1, -1),
                              cols=np.arange(0, nn, 1),
                              val=sign * np.ones((nn,)))
    def compute(self, inputs, outputs):
        sign = -1 if self.options['negative'] else 1
        outputs['vec_out'] = sign * np.flip(inputs['vec_in'], 0)
class BFLImplicitSolve(ImplicitComponent):
    """
    Solves for the balanced-field-length decision speed V1.

    The residual drives 'takeoff|v1' to the point where the continue-takeoff
    distance equals the accelerate-stop (abort) distance. If the solver pushes
    V1 up to the rotation speed Vr, the residual switches to pinning V1 = Vr
    instead, since V1 cannot physically exceed Vr.
    """
    def setup(self):
        self.add_input('distance_continue', units='m')
        self.add_input('distance_abort', units='m')
        self.add_input('takeoff|vr', units='m/s')
        self.add_output('takeoff|v1', units='m/s',val=20,lower=10,upper=150)
        self.declare_partials('takeoff|v1',['distance_continue','distance_abort','takeoff|v1','takeoff|vr'])
    def apply_nonlinear(self, inputs, outputs, residuals):
        speedtol = 1e-1
        disttol = 0
        # v1 within speedtol of (or above) vr: pin v1 to vr
        if inputs['takeoff|vr'] < outputs['takeoff|v1'] + speedtol:
            residuals['takeoff|v1'] = inputs['takeoff|vr'] - outputs['takeoff|v1']
        else:
            # otherwise balance the two field lengths
            residuals['takeoff|v1'] = inputs['distance_continue'] - inputs['distance_abort']
        # near vr but abort distance still exceeds continue distance:
        # keep balancing distances rather than pinning to vr
        if inputs['takeoff|vr'] >= outputs['takeoff|v1'] and inputs['takeoff|vr'] - outputs['takeoff|v1'] < speedtol and (inputs['distance_abort'] - inputs['distance_continue']) > disttol:
            residuals['takeoff|v1'] = inputs['distance_continue'] - inputs['distance_abort']
    def linearize(self, inputs, outputs, partials):
        # derivative branches must mirror apply_nonlinear exactly
        speedtol = 1e-1
        disttol = 0
        if inputs['takeoff|vr'] < outputs['takeoff|v1'] + speedtol:
            partials['takeoff|v1','distance_continue'] = 0
            partials['takeoff|v1','distance_abort'] = 0
            partials['takeoff|v1','takeoff|vr'] = 1
            partials['takeoff|v1','takeoff|v1'] = -1
        else:
            partials['takeoff|v1','distance_continue'] = 1
            partials['takeoff|v1','distance_abort'] = -1
            partials['takeoff|v1','takeoff|vr'] = 0
            partials['takeoff|v1','takeoff|v1'] = 0
        if inputs['takeoff|vr'] >= outputs['takeoff|v1'] and inputs['takeoff|vr'] - outputs['takeoff|v1'] < speedtol and (inputs['distance_abort'] - inputs['distance_continue']) > disttol:
            partials['takeoff|v1','distance_continue'] = 1
            partials['takeoff|v1','distance_abort'] = -1
            partials['takeoff|v1','takeoff|vr'] = 0
            partials['takeoff|v1','takeoff|v1'] = 0
class Groundspeeds(ExplicitComponent):
    """
    Computes groundspeed and flight-path-angle trig from airspeed and vertical speed:
    groundspeed = sqrt(Utrue**2 - vs**2), singamma = vs / Utrue,
    cosgamma = groundspeed / Utrue.

    When vs > Utrue (nonphysical during solver iterations), the quantity under
    the square root is replaced by 0.01 so outputs and partials stay finite.
    """
    def initialize(self):
        self.options.declare('num_nodes',default=1,desc="Number of Simpson intervals to use per seg (eg. climb, cruise, descend). Number of analysis points is 2N+1")
    def setup(self):
        nn = self.options['num_nodes']
        self.add_input('fltcond|vs', units='m/s',shape=(nn,))
        self.add_input('fltcond|Utrue', units='m/s',shape=(nn,))
        self.add_output('fltcond|groundspeed', units='m/s',shape=(nn,))
        self.add_output('fltcond|cosgamma', shape=(nn,), desc='Cosine of the flight path angle')
        self.add_output('fltcond|singamma', shape=(nn,), desc='sin of the flight path angle' )
        # all outputs are elementwise in the inputs: diagonal Jacobian
        self.declare_partials(['fltcond|groundspeed','fltcond|cosgamma','fltcond|singamma'], ['fltcond|vs','fltcond|Utrue'], rows=range(nn), cols=range(nn))
    def compute(self, inputs, outputs):
        nn = self.options['num_nodes']
        # unguarded sqrt may produce NaN; that NaN is used below as the flag
        # for the nonphysical vs > Utrue case
        inside = inputs['fltcond|Utrue']**2-inputs['fltcond|vs']**2
        groundspeed = np.sqrt(inside)
        groundspeed_fixed = np.sqrt(np.where(np.less(inside, 0.0), 0.01, inside))
        outputs['fltcond|groundspeed'] = groundspeed_fixed
        outputs['fltcond|singamma'] = np.where(np.isnan(groundspeed),1,inputs['fltcond|vs'] / inputs['fltcond|Utrue'])
        outputs['fltcond|cosgamma'] = groundspeed_fixed / inputs['fltcond|Utrue']
    def compute_partials(self, inputs, J):
        inside = inputs['fltcond|Utrue']**2-inputs['fltcond|vs']**2
        groundspeed = np.sqrt(inside)
        groundspeed_fixed = np.sqrt(np.where(np.less(inside, 0.0), 0.01, inside))
        # in the NaN (guarded) region the outputs are constants, so partials are zeroed
        J['fltcond|groundspeed','fltcond|vs'] = np.where(np.isnan(groundspeed),0,(1/2) / groundspeed_fixed * (-2) * inputs['fltcond|vs'])
        J['fltcond|groundspeed','fltcond|Utrue'] = np.where(np.isnan(groundspeed),0, (1/2) / groundspeed_fixed * 2 * inputs['fltcond|Utrue'])
        J['fltcond|singamma','fltcond|vs'] = np.where(np.isnan(groundspeed), 0, 1 / inputs['fltcond|Utrue'])
        J['fltcond|singamma','fltcond|Utrue'] = np.where(np.isnan(groundspeed), 0, - inputs['fltcond|vs'] / inputs['fltcond|Utrue'] ** 2)
        J['fltcond|cosgamma','fltcond|vs'] = J['fltcond|groundspeed','fltcond|vs'] / inputs['fltcond|Utrue']
        J['fltcond|cosgamma','fltcond|Utrue'] = (J['fltcond|groundspeed','fltcond|Utrue'] * inputs['fltcond|Utrue'] - groundspeed_fixed) / inputs['fltcond|Utrue']**2
class HorizontalAcceleration(ExplicitComponent):
    """
    Computes acceleration along the flight path:
    a = T/m - D/m - braking * max(0, g - L/m) - g * sin(gamma).

    The braking term is floored at zero (floor_vec) so that once lift exceeds
    weight, wheel brakes cannot produce a nonphysical forward force.
    """
    def initialize(self):
        self.options.declare('num_nodes',default=1)
    def setup(self):
        nn = self.options['num_nodes']
        g = 9.80665
        self.add_input('weight', units='kg', shape=(nn,))
        self.add_input('drag', units='N',shape=(nn,))
        self.add_input('lift', units='N',shape=(nn,))
        self.add_input('thrust', units='N',shape=(nn,))
        self.add_input('fltcond|singamma',shape=(nn,))
        self.add_input('braking',shape=(nn,))
        self.add_output('accel_horiz', units='m/s**2', shape=(nn,))
        arange=np.arange(nn)
        self.declare_partials(['accel_horiz'], ['weight','drag','lift','thrust','braking'], rows=arange, cols=arange)
        # d(accel)/d(singamma) = -g is constant, declared once here
        self.declare_partials(['accel_horiz'], ['fltcond|singamma'], rows=arange, cols=arange, val=-g*np.ones((nn,)))
    def compute(self, inputs, outputs):
        nn = self.options['num_nodes']
        g = 9.80665
        m = inputs['weight']
        # 1 where the wheels still carry load (g - L/m >= 0), else 0
        floor_vec = np.where(np.less((g-inputs['lift']/m),0.0),0.0,1.0)
        accel = inputs['thrust']/m - inputs['drag']/m - floor_vec*inputs['braking']*(g-inputs['lift']/m) - g*inputs['fltcond|singamma']
        outputs['accel_horiz'] = accel
    def compute_partials(self, inputs, J):
        g = 9.80665
        m = inputs['weight']
        floor_vec = np.where(np.less((g-inputs['lift']/m),0.0),0.0,1.0)
        J['accel_horiz','thrust'] = 1/m
        J['accel_horiz','drag'] = -1/m
        J['accel_horiz','braking'] = -floor_vec*(g-inputs['lift']/m)
        J['accel_horiz','lift'] = floor_vec*inputs['braking']/m
        J['accel_horiz','weight'] = (inputs['drag']-inputs['thrust']-floor_vec*inputs['braking']*inputs['lift'])/m**2
class VerticalAcceleration(ExplicitComponent):
    """
    Computes acceleration normal to the ground plane:
    a = (L*cos(gamma) + (T - D)*sin(gamma) - m*g) / m,
    clipped to the range [-1g, +2.5g].
    """
    def initialize(self):
        self.options.declare('num_nodes',default=1)
    def setup(self):
        nn = self.options['num_nodes']
        g = 9.80665
        self.add_input('weight', units='kg', shape=(nn,))
        self.add_input('drag', units='N',shape=(nn,))
        self.add_input('lift', units='N',shape=(nn,))
        self.add_input('thrust', units='N',shape=(nn,))
        self.add_input('fltcond|singamma',shape=(nn,))
        self.add_input('fltcond|cosgamma',shape=(nn,))
        self.add_output('accel_vert', units='m/s**2', shape=(nn,),upper=2.5*g,lower=-1*g)
        arange=np.arange(nn)
        self.declare_partials(['accel_vert'], ['weight','drag','lift','thrust','fltcond|singamma','fltcond|cosgamma'], rows=arange, cols=arange)
    def compute(self, inputs, outputs):
        nn = self.options['num_nodes']
        g = 9.80665
        cosg = inputs['fltcond|cosgamma']
        sing = inputs['fltcond|singamma']
        accel = (inputs['lift']*cosg + (inputs['thrust']-inputs['drag'])*sing - g*inputs['weight'])/inputs['weight']
        accel = np.clip(accel, -g, 2.5*g)
        outputs['accel_vert'] = accel
    def compute_partials(self, inputs, J):
        # NOTE(review): compute() clips accel to [-g, 2.5g] but these partials are
        # for the unclipped expression, so the derivative is inconsistent when the
        # clip is active — confirm whether this is intentional.
        g = 9.80665
        m = inputs['weight']
        cosg = inputs['fltcond|cosgamma']
        sing = inputs['fltcond|singamma']
        J['accel_vert','thrust'] = sing / m
        J['accel_vert','drag'] = -sing / m
        J['accel_vert','lift'] = cosg / m
        J['accel_vert','fltcond|singamma'] = (inputs['thrust']-inputs['drag']) / m
        J['accel_vert','fltcond|cosgamma'] = inputs['lift'] / m
        J['accel_vert','weight'] = -(inputs['lift']*cosg + (inputs['thrust']-inputs['drag'])*sing)/m**2
class SteadyFlightCL(ExplicitComponent):
    """
    Computes the lift coefficient required for steady flight at each node:
    CL = cos(gamma) * m * g / (q * S_ref).
    """
    def initialize(self):
        self.options.declare('num_nodes',default=5,desc="Number of Simpson intervals to use per seg (eg. climb, cruise, descend). Number of analysis points is 2N+1")
        self.options.declare('mission_segments',default=['climb','cruise','descent'])
    def setup(self):
        nn = self.options['num_nodes']
        diag = np.arange(nn)
        self.add_input('weight', units='kg', shape=(nn,))
        self.add_input('fltcond|q', units='N * m**-2', shape=(nn,))
        self.add_input('ac|geom|wing|S_ref', units='m **2')
        self.add_input('fltcond|cosgamma', val=1.0, shape=(nn,))
        self.add_output('fltcond|CL', shape=(nn,))
        # CL at node i depends only on node i of the vector inputs...
        self.declare_partials(['fltcond|CL'], ['weight', 'fltcond|q', "fltcond|cosgamma"], rows=diag, cols=diag)
        # ...while every node depends on the single scalar wing area
        self.declare_partials(['fltcond|CL'], ['ac|geom|wing|S_ref'], rows=diag, cols=np.zeros(nn))
    def compute(self, inputs, outputs):
        g = 9.80665
        cosg = inputs['fltcond|cosgamma']
        q = inputs['fltcond|q']
        sref = inputs['ac|geom|wing|S_ref']
        outputs['fltcond|CL'] = cosg*g*inputs['weight']/q/sref
    def compute_partials(self, inputs, J):
        g = 9.80665
        cosg = inputs['fltcond|cosgamma']
        w = inputs['weight']
        q = inputs['fltcond|q']
        sref = inputs['ac|geom|wing|S_ref']
        J['fltcond|CL', 'weight'] = cosg*g/q/sref
        J['fltcond|CL', 'fltcond|q'] = - cosg*g*w / q**2 / sref
        J['fltcond|CL', 'ac|geom|wing|S_ref'] = - cosg*g*w / q / sref**2
        J['fltcond|CL', 'fltcond|cosgamma'] = g*w/q/sref
class GroundRollPhase(oc.PhaseGroup):
    """
    Ground roll segment of the takeoff (v0->v1, v1->vr, or rejected v1->v0).

    Integrates horizontal acceleration to airspeed and groundspeed to range.
    The v0v1 and v1vr phases shoot forward in time; the rejected-takeoff v1v0
    phase shoots backward from near-zero speed so airspeed never goes negative.
    In every case a BalanceComp solves for the phase 'duration' that makes the
    end (or start) airspeed hit the target decision/rotation speed.
    """
    def initialize(self):
        self.options.declare('num_nodes',default=1)
        self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. v0v1, cruise')
        self.options.declare('aircraft_model',default=None)
    def setup(self):
        nn = self.options['num_nodes']
        ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=["*"])
        # low CL on the ground roll; altitude and vertical speed pinned to zero
        ivcomp.add_output('fltcond|CL', val=np.ones((nn,))*0.1)
        ivcomp.add_output('vr_vstall_mult',val=1.1)
        ivcomp.add_output('fltcond|h',val=np.zeros((nn,)),units='m')
        ivcomp.add_output('fltcond|vs',val=np.zeros((nn,)),units='m/s')
        ivcomp.add_output('zero_speed',val=2,units='m/s')
        flight_phase = self.options['flight_phase']
        if flight_phase == 'v0v1':
            # all engines, light rolling friction modeled as 3% braking
            ivcomp.add_output('braking',val=np.ones((nn,))*0.03)
            ivcomp.add_output('propulsor_active',val=np.ones((nn,)))
            ivcomp.add_output('throttle',val=np.ones((nn,)))
            zero_start = True
        elif flight_phase == 'v1vr':
            # engine failure at v1: propulsor_active = 0
            ivcomp.add_output('braking',val=np.ones((nn,))*0.03)
            ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))
            ivcomp.add_output('throttle',val=np.ones((nn,)))
            zero_start = False
        elif flight_phase == 'v1v0':
            # rejected takeoff: heavy braking, throttle closed
            ivcomp.add_output('braking',val=0.4*np.ones((nn,)))
            ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))
            ivcomp.add_output('throttle',val=np.zeros((nn,)))
            zero_start=False
        self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=True), promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('gs',Groundspeeds(num_nodes=nn),promotes_inputs=['*'],promotes_outputs=['*'])
        # add the user-defined aircraft model
        self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn,flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('lift',Lift(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('stall',StallSpeed(),promotes_inputs=[('CLmax','ac|aero|CLmax_TO'),('weight','ac|weights|MTOW'),'ac|geom|wing|S_ref'],promotes_outputs=['*'])
        self.add_subsystem('vrspeed',ElementMultiplyDivideComp(output_name='takeoff|vr',input_names=['Vstall_eas','vr_vstall_mult'],input_units=['m/s',None]),promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('haccel',HorizontalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
        if flight_phase == 'v1v0':
            #unfortunately need to shoot backwards to avoid negative airspeeds
            #reverse the order of the accelerations so the last one is first (and make them negative)
            self.add_subsystem('flipaccel', FlipVectorComp(num_nodes=nn, units='m/s**2', negative=True), promotes_inputs=[('vec_in','accel_horiz')])
            #integrate the timesteps in reverse from near zero speed.
            ode_integ = self.add_subsystem('ode_integ', Integrator(num_nodes=nn, method='simpson', diff_units='s',time_setup='duration'), promotes_inputs=['*'], promotes_outputs=['*'])
            ode_integ.add_integrand('vel_q', units='m/s', rate_name='vel_dqdt', start_name='zero_speed', end_name='fltcond|Utrue_initial', lower=1.5)
            self.connect('flipaccel.vec_out','vel_dqdt')
            #flip the result of the reverse integration again so the flight condition is forward and consistent with everythign else
            self.add_subsystem('flipvel', FlipVectorComp(num_nodes=nn, units='m/s', negative=False), promotes_outputs=[('vec_out','fltcond|Utrue')])
            self.connect('vel_q','flipvel.vec_in')
            # now set the time step so that backwards shooting results in the correct 'initial' segment airspeed
            self.add_subsystem('v0constraint',BalanceComp(name='duration',units='s',eq_units='m/s',rhs_name='fltcond|Utrue_initial',lhs_name='takeoff|v1',val=10.,upper=100.,lower=1.),
                                promotes_inputs=['*'],promotes_outputs=['duration'])
        else:
            # forward shooting for these acceleration segmentes
            ode_integ = self.add_subsystem('ode_integ', Integrator(num_nodes=nn, method='simpson', diff_units='s',time_setup='duration'), promotes_inputs=['*'], promotes_outputs=['*'])
            ode_integ.add_integrand('fltcond|Utrue', units='m/s', rate_name='accel_horiz', start_name='fltcond|Utrue_initial', end_name='fltcond|Utrue_final', lower=1.5)
            if flight_phase == 'v0v1':
                # start from rest; solve duration so the phase ends exactly at v1
                self.connect('zero_speed','fltcond|Utrue_initial')
                self.add_subsystem('v1constraint',BalanceComp(name='duration',units='s',eq_units='m/s',rhs_name='fltcond|Utrue_final',lhs_name='takeoff|v1',val=10.,upper=100.,lower=1.),
                                    promotes_inputs=['*'],promotes_outputs=['duration'])
            elif flight_phase == 'v1vr':
                # solve duration so the phase ends exactly at the rotation speed vr
                self.add_subsystem('vrconstraint',BalanceComp(name='duration',units='s',eq_units='m/s',rhs_name='fltcond|Utrue_final',lhs_name='takeoff|vr',val=5.,upper=12.,lower=0.0),
                                    promotes_inputs=['*'],promotes_outputs=['duration'])
        if zero_start:
            ode_integ.add_integrand('range', rate_name='fltcond|groundspeed', units='m', zero_start=True)
        else:
            ode_integ.add_integrand('range', rate_name='fltcond|groundspeed', units='m')
class RotationPhase(oc.PhaseGroup):
    """
    Rotation / liftoff segment: integrates horizontal and vertical acceleration
    from the rotation point until the obstacle height h_obs is cleared.
    The phase 'duration' is solved by a BalanceComp so the final altitude
    equals h_obs. Lift is computed at a fixed fraction (CL_rotate_mult)
    of CLmax in the takeoff configuration.
    """
    def initialize(self):
        self.options.declare('num_nodes',default=1)
        self.options.declare('flight_phase',default=None)
        self.options.declare('aircraft_model',default=None)
    def setup(self):
        nn = self.options['num_nodes']
        ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=["*"])
        ivcomp.add_output('CL_rotate_mult', val=np.ones((nn,))*0.83)
        ivcomp.add_output('h_obs', val=35, units='ft')
        flight_phase = self.options['flight_phase']
        if flight_phase == 'rotate':
            # airborne (no braking), engine-out, full throttle
            ivcomp.add_output('braking',val=np.zeros((nn,)))
            ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))
            ivcomp.add_output('throttle',val=np.ones((nn,)))
        self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=True), promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('gs',Groundspeeds(num_nodes=nn),promotes_inputs=['*'],promotes_outputs=['*'])
        # fltcond|CL = CL_rotate_mult * CLmax_TO at every node
        clcomp = self.add_subsystem('clcomp',ElementMultiplyDivideComp(output_name='fltcond|CL', input_names=['CL_rotate_mult','ac|aero|CLmax_TO'],
                                                                       vec_size=[nn,1], length=1),
                                    promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn,flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('lift',Lift(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('haccel',HorizontalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
        self.add_subsystem('vaccel',VerticalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
        # TODO always starts from zero altitude
        # solve for the duration that makes the final altitude equal h_obs
        self.add_subsystem('clear_obstacle',BalanceComp(name='duration',units='s',val=1,eq_units='m',rhs_name='fltcond|h_final',lhs_name='h_obs',lower=0.1,upper=15),
                           promotes_inputs=['*'],promotes_outputs=['duration'])
        # chained integrations: accel -> airspeed, groundspeed -> range,
        # vertical accel -> vertical speed, vertical speed -> altitude
        int1 = self.add_subsystem('intvelocity', Integrator(num_nodes=nn, method='simpson',diff_units='s',time_setup='duration'), promotes_outputs=['*'], promotes_inputs=['*'])
        int1.add_integrand('fltcond|Utrue', rate_name='accel_horiz', units='m/s', lower=0.1)
        int2 = self.add_subsystem('intrange', Integrator(num_nodes=nn, method='simpson',diff_units='s',time_setup='duration'), promotes_outputs=['*'], promotes_inputs=['*'])
        int2.add_integrand('range', rate_name='fltcond|groundspeed', units='m')
        int3 = self.add_subsystem('intvs', Integrator(num_nodes=nn, method='simpson',diff_units='s',time_setup='duration'), promotes_outputs=['*'], promotes_inputs=['*'])
        int3.add_integrand('fltcond|vs', rate_name='accel_vert', units='m/s', zero_start=True)
        int4 = self.add_subsystem('inth', Integrator(num_nodes=nn, method='simpson',diff_units='s',time_setup='duration'), promotes_outputs=['*'], promotes_inputs=['*'])
        int4.add_integrand('fltcond|h', rate_name='fltcond|vs', units='m', zero_start=True)
class SteadyFlightPhase(oc.PhaseGroup):
    """Steady (unaccelerated) flight mission phase.

    The user sets equivalent airspeed ('fltcond|Ueas'), vertical speed
    ('fltcond|vs'), and segment 'duration'. Throttle is solved implicitly by a
    BalanceComp so that horizontal acceleration is zero at every analysis node.
    Altitude and range are obtained by integrating vertical speed and
    groundspeed over the phase duration.
    """

    def initialize(self):
        self.options.declare('num_nodes', default=1)
        self.options.declare('flight_phase', default=None, desc='Phase of flight e.g. v0v1, cruise')
        self.options.declare('aircraft_model', default=None)

    def setup(self):
        num_nodes = self.options['num_nodes']

        # Constant per-phase settings (overridable from outside the phase).
        const_ivc = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=['*'])
        const_ivc.add_output('propulsor_active', val=np.ones(num_nodes))
        const_ivc.add_output('braking', val=np.zeros(num_nodes))
        const_ivc.add_output('fltcond|Ueas', val=np.ones((num_nodes,)) * 90, units='m/s')
        const_ivc.add_output('fltcond|vs', val=np.ones((num_nodes,)) * 1, units='m/s')
        const_ivc.add_output('zero_accel', val=np.zeros((num_nodes,)), units='m/s**2')

        # Time integration: altitude from vertical speed, range from groundspeed.
        integrator = self.add_subsystem(
            'ode_integ',
            Integrator(num_nodes=num_nodes, diff_units='s', time_setup='duration', method='simpson'),
            promotes_inputs=['fltcond|vs', 'fltcond|groundspeed'],
            promotes_outputs=['fltcond|h', 'range'])
        integrator.add_integrand('fltcond|h', rate_name='fltcond|vs', val=1.0, units='m')
        self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=num_nodes, true_airspeed_in=False),
                           promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('gs', Groundspeeds(num_nodes=num_nodes),
                           promotes_inputs=['*'], promotes_outputs=['*'])
        # The user-defined aircraft model supplies thrust, drag, and weight.
        self.add_subsystem('acmodel',
                           self.options['aircraft_model'](num_nodes=num_nodes,
                                                          flight_phase=self.options['flight_phase']),
                           promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('clcomp', SteadyFlightCL(num_nodes=num_nodes),
                           promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('lift', Lift(num_nodes=num_nodes),
                           promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('haccel', HorizontalAcceleration(num_nodes=num_nodes),
                           promotes_inputs=['*'], promotes_outputs=['*'])
        integrator.add_integrand('range', rate_name='fltcond|groundspeed', val=1.0, units='m')
        # Implicitly find the throttle setting that makes accel_horiz == zero_accel.
        self.add_subsystem('steadyflt',
                           BalanceComp(name='throttle', val=np.ones((num_nodes,)) * 0.5,
                                       lower=0.01, upper=2.0, units=None, normalize=False,
                                       eq_units='m/s**2', rhs_name='accel_horiz',
                                       lhs_name='zero_accel', rhs_val=np.zeros((num_nodes,))),
                           promotes_inputs=['accel_horiz', 'zero_accel'], promotes_outputs=['throttle'])
# class OldSteadyFlightPhase(Group):
# """
# This component group models steady flight conditions.
# Settable mission parameters include:
# Airspeed (fltcond|Ueas)
# Vertical speed (fltcond|vs)
# Duration of the segment (duration)
# Throttle is set automatically to ensure steady flight
# The BaseAircraftGroup object is passed in.
# The BaseAircraftGroup should be built to accept the following inputs
# and return the following outputs.
# The outputs should be promoted to the top level in the component.
# Inputs
# ------
# range : float
# Total distance travelled (vector, m)
# fltcond|h : float
# Altitude (vector, m)
# fltcond|vs : float
# Vertical speed (vector, m/s)
# fltcond|Ueas : float
# Equivalent airspeed (vector, m/s)
# fltcond|Utrue : float
# True airspeed (vector, m/s)
# fltcond|p : float
# Pressure (vector, Pa)
# fltcond|rho : float
# Density (vector, kg/m3)
# fltcond|T : float
# Temperature (vector, K)
# fltcond|q : float
# Dynamic pressure (vector, Pa)
# fltcond|CL : float
# Lift coefficient (vector, dimensionless)
# throttle : float
# Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)
# propulsor_active : float
# If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.
# It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)
# braking : float
# Brake friction coefficient (default 0.4 for dry runway braking, 0.03 for resistance unbraked)
# Should not be applied in the air or nonphysical effects will result (vector, dimensionless)
# lift : float
# Lift force (vector, N)
# Outputs
# -------
# thrust : float
# Total thrust force produced by all propulsors (vector, N)
# drag : float
# Total drag force in the airplane axis produced by all sources of drag (vector, N)
# weight : float
# Weight (mass, really) of the airplane at each point in time. (vector, kg)
# ac|geom|wing|S_ref
# Wing reference area (scalar, m**2)
# ac|aero|CLmax_TO
# CLmax with flaps in max takeoff position (scalar, dimensionless)
# ac|weights|MTOW
# Maximum takeoff weight (scalar, kg)
# """
# def initialize(self):
# self.options.declare('num_nodes',default=1)
# self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. v0v1, cruise')
# self.options.declare('aircraft_model',default=None)
# def setup(self):
# nn = self.options['num_nodes']
# ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=["*"])
# ivcomp.add_output('propulsor_active', val=np.ones(nn))
# ivcomp.add_output('braking', val=np.zeros(nn))
# ivcomp.add_output('fltcond|Ueas',val=np.ones((nn,))*90, units='m/s')
# ivcomp.add_output('fltcond|vs',val=np.ones((nn,))*1, units='m/s')
# ivcomp.add_output('zero_accel',val=np.zeros((nn,)),units='m/s**2')
# self.add_subsystem('inth',Integrator(num_nodes=nn, method='simpson', quantity_units='m', diff_units='s', time_setup='duration'),
# promotes_inputs=[('dqdt','fltcond|vs'),'duration',('q_initial','fltcond|h_initial')],promotes_outputs=[('q','fltcond|h'),('q_final','fltcond|h_final')])
# self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=False), promotes_inputs=['*'], promotes_outputs=['*'])
# self.add_subsystem('gs',Groundspeeds(num_nodes=nn),promotes_inputs=['*'],promotes_outputs=['*'])
# # add the user-defined aircraft model
# self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn, flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])
# self.add_subsystem('clcomp',SteadyFlightCL(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
# self.add_subsystem('lift',Lift(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
# self.add_subsystem('haccel',HorizontalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])
# self.add_subsystem('intrange',Integrator(num_nodes=nn, method='simpson', quantity_units='m', diff_units='s', time_setup='duration'),
# promotes_inputs=[('dqdt','fltcond|groundspeed'),'duration',('q_initial','range_initial')],promotes_outputs=[('q','range'),('q_final','range_final')])
# self.add_subsystem('steadyflt',BalanceComp(name='throttle',val=np.ones((nn,))*0.5,lower=0.01,upper=2.0,units=None,normalize=False,eq_units='m/s**2',rhs_name='accel_horiz',lhs_name='zero_accel',rhs_val=np.zeros((nn,))),
# promotes_inputs=['accel_horiz','zero_accel'],promotes_outputs=['throttle'])
class ClimbAnglePhase(Group):
    """Computes the climb angle at V2 speed, either all-engine or engine-out.

    Intended for checking takeoff climb-gradient requirements at a single
    condition (sea level, V2 = 1.2 * Vstall); it is not integrated over time.
    The user-supplied aircraft model must provide thrust and drag.
    """

    def initialize(self):
        self.options.declare('num_nodes', default=1)
        self.options.declare('flight_phase', default=None, desc='Phase of flight e.g. v0v1, cruise')
        self.options.declare('aircraft_model', default=None)

    def setup(self):
        num_nodes = self.options['num_nodes']
        const_ivc = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=['*'])
        const_ivc.add_output('v2_vstall_mult', val=1.2)
        const_ivc.add_output('fltcond|h', val=np.zeros((num_nodes,)), units='m')
        const_ivc.add_output('fltcond|cosgamma', val=np.ones((num_nodes,)))
        flight_phase = self.options['flight_phase']
        # Full throttle either way; engine-out is modeled by zeroing propulsor_active.
        if flight_phase == 'AllEngineClimbAngle':
            const_ivc.add_output('propulsor_active', val=np.ones((num_nodes,)))
            const_ivc.add_output('throttle', val=np.ones((num_nodes,)))
        elif flight_phase == 'EngineOutClimbAngle':
            const_ivc.add_output('propulsor_active', val=np.zeros((num_nodes,)))
            const_ivc.add_output('throttle', val=np.ones((num_nodes,)))
        self.add_subsystem('stall', StallSpeed(),
                           promotes_inputs=[('CLmax', 'ac|aero|CLmax_TO'),
                                            ('weight', 'ac|weights|MTOW'),
                                            'ac|geom|wing|S_ref'],
                           promotes_outputs=['*'])
        self.add_subsystem('vrspeed',
                           ElementMultiplyDivideComp(output_name='takeoff|v2',
                                                     input_names=['Vstall_eas', 'v2_vstall_mult'],
                                                     input_units=['m/s', None]),
                           promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=num_nodes, true_airspeed_in=False),
                           promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('clcomp', SteadyFlightCL(num_nodes=num_nodes),
                           promotes_inputs=[('weight', 'ac|weights|MTOW'), 'fltcond|*', 'ac|*'],
                           promotes_outputs=['*'])
        self.connect('takeoff|v2', 'fltcond|Ueas')
        # The aircraft model needs to provide thrust and drag.
        self.add_subsystem('acmodel',
                           self.options['aircraft_model'](num_nodes=num_nodes,
                                                          flight_phase=self.options['flight_phase']),
                           promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('climbangle', ClimbAngleComp(num_nodes=num_nodes),
                           promotes_inputs=['drag', ('weight', 'ac|weights|MTOW'), 'thrust'],
                           promotes_outputs=['gamma'])
class TakeoffTransition(ExplicitComponent):
    """Circular-arc transition from rotation to a steady climb.

    Models a constant-radius pull-up at the configured load factor, with
    radius R = V^2 / ((n - 1) g). If the obstacle height is cleared while
    still on the arc, the along-track distance is instead the chord length
    to the obstacle height and h_transition is capped at h_obstacle.

    Inputs: fltcond|Utrue (m/s, first node), gamma (rad, first node)
    Outputs: s_transition (m), h_transition (m), t_transition (s)
    """

    def initialize(self):
        self.options.declare('h_obstacle', default=10.66, desc='Obstacle clearance height in m')
        self.options.declare('load_factor', default=1.2, desc='Load factor during circular arc transition')

    def setup(self):
        self.add_input('fltcond|Utrue', units='m/s', src_indices=0)
        self.add_input('gamma', units='rad', src_indices=0)
        self.add_output('s_transition', units='m')
        self.add_output('h_transition', units='m')
        self.add_output('t_transition', units='s')
        self.declare_partials(['s_transition', 'h_transition', 't_transition'],
                              ['fltcond|Utrue', 'gamma'])

    def compute(self, inputs, outputs):
        h_obs = self.options['h_obstacle']
        n_minus_one = self.options['load_factor'] - 1
        g = 9.80665  # m/s^2
        gamma = inputs['gamma']
        utrue = inputs['fltcond|Utrue']
        radius = utrue**2 / n_minus_one / g
        s_trans = radius * np.sin(gamma)
        h_trans = radius * (1 - np.cos(gamma))
        # Alternate formula if the obstacle is cleared during transition
        if h_trans > h_obs:
            s_trans = np.sqrt(radius**2 - (radius - h_obs)**2)
            h_trans = h_obs
        outputs['s_transition'] = s_trans
        outputs['h_transition'] = h_trans
        outputs['t_transition'] = s_trans / utrue

    def compute_partials(self, inputs, J):
        h_obs = self.options['h_obstacle']
        n_minus_one = self.options['load_factor'] - 1
        g = 9.80665  # m/s^2
        gamma = inputs['gamma']
        utrue = inputs['fltcond|Utrue']
        radius = utrue**2 / n_minus_one / g
        dR_dut = 2 * utrue / n_minus_one / g
        s_trans = radius * np.sin(gamma)
        h_trans = radius * (1 - np.cos(gamma))
        # Alternate formula if the obstacle is cleared during transition:
        # s depends on utrue only (through R); h is the constant h_obs.
        if h_trans > h_obs:
            s_trans = np.sqrt(radius**2 - (radius - h_obs)**2)
            ds_dut = 1 / 2 / np.sqrt(radius**2 - (radius - h_obs)**2) * (2 * radius * dR_dut - 2 * (radius - h_obs) * dR_dut)
            ds_dgam = 0
            dh_dut = 0
            dh_dgam = 0
        else:
            dh_dut = dR_dut * (1 - np.cos(gamma))
            dh_dgam = radius * np.sin(gamma)
            ds_dut = dR_dut * np.sin(gamma)
            ds_dgam = radius * np.cos(gamma)
        J['s_transition', 'gamma'] = ds_dgam
        J['s_transition', 'fltcond|Utrue'] = ds_dut
        J['h_transition', 'gamma'] = dh_dgam
        J['h_transition', 'fltcond|Utrue'] = dh_dut
        # t = s / u: chain rule w.r.t. gamma, quotient rule w.r.t. utrue.
        J['t_transition', 'gamma'] = ds_dgam / utrue
        J['t_transition', 'fltcond|Utrue'] = (ds_dut * utrue - s_trans) / utrue ** 2
class TakeoffClimb(ExplicitComponent):
    """Steady climb from the end of transition up to obstacle height.

    The remaining height (h_obstacle - h_transition) is closed at a constant
    flight path angle gamma, giving along-track distance s = dh / tan(gamma).

    Inputs: h_transition (m), gamma (rad, last node), fltcond|Utrue (m/s, last node)
    Outputs: s_climb (m), t_climb (s)
    """

    def initialize(self):
        self.options.declare('h_obstacle', default=10.66, desc='Obstacle clearance height in m')

    def setup(self):
        self.add_input('h_transition', units='m')
        self.add_input('gamma', units='rad', src_indices=-1)
        self.add_input('fltcond|Utrue', units='m/s', src_indices=-1)
        self.add_output('s_climb', units='m')
        self.add_output('t_climb', units='s')
        self.declare_partials(['s_climb'], ['h_transition', 'gamma'])
        self.declare_partials(['t_climb'], ['h_transition', 'gamma', 'fltcond|Utrue'])

    def compute(self, inputs, outputs):
        h_obs = self.options['h_obstacle']
        gamma = inputs['gamma']
        h_trans = inputs['h_transition']
        utrue = inputs['fltcond|Utrue']
        s_cl = (h_obs - h_trans) / np.tan(gamma)
        outputs['s_climb'] = s_cl
        outputs['t_climb'] = s_cl / utrue

    def compute_partials(self, inputs, J):
        h_obs = self.options['h_obstacle']
        gamma = inputs['gamma']
        h_trans = inputs['h_transition']
        utrue = inputs['fltcond|Utrue']
        s_cl = (h_obs - h_trans) / np.tan(gamma)
        # d/dgamma of 1/tan = -sec^2/tan^2; t = s / u so its partials chain.
        J['s_climb', 'gamma'] = -(h_obs - h_trans) / np.tan(gamma)**2 * (1 / np.cos(gamma))**2
        J['s_climb', 'h_transition'] = -1 / np.tan(gamma)
        J['t_climb', 'gamma'] = J['s_climb', 'gamma'] / utrue
        J['t_climb', 'h_transition'] = J['s_climb', 'h_transition'] / utrue
        J['t_climb', 'fltcond|Utrue'] = -s_cl / utrue ** 2
class RobustRotationPhase(oc.PhaseGroup):
    """Takeoff rotation/transition/climb-out phase up to the obstacle height.

    Airspeed is interpolated linearly from Vr (1.1 Vstall) to V2 (1.2 Vstall)
    and altitude from the starting height to the obstacle height. The segment
    duration is the sum of the circular-arc transition time and the steady
    climb time, and the final range adds both ground distances.
    """

    def initialize(self):
        self.options.declare('num_nodes', default=1)
        self.options.declare('flight_phase', default=None, desc='Phase of flight e.g. v0v1, cruise')
        self.options.declare('aircraft_model', default=None)
        self.options.declare('h_obstacle', default=10.66, )

    def setup(self):
        num_nodes = self.options['num_nodes']
        const_ivc = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=['*'])
        flight_phase = self.options['flight_phase']
        # Engine-out rotation: no braking, propulsor failed, full throttle.
        if flight_phase == 'rotate':
            const_ivc.add_output('braking', val=np.zeros((num_nodes,)))
            const_ivc.add_output('propulsor_active', val=np.zeros((num_nodes,)))
            const_ivc.add_output('throttle', val=np.ones((num_nodes,)))
        # flight conditions are sea level takeoff, transition speed
        # split off a single node to compute climb angle
        # compute the transition distance and add it to range_initial
        # compute the transition time as a function of the groundspeed
        # provide transition time as duration
        const_ivc.add_output('v2_vstall_mult', val=1.2)
        const_ivc.add_output('vr_vstall_mult', val=1.1)
        const_ivc.add_output('fltcond|vs', val=np.zeros((num_nodes,)), units='m/s')
        const_ivc.add_output('fltcond|cosgamma', val=np.ones((num_nodes,)), units=None)
        const_ivc.add_output('h_obstacle', val=35, units='ft')
        self.add_subsystem('altitudes', LinearInterpolator(num_nodes=num_nodes, units='m'),
                           promotes_inputs=[('start_val', 'h_initial')],
                           promotes_outputs=[('vec', 'fltcond|h')])
        self.connect('h_obstacle', 'altitudes.end_val')
        self.add_subsystem('stall', StallSpeed(),
                           promotes_inputs=[('CLmax', 'ac|aero|CLmax_TO'),
                                            ('weight', 'ac|weights|MTOW'),
                                            'ac|geom|wing|S_ref'],
                           promotes_outputs=['*'])
        self.add_subsystem('vrspeed',
                           ElementMultiplyDivideComp(output_name='takeoff|vr',
                                                     input_names=['Vstall_eas', 'vr_vstall_mult'],
                                                     input_units=['m/s', None]),
                           promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('v2speed',
                           ElementMultiplyDivideComp(output_name='takeoff|v2',
                                                     input_names=['Vstall_eas', 'v2_vstall_mult'],
                                                     input_units=['m/s', None]),
                           promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('speeds', LinearInterpolator(num_nodes=num_nodes, units='kn'),
                           promotes_inputs=[('start_val', 'takeoff|vr'), ('end_val', 'takeoff|v2')],
                           promotes_outputs=[('vec', 'fltcond|Ueas')])
        self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=num_nodes, true_airspeed_in=False),
                           promotes_inputs=['*'], promotes_outputs=['*'])
        # pretty confident there's a simpler closed form multiple for CL at v2
        self.add_subsystem('clcomp', SteadyFlightCL(num_nodes=num_nodes),
                           promotes_inputs=['weight', 'fltcond|*', 'ac|*'],
                           promotes_outputs=['*'])
        self.add_subsystem('acmodel',
                           self.options['aircraft_model'](num_nodes=num_nodes,
                                                          flight_phase=self.options['flight_phase']),
                           promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('climbangle', ClimbAngleComp(num_nodes=num_nodes),
                           promotes_inputs=['drag', 'weight', 'thrust'],
                           promotes_outputs=['gamma'])
        self.add_subsystem('transition', TakeoffTransition(),
                           promotes_inputs=['fltcond|Utrue', 'gamma'],
                           promotes_outputs=['h_transition', 's_transition', 't_transition'])
        self.add_subsystem('v2climb', TakeoffClimb(),
                           promotes_inputs=['h_transition', 'gamma', 'fltcond|Utrue'],
                           promotes_outputs=['s_climb', 't_climb'])
        self.add_subsystem('tod_final',
                           AddSubtractComp(output_name='range_final',
                                           input_names=['range_initial', 's_transition', 's_climb'],
                                           units='m'),
                           promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('duration',
                           AddSubtractComp(output_name='duration',
                                           input_names=['t_transition', 't_climb'], units='s'),
                           promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('h_final',
                           AddSubtractComp(output_name='fltcond|h_final',
                                           input_names=['h_obstacle'], units='m'),
                           promotes_inputs=['*'], promotes_outputs=['*'])
        self.add_subsystem('ranges', LinearInterpolator(num_nodes=num_nodes, units='m'),
                           promotes_inputs=[('start_val', 'range_initial'), ('end_val', 'range_final')],
                           promotes_outputs=[('vec', 'range')])
| true | true |
f72ff337bdeb94f68574412ce3e985fde8a68cf6 | 1,360 | py | Python | edumate/settings/production.py | alfarhanzahedi/edumate | 76ced0063d25431098babb1d163c95c9ddaf3307 | [
"MIT"
] | 1 | 2021-11-28T14:18:16.000Z | 2021-11-28T14:18:16.000Z | edumate/settings/production.py | alfarhanzahedi/edumate | 76ced0063d25431098babb1d163c95c9ddaf3307 | [
"MIT"
] | 1 | 2022-02-10T10:53:12.000Z | 2022-02-10T10:53:12.000Z | edumate/settings/production.py | alfarhanzahedi/edumate | 76ced0063d25431098babb1d163c95c9ddaf3307 | [
"MIT"
] | null | null | null | from kombu.utils.url import safequote
from .base import *
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# NOTE: STATIC_URL and MEDIA_URL set here are overridden further below by the
# Azure CDN URLs; the *_ROOT paths remain the local collectstatic targets.
STATIC_ROOT = '/var/www/edumate/static/'
STATIC_URL = '/static/'
MEDIA_ROOT = '/var/www/edumate/media/'
MEDIA_URL = '/media/'
# Email
# https://docs.djangoproject.com/en/2.2/topics/email/#email-backends
# SMTP delivery via SendGrid; all credentials come from the environment
# through config() so no secrets live in this file.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('SENDGRID_API_KEY')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
# Azure Blob Storage for static and media files (served via django-storages).
AZURE_STORAGE_KEY = config('AZURE_STORAGE_KEY')
AZURE_STORAGE_ACCOUNT = config('AZURE_STORAGE_ACCOUNT')
INSTALLED_APPS += [
    'storages',
]
AZURE_ACCOUNT_KEY = AZURE_STORAGE_KEY
AZURE_ACCOUNT_NAME = AZURE_STORAGE_ACCOUNT
DEFAULT_FILE_STORAGE = 'edumate.azure.AzureMediaStorage'
STATICFILES_STORAGE = 'edumate.azure.AzureStaticStorage'
STATIC_LOCATION = 'static'
MEDIA_LOCATION = 'media'
AZURE_CUSTOM_DOMAIN = f'{AZURE_ACCOUNT_NAME}.blob.core.windows.net'
# These override the local URLs defined at the top of this file.
STATIC_URL = f'https://{AZURE_CUSTOM_DOMAIN}/{STATIC_LOCATION}/'
MEDIA_URL = f'https://{AZURE_CUSTOM_DOMAIN}/{MEDIA_LOCATION}/'
# Celery broker (Redis).
BROKER_URL = config('CELERY_REDIS_LOCATION')
BROKER_TRANSPORT_OPTIONS = {
    'polling_interval': 10,
    'visibility_timeout': 3600
}
| 27.2 | 68 | 0.775735 | from kombu.utils.url import safequote
from .base import *
STATIC_ROOT = '/var/www/edumate/static/'
STATIC_URL = '/static/'
MEDIA_ROOT = '/var/www/edumate/media/'
MEDIA_URL = '/media/'
= 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('SENDGRID_API_KEY')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
AZURE_STORAGE_KEY = config('AZURE_STORAGE_KEY')
AZURE_STORAGE_ACCOUNT = config('AZURE_STORAGE_ACCOUNT')
INSTALLED_APPS += [
'storages',
]
AZURE_ACCOUNT_KEY = AZURE_STORAGE_KEY
AZURE_ACCOUNT_NAME = AZURE_STORAGE_ACCOUNT
DEFAULT_FILE_STORAGE = 'edumate.azure.AzureMediaStorage'
STATICFILES_STORAGE = 'edumate.azure.AzureStaticStorage'
STATIC_LOCATION = 'static'
MEDIA_LOCATION = 'media'
AZURE_CUSTOM_DOMAIN = f'{AZURE_ACCOUNT_NAME}.blob.core.windows.net'
STATIC_URL = f'https://{AZURE_CUSTOM_DOMAIN}/{STATIC_LOCATION}/'
MEDIA_URL = f'https://{AZURE_CUSTOM_DOMAIN}/{MEDIA_LOCATION}/'
BROKER_URL = config('CELERY_REDIS_LOCATION')
BROKER_TRANSPORT_OPTIONS = {
'polling_interval': 10,
'visibility_timeout': 3600
}
| true | true |
f72ff53d421b318c306241599d6708aa4cc0e2a0 | 6,404 | py | Python | test/functional/mempool_persist.py | minblock/Blackcoin | 40cbf6c00d79b2d2d50b0645baa332fc8adc4ba3 | [
"MIT"
] | null | null | null | test/functional/mempool_persist.py | minblock/Blackcoin | 40cbf6c00d79b2d2d50b0645baa332fc8adc4ba3 | [
"MIT"
] | null | null | null | test/functional/mempool_persist.py | minblock/Blackcoin | 40cbf6c00d79b2d2d50b0645baa332fc8adc4ba3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool persistence.
By default, bitcoind will dump mempool on shutdown and
then reload it on startup. This can be overridden with
the -persistmempool=0 command line option.
Test is as follows:
- start node0, node1 and node2. node1 has -persistmempool=0
- create 5 transactions on node2 to its own address. Note that these
are not sent to node0 or node1 addresses because we don't want
them to be saved in the wallet.
- check that node0 and node1 have 5 transactions in their mempools
- shutdown all nodes.
- startup node0. Verify that it still has 5 transactions
in its mempool. Shutdown node0. This tests that by default the
mempool is persistent.
- startup node1. Verify that its mempool is empty. Shutdown node1.
This tests that with -persistmempool=0, the mempool is not
dumped to disk when the node is shut down.
- Restart node0 with -persistmempool=0. Verify that its mempool is
empty. Shutdown node0. This tests that with -persistmempool=0,
the mempool is not loaded from disk on start up.
- Restart node0 with -persistmempool. Verify that it has 5
transactions in its mempool. This tests that -persistmempool=0
does not overwrite a previously valid mempool stored on disk.
- Remove node0 mempool.dat and verify savemempool RPC recreates it
and verify that node1 can load it and has 5 transactions in its
mempool.
- Verify that savemempool throws when the RPC is called if
node1 can't write to disk.
"""
from decimal import Decimal
import os
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, wait_until
class MempoolPersistTest(BitcoinTestFramework):
    def set_test_params(self):
        # Three nodes: node1 disables mempool persistence; node0 and node2 use
        # the default (persistent) behavior.
        self.num_nodes = 3
        self.extra_args = [[], ["-persistmempool=0"], []]
    def skip_test_if_missing_module(self):
        # sendtoaddress/getbalance below require wallet support.
        self.skip_if_no_wallet()
    def run_test(self):
        chain_height = self.nodes[0].getblockcount()
        assert_equal(chain_height, 200)
        self.log.debug("Mine a single block to get out of IBD")
        self.nodes[0].generate(1)
        self.sync_all()

        self.log.debug("Send 5 transactions from node2 (to its own address)")
        # Keep the last txid so we can check fee prioritization survives restart.
        for i in range(5):
            last_txid = self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
        node2_balance = self.nodes[2].getbalance()
        self.sync_all()

        self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
        assert_equal(len(self.nodes[0].getrawmempool()), 5)
        assert_equal(len(self.nodes[1].getrawmempool()), 5)

        self.log.debug("Prioritize a transaction on node0")
        fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
        assert_equal(fees['base'], fees['modified'])
        self.nodes[0].prioritisetransaction(txid=last_txid, fee_delta=1000)
        fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
        assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])

        self.log.debug("Stop-start the nodes. Verify that node0 has the transactions in its mempool and node1 does not. Verify that node2 calculates its balance correctly after loading wallet transactions.")
        self.stop_nodes()
        # Give this node a head-start, so we can be "extra-sure" that it didn't load anything later
        # Also don't store the mempool, to keep the datadir clean
        self.start_node(1, extra_args=["-persistmempool=0"])
        self.start_node(0)
        self.start_node(2)
        # Give bitcoind a second to reload the mempool
        wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5, timeout=1)
        wait_until(lambda: len(self.nodes[2].getrawmempool()) == 5, timeout=1)
        # The others have loaded their mempool. If node_1 loaded anything, we'd probably notice by now:
        assert_equal(len(self.nodes[1].getrawmempool()), 0)

        self.log.debug('Verify prioritization is loaded correctly')
        # The fee delta applied before shutdown must persist with the mempool.
        fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
        assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])

        # Verify accounting of mempool transactions after restart is correct
        self.nodes[2].syncwithvalidationinterfacequeue()  # Flush mempool to wallet
        assert_equal(node2_balance, self.nodes[2].getbalance())

        self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
        self.stop_nodes()
        self.start_node(0, extra_args=["-persistmempool=0"])
        # Give bitcoind a second to reload the mempool
        time.sleep(1)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)

        self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
        # -persistmempool=0 above must not have deleted the on-disk mempool.dat.
        self.stop_nodes()
        self.start_node(0)
        wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)

        mempooldat0 = os.path.join(self.nodes[0].datadir, 'regtest', 'mempool.dat')
        mempooldat1 = os.path.join(self.nodes[1].datadir, 'regtest', 'mempool.dat')
        self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it")
        os.remove(mempooldat0)
        self.nodes[0].savemempool()
        assert os.path.isfile(mempooldat0)

        self.log.debug("Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 transactions")
        os.rename(mempooldat0, mempooldat1)
        self.stop_nodes()
        self.start_node(1, extra_args=[])
        wait_until(lambda: len(self.nodes[1].getrawmempool()) == 5)

        self.log.debug("Prevent blackcoind from writing mempool.dat to disk. Verify that `savemempool` fails")
        # to test the exception we are creating a tmp folder called mempool.dat.new
        # which is an implementation detail that could change and break this test
        mempooldotnew1 = mempooldat1 + '.new'
        os.mkdir(mempooldotnew1)
        assert_raises_rpc_error(-1, "Unable to dump mempool to disk", self.nodes[1].savemempool)
        os.rmdir(mempooldotnew1)

if __name__ == '__main__':
    MempoolPersistTest().main()
| 47.088235 | 207 | 0.698626 |
from decimal import Decimal
import os
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, wait_until
class MempoolPersistTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [[], ["-persistmempool=0"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
self.log.debug("Mine a single block to get out of IBD")
self.nodes[0].generate(1)
self.sync_all()
self.log.debug("Send 5 transactions from node2 (to its own address)")
for i in range(5):
last_txid = self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
node2_balance = self.nodes[2].getbalance()
self.sync_all()
self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
assert_equal(len(self.nodes[0].getrawmempool()), 5)
assert_equal(len(self.nodes[1].getrawmempool()), 5)
self.log.debug("Prioritize a transaction on node0")
fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
assert_equal(fees['base'], fees['modified'])
self.nodes[0].prioritisetransaction(txid=last_txid, fee_delta=1000)
fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])
self.log.debug("Stop-start the nodes. Verify that node0 has the transactions in its mempool and node1 does not. Verify that node2 calculates its balance correctly after loading wallet transactions.")
self.stop_nodes()
# Also don't store the mempool, to keep the datadir clean
self.start_node(1, extra_args=["-persistmempool=0"])
self.start_node(0)
self.start_node(2)
wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5, timeout=1)
wait_until(lambda: len(self.nodes[2].getrawmempool()) == 5, timeout=1)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.log.debug('Verify prioritization is loaded correctly')
fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])
# Verify accounting of mempool transactions after restart is correct
self.nodes[2].syncwithvalidationinterfacequeue() # Flush mempool to wallet
assert_equal(node2_balance, self.nodes[2].getbalance())
self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
self.stop_nodes()
self.start_node(0, extra_args=["-persistmempool=0"])
time.sleep(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
self.stop_nodes()
self.start_node(0)
wait_until(lambda: len(self.nodes[0].getrawmempool()) == 5)
mempooldat0 = os.path.join(self.nodes[0].datadir, 'regtest', 'mempool.dat')
mempooldat1 = os.path.join(self.nodes[1].datadir, 'regtest', 'mempool.dat')
self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it")
os.remove(mempooldat0)
self.nodes[0].savemempool()
assert os.path.isfile(mempooldat0)
self.log.debug("Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 transactions")
os.rename(mempooldat0, mempooldat1)
self.stop_nodes()
self.start_node(1, extra_args=[])
wait_until(lambda: len(self.nodes[1].getrawmempool()) == 5)
self.log.debug("Prevent blackcoind from writing mempool.dat to disk. Verify that `savemempool` fails")
mempooldotnew1 = mempooldat1 + '.new'
os.mkdir(mempooldotnew1)
assert_raises_rpc_error(-1, "Unable to dump mempool to disk", self.nodes[1].savemempool)
os.rmdir(mempooldotnew1)
if __name__ == '__main__':
MempoolPersistTest().main()
| true | true |
f72ff59bda1aa02358d87a57bd001b6bab504743 | 5,288 | py | Python | sentence_transformers/models/Asym.py | arcada-uas/sentence-transformers | 83ec2145dae858049ce38210860f0d75b4979927 | [
"Apache-2.0"
] | null | null | null | sentence_transformers/models/Asym.py | arcada-uas/sentence-transformers | 83ec2145dae858049ce38210860f0d75b4979927 | [
"Apache-2.0"
] | null | null | null | sentence_transformers/models/Asym.py | arcada-uas/sentence-transformers | 83ec2145dae858049ce38210860f0d75b4979927 | [
"Apache-2.0"
] | null | null | null | from torch import Tensor
from torch import nn
from typing import List, Dict
import os
import json
from ..util import import_from_string
from collections import OrderedDict
from typing import List, Dict, Optional, Union, Tuple
class Asym(nn.Sequential):
    def __init__(self, sub_modules: Dict[str, List[nn.Module]], allow_empty_key: bool = True):
        """
        This model allows to create asymmetric SentenceTransformer models, that apply different models depending on the specified input key.

        In the below example, we create two different Dense models for 'query' and 'doc'. Text that is passed as {'query': 'My query'} will
        be passed along along the first Dense model, and text that will be passed as {'doc': 'My document'} will use the other Dense model.

        Note, that when you call encode(), that only inputs of the same type can be encoded. Mixed-Types cannot be encoded.

        Example::
            word_embedding_model = models.Transformer(model_name)
            pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
            asym_model = models.Asym({'query': [models.Dense(word_embedding_model.get_word_embedding_dimension(), 128)], 'doc': [models.Dense(word_embedding_model.get_word_embedding_dimension(), 128)]})
            model = SentenceTransformer(modules=[word_embedding_model, pooling_model, asym_model])

            model.encode([{'query': 'Q1'}, {'query': 'Q2'}])
            model.encode([{'doc': 'Doc1'}, {'doc': 'Doc2'}])

            #You can train it with InputExample like this. Note, that the order must always be the same:
            train_example = InputExample(texts=[{'query': 'Train query'}, {'doc': 'Document'}], label=1)

        :param sub_modules: Dict in the format str -> List[models]. A single module (not wrapped in a list) is also accepted.
                            The models in the specified list will be applied for input marked with the respective key.
        :param allow_empty_key: If true, inputs without a key can be processed. If false, an exception will be thrown if no key is specified.
        """
        # Normalize every value to a list and STORE the normalized dict.
        # Previously only a local variable was normalized, so forward(),
        # save() and tokenize() crashed when a single module (rather than a
        # one-element list) was passed for a key.
        self.sub_modules = {name: models if isinstance(models, list) else [models]
                            for name, models in sub_modules.items()}
        self.allow_empty_key = allow_empty_key

        # Register each module as a child under a unique "<key>-<idx>" name.
        ordered_dict = OrderedDict()
        for name, models in self.sub_modules.items():
            for idx, model in enumerate(models):
                ordered_dict[name + "-" + str(idx)] = model
        super(Asym, self).__init__(ordered_dict)

    def forward(self, features: Dict[str, Tensor]):
        """Apply the sub-modules registered for this batch's key, in order.

        ``features['text_keys']`` is populated by tokenize(); all entries of
        a batch share the same key (mixed batches are rejected in tokenize()).
        """
        if 'text_keys' in features and len(features['text_keys']) > 0:
            text_key = features['text_keys'][0]
            for model in self.sub_modules[text_key]:
                features = model(features)
        elif not self.allow_empty_key:
            raise ValueError('Input did not specify any keys and allow_empty_key is False')

        return features

    def get_sentence_embedding_dimension(self) -> int:
        # The output dimension may differ per key, so no single value exists.
        raise NotImplementedError()

    def save(self, output_path):
        """Save each sub-module to its own folder plus a config.json describing the structure."""
        model_lookup = {}      # model_id -> module instance
        model_types = {}       # model_id -> fully qualified module path (for re-import)
        model_structure = {}   # key -> ordered list of model_ids

        for name, models in self.sub_modules.items():
            model_structure[name] = []
            for model in models:
                # id() keeps shared modules de-duplicated across keys.
                model_id = str(id(model))+'_'+type(model).__name__
                model_lookup[model_id] = model
                model_types[model_id] = type(model).__module__
                model_structure[name].append(model_id)

        for model_id, model in model_lookup.items():
            model_path = os.path.join(output_path, str(model_id))
            os.makedirs(model_path, exist_ok=True)
            model.save(model_path)

        with open(os.path.join(output_path, 'config.json'), 'w', encoding='utf8') as fOut:
            json.dump({'types': model_types, 'structure': model_structure,
                       'parameters': {'allow_empty_key': self.allow_empty_key}},
                      fOut, indent=2)

    def tokenize(self, texts: Union[List[str], List[Tuple[str, str]]]):
        """
        Tokenizes a text and maps tokens to token-ids
        """
        if not isinstance(texts[0], dict):
            raise AttributeError("Asym. model requires that texts are passed as dicts: {'key': 'text'}")

        # Delegate to the first module of this batch's key; all entries must
        # use the same key.
        module_key = None
        for lookup in texts:
            text_key, text = next(iter(lookup.items()))
            if module_key is None:
                module_key = text_key
            assert text_key == module_key    #Mixed batches are not allowed
        return self.sub_modules[module_key][0].tokenize(texts)

    @staticmethod
    def load(input_path):
        """Load an Asym model previously written by save()."""
        with open(os.path.join(input_path, 'config.json')) as fIn:
            config = json.load(fIn)

        # Re-import each module class by its recorded dotted path and load it.
        modules = {}
        for model_id, model_type in config['types'].items():
            module_class = import_from_string(model_type)
            module = module_class.load(os.path.join(input_path, model_id))
            modules[model_id] = module

        model_structure = {}
        for key_name, models_list in config['structure'].items():
            model_structure[key_name] = []
            for model_id in models_list:
                model_structure[key_name].append(modules[model_id])

        model = Asym(model_structure, **config['parameters'])
        return model
| 42.99187 | 202 | 0.637103 | from torch import Tensor
from torch import nn
from typing import List, Dict
import os
import json
from ..util import import_from_string
from collections import OrderedDict
from typing import List, Dict, Optional, Union, Tuple
class Asym(nn.Sequential):
def __init__(self, sub_modules: Dict[str, List[nn.Module]], allow_empty_key: bool = True):
self.sub_modules = sub_modules
self.allow_empty_key = allow_empty_key
ordered_dict = OrderedDict()
for name, models in sub_modules.items():
if not isinstance(models, List):
models = [models]
for idx, model in enumerate(models):
ordered_dict[name+"-"+str(idx)] = model
super(Asym, self).__init__(ordered_dict)
def forward(self, features: Dict[str, Tensor]):
if 'text_keys' in features and len(features['text_keys']) > 0:
text_key = features['text_keys'][0]
for model in self.sub_modules[text_key]:
features = model(features)
elif not self.allow_empty_key:
raise ValueError('Input did not specify any keys and allow_empty_key is False')
return features
def get_sentence_embedding_dimension(self) -> int:
raise NotImplementedError()
def save(self, output_path):
model_lookup = {}
model_types = {}
model_structure = {}
for name, models in self.sub_modules.items():
model_structure[name] = []
for model in models:
model_id = str(id(model))+'_'+type(model).__name__
model_lookup[model_id] = model
model_types[model_id] = type(model).__module__
model_structure[name].append(model_id)
for model_id, model in model_lookup.items():
model_path = os.path.join(output_path, str(model_id))
os.makedirs(model_path, exist_ok=True)
model.save(model_path)
with open(os.path.join(output_path, 'config.json'), 'w', encoding='utf8') as fOut:
json.dump({'types': model_types, 'structure': model_structure,
'parameters': {'allow_empty_key': self.allow_empty_key}},
fOut, indent=2)
def tokenize(self, texts: Union[List[str], List[Tuple[str, str]]]):
if not isinstance(texts[0], dict):
raise AttributeError("Asym. model requires that texts are passed as dicts: {'key': 'text'}")
module_key = None
for lookup in texts:
text_key, text = next(iter(lookup.items()))
if module_key is None:
module_key = text_key
assert text_key == module_key
return self.sub_modules[module_key][0].tokenize(texts)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, 'config.json')) as fIn:
config = json.load(fIn)
modules = {}
for model_id, model_type in config['types'].items():
module_class = import_from_string(model_type)
module = module_class.load(os.path.join(input_path, model_id))
modules[model_id] = module
model_structure = {}
for key_name, models_list in config['structure'].items():
model_structure[key_name] = []
for model_id in models_list:
model_structure[key_name].append(modules[model_id])
model = Asym(model_structure, **config['parameters'])
return model
| true | true |
f72ff810f29418ba62e98a1ce7f5672ac06ff9c2 | 1,005 | py | Python | CI/create_release_notes.py | tdhooks/sarus | 64d3152e810b1081e6dbe7b3587e8e5948c3268e | [
"BSD-3-Clause"
] | 84 | 2019-04-30T17:35:14.000Z | 2022-03-20T21:15:41.000Z | CI/create_release_notes.py | tdhooks/sarus | 64d3152e810b1081e6dbe7b3587e8e5948c3268e | [
"BSD-3-Clause"
] | 26 | 2019-11-07T19:24:36.000Z | 2022-02-10T14:18:58.000Z | CI/create_release_notes.py | tdhooks/sarus | 64d3152e810b1081e6dbe7b3587e8e5948c3268e | [
"BSD-3-Clause"
def create_release_notes():
    """Extract the newest release section of CHANGELOG.md into RELEASE_NOTES.md.

    The newest release starts at the first line beginning with "## [" and ends
    right before the next such line (or at EOF). Heading lines lose one '#'
    so the release notes start one level higher.
    """
    import os
    path = os.path.dirname(os.path.abspath(__file__))
    changelog_filename = os.path.join(path, "../CHANGELOG.md")
    release_notes_filename = os.path.join(path, "../RELEASE_NOTES.md")
    # Explicit encoding so output does not depend on the platform default.
    with open(changelog_filename, "r", encoding="utf-8") as changelog:
        with open(release_notes_filename, "w", encoding="utf-8") as release_notes:
            line = _find_first_release(changelog)
            while line:
                if line.startswith("##"):
                    # reduce title indentation by one level
                    line = line[1:]
                release_notes.write(line)
                line = changelog.readline()
                if line.startswith("## ["):
                    # start of the next (older) release section
                    break


def _find_first_release(changelog):
    """Advance *changelog* to the first release heading and return it ('' at EOF)."""
    for line in changelog:
        if line.startswith("## ["):
            return line
    return ""


if __name__ == "__main__":
    create_release_notes()
import os
path = os.path.dirname(os.path.abspath(__file__))
changelog_filename = os.path.join(path, "../CHANGELOG.md")
release_notes_filename = os.path.join(path, "../RELEASE_NOTES.md")
with open(changelog_filename, "r") as changelog:
with open(release_notes_filename, "w") as release_notes:
started = False
while not started:
line = changelog.readline()
if not line:
break
if line.startswith("## ["):
started = True
while started:
if line.startswith("##"):
line = line[1:]
release_notes.write(line)
line = changelog.readline()
if not line or line.startswith("## ["):
break
if __name__ == "__main__":
create_release_notes() | true | true |
f72ff87bdabef8d7929fc9db97e276b84505f580 | 1,196 | py | Python | zoomapi/components/chat_messages.py | zihuaweng/zoomapi | 0bd9e57f1b2469b1071e8060feb772748882c175 | [
"Apache-2.0"
] | null | null | null | zoomapi/components/chat_messages.py | zihuaweng/zoomapi | 0bd9e57f1b2469b1071e8060feb772748882c175 | [
"Apache-2.0"
] | null | null | null | zoomapi/components/chat_messages.py | zihuaweng/zoomapi | 0bd9e57f1b2469b1071e8060feb772748882c175 | [
"Apache-2.0"
] | null | null | null | """Zoom.us REST API Python Client -- Chat Messages component"""
from zoomapi.util import require_keys, Throttled
from zoomapi.components import base
class ChatMessagesComponentV2(base.BaseComponent):
    """Component dealing with all chat messages related matters"""

    @Throttled
    def list(self, **kwargs):
        """List chat messages for the user given by ``user_id``."""
        require_keys(kwargs, "user_id")
        endpoint = "/chat/users/{}/messages".format(kwargs.get("user_id"))
        return self.get_request(endpoint, params=kwargs)

    @Throttled
    def post(self, **kwargs):
        """Post a chat message as the authenticated user (requires ``message``)."""
        require_keys(kwargs, "message")
        return self.post_request("/chat/users/me/messages", data=kwargs)

    @Throttled
    def send(self, **kwargs):
        """Send a chat message as the authenticated user (same API as ``post``)."""
        require_keys(kwargs, "message")
        return self.post_request("/chat/users/me/messages", data=kwargs)

    @Throttled
    def update(self, **kwargs):
        """Edit an existing chat message identified by ``messageId``."""
        require_keys(kwargs, "message")
        endpoint = "/chat/users/me/messages/{}".format(kwargs.get("messageId"))
        return self.put_request(endpoint, data=kwargs)

    @Throttled
    def delete(self, **kwargs):
        """Delete an existing chat message identified by ``messageId``."""
        require_keys(kwargs, "messageId")
        endpoint = "/chat/users/me/messages/{}".format(kwargs.get("messageId"))
        return self.delete_request(endpoint, params=kwargs)
| 34.171429 | 111 | 0.669732 |
from zoomapi.util import require_keys, Throttled
from zoomapi.components import base
class ChatMessagesComponentV2(base.BaseComponent):
@Throttled
def list(self, **kwargs):
require_keys(kwargs, "user_id")
return self.get_request(
"/chat/users/{}/messages".format(kwargs.get("user_id")), params=kwargs
)
@Throttled
def post(self, **kwargs):
require_keys(kwargs, "message")
return self.post_request("/chat/users/me/messages", data=kwargs)
@Throttled
def send(self, **kwargs):
require_keys(kwargs, "message")
return self.post_request("/chat/users/me/messages", data=kwargs)
@Throttled
def update(self, **kwargs):
require_keys(kwargs, "message")
return self.put_request("/chat/users/me/messages/{}".format(kwargs.get("messageId")), data=kwargs)
@Throttled
def delete(self, **kwargs):
require_keys(kwargs, "messageId")
return self.delete_request("/chat/users/me/messages/{}".format(kwargs.get("messageId")), params=kwargs)
| true | true |
f72ff90f4cf8c111f5cfabc254f6f2eba07babf7 | 23,978 | py | Python | pcdet/utils/loss_utils.py | jialeli1/From-Voxel-to-Point | b4dba9c4e9cd83e04199d9224f6ec7bf06b71f93 | [
"Apache-2.0"
] | 26 | 2021-07-14T10:55:14.000Z | 2022-02-25T05:46:42.000Z | pcdet/utils/loss_utils.py | jialeli1/From-Voxel-to-Point | b4dba9c4e9cd83e04199d9224f6ec7bf06b71f93 | [
"Apache-2.0"
] | 2 | 2021-07-12T09:58:00.000Z | 2021-12-14T13:04:47.000Z | pcdet/utils/loss_utils.py | jialeli1/From-Voxel-to-Point | b4dba9c4e9cd83e04199d9224f6ec7bf06b71f93 | [
"Apache-2.0"
] | 4 | 2021-08-22T16:41:35.000Z | 2022-03-18T06:54:52.000Z | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from . import box_utils
from . import center_utils
try:
from itertools import ifilterfalse
except ImportError: # py3k
from itertools import filterfalse as ifilterfalse
class SigmoidFocalClassificationLoss(nn.Module):
    """
    Sigmoid focal cross entropy loss (https://arxiv.org/abs/1708.02002).
    """

    def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
        """
        Args:
            gamma: focusing parameter that down-weights easy examples.
            alpha: balancing parameter between positive and negative examples.
        """
        super(SigmoidFocalClassificationLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma

    @staticmethod
    def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
        """Numerically stable BCE with logits, mirroring
        tf.nn.sigmoid_cross_entropy_with_logits:
        max(x, 0) - x * z + log(1 + exp(-abs(x)))

        Args:
            input: (B, #anchors, #classes) predicted logits.
            target: (B, #anchors, #classes) one-hot targets.
        Returns:
            (B, #anchors, #classes) element-wise loss, no reduction.
        """
        stable_log_term = torch.log1p(torch.exp(-torch.abs(input)))
        return torch.clamp(input, min=0) - input * target + stable_log_term

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
        """
        Args:
            input: (B, #anchors, #classes) predicted logits.
            target: (B, #anchors, #classes) one-hot classification targets.
            weights: (B, #anchors) anchor-wise weights.
        Returns:
            (B, #anchors, #classes) weighted focal loss, no reduction.
        """
        pred_prob = torch.sigmoid(input)
        # alpha for positives, (1 - alpha) for negatives
        alpha_weight = self.alpha * target + (1 - self.alpha) * (1 - target)
        # probability of being wrong: 1 - p for positives, p for negatives
        pt = (1.0 - pred_prob) * target + pred_prob * (1.0 - target)
        focal_weight = alpha_weight * torch.pow(pt, self.gamma)

        loss = focal_weight * self.sigmoid_cross_entropy_with_logits(input, target)

        # Broadcast anchor-wise weights over the class dimension when needed.
        if weights.dim() == 2 or (weights.dim() == 1 and target.dim() == 2):
            weights = weights.unsqueeze(-1)

        assert weights.dim() == loss.dim()
        return loss * weights
class WeightedSmoothL1Loss(nn.Module):
    """
    Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
    https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
                  | 0.5 * x ** 2 / beta   if abs(x) < beta
    smoothl1(x) = |
                  | abs(x) - 0.5 * beta   otherwise,
    where x = input - target.
    """
    def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
        """
        Args:
            beta: Scalar float.
                L1 to L2 change point.
                For beta values < 1e-5, L1 loss is computed.
            code_weights: (#codes) float list if not None.
                Code-wise weights.
        """
        super(WeightedSmoothL1Loss, self).__init__()
        self.beta = beta
        # BUGFIX: always define the attribute. forward() reads self.code_weights
        # unconditionally and nn.Module.__getattr__ raises AttributeError for
        # attributes that were never assigned, so code_weights=None used to crash.
        self.code_weights = None
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()

    @staticmethod
    def smooth_l1_loss(diff, beta):
        """Element-wise smooth-L1 of *diff*; plain L1 for tiny beta (< 1e-5)."""
        if beta < 1e-5:
            loss = torch.abs(diff)
        else:
            n = torch.abs(diff)
            loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)

        return loss

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
        """
        Args:
            input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
            target: (B, #anchors, #codes) float tensor.
                Regression targets.
            weights: (B, #anchors) float tensor if not None.
                Anchor-wise weights.
        Returns:
            loss: (B, #anchors, #codes) float tensor.
                Weighted smooth l1 loss without reduction.
        """
        target = torch.where(torch.isnan(target), input, target)  # ignore nan targets

        diff = input - target
        # code-wise weighting
        if self.code_weights is not None:
            diff = diff * self.code_weights.view(1, 1, -1)

        loss = self.smooth_l1_loss(diff, self.beta)

        # anchor-wise weighting
        if weights is not None:
            assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
            loss = loss * weights.unsqueeze(-1)

        return loss
class WeightedL1Loss(nn.Module):
    """Code-wise weighted L1 loss (no smooth-L1 transition)."""

    def __init__(self, code_weights: list = None):
        """
        Args:
            code_weights: (#codes) float list if not None.
                Code-wise weights.
        """
        super(WeightedL1Loss, self).__init__()
        # BUGFIX: always define the attribute. forward() reads self.code_weights
        # unconditionally and nn.Module.__getattr__ raises AttributeError for
        # attributes that were never assigned, so code_weights=None used to crash.
        self.code_weights = None
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
        """
        Args:
            input: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
            target: (B, #anchors, #codes) float tensor.
                Regression targets.
            weights: (B, #anchors) float tensor if not None.
                Anchor-wise weights.
        Returns:
            loss: (B, #anchors, #codes) float tensor.
                Weighted l1 loss without reduction.
        """
        target = torch.where(torch.isnan(target), input, target)  # ignore nan targets

        diff = input - target
        # code-wise weighting
        if self.code_weights is not None:
            diff = diff * self.code_weights.view(1, 1, -1)

        loss = torch.abs(diff)

        # anchor-wise weighting
        if weights is not None:
            assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
            loss = loss * weights.unsqueeze(-1)

        return loss
class WeightedCrossEntropyLoss(nn.Module):
    """
    Cross entropy with anchor-wise weighting; reshapes inputs into the layout
    expected by PyTorch's official cross entropy loss.
    """

    def __init__(self):
        super(WeightedCrossEntropyLoss, self).__init__()

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
        """
        Args:
            input: (B, #anchors, #classes) predicted logits.
            target: (B, #anchors, #classes) one-hot classification targets.
            weights: (B, #anchors) anchor-wise weights.
        Returns:
            (B, #anchors) weighted cross entropy loss, no reduction.
        """
        logits = input.permute(0, 2, 1)       # (B, #classes, #anchors) for F.cross_entropy
        class_ids = target.argmax(dim=-1)     # one-hot -> index labels
        return F.cross_entropy(logits, class_ids, reduction='none') * weights
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
    """
    Corner loss between predicted and GT boxes, insensitive to a pi flip of
    the heading.

    Args:
        pred_bbox3d: (N, 7) float Tensor.
        gt_bbox3d: (N, 7) float Tensor.
    Returns:
        corner_loss: (N) float Tensor, per-box mean over the 8 corners.
    """
    assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]

    pred_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
    gt_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)

    # Also compare against the GT box flipped by pi so a prediction with the
    # opposite heading is not penalized for the orientation ambiguity.
    gt_flipped = gt_bbox3d.clone()
    gt_flipped[:, 6] += np.pi
    gt_corners_flipped = box_utils.boxes_to_corners_3d(gt_flipped)

    # (N, 8): per-corner distance to the closer of the two GT variants
    corner_dist = torch.min(
        torch.norm(pred_corners - gt_corners, dim=2),
        torch.norm(pred_corners - gt_corners_flipped, dim=2),
    )
    # (N, 8) -> (N)
    return WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0).mean(dim=1)
def get_corner_loss_mse(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
    """
    MSE corner loss between predicted and GT boxes (heading-sensitive).

    Args:
        pred_bbox3d: (N, 7) float Tensor.
        gt_bbox3d: (N, 7) float Tensor.
    Returns:
        corner_loss: scalar tensor; per-axis MSE (mean over all boxes and
        corners), summed over x/y/z.
    """
    assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]

    # (N, 8, 3)
    pred_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
    gt_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)

    # Mean-reduced MSE per axis, then summed across the three axes.
    per_axis = [
        F.mse_loss(input=pred_corners[:, :, axis], target=gt_corners[:, :, axis])
        for axis in range(3)
    ]
    return per_axis[0] + per_axis[1] + per_axis[2]
def get_iouscore_loss_bce(iou_preds, iou_gts, iou_fg_thresh=0.75, iou_bg_thresh=0.25):
    """
    BCE loss on predicted IoU scores against soft labels built from GT IoUs.

    Labels are 1 above iou_fg_thresh, 0 below iou_bg_thresh, and linearly
    interpolated in between.

    Args:
        iou_preds: (N,) raw score logits.
        iou_gts: (N,) IoU of each proposal with its matched GT box.
    Returns:
        loss_iouscore: scalar tensor.
    """
    fg_mask = iou_gts > iou_fg_thresh
    bg_mask = iou_gts < iou_bg_thresh
    mid_mask = (fg_mask == 0) & (bg_mask == 0)

    iou_cls_labels = (fg_mask > 0).float()
    # Soft label for proposals that fall between the two thresholds.
    iou_cls_labels[mid_mask] = \
        (iou_gts[mid_mask] - iou_bg_thresh) / (iou_fg_thresh - iou_bg_thresh)

    # BCE over all proposals; only entries with label >= 0 count toward the
    # mean (all labels built above are >= 0, so every proposal is kept).
    loss_per_elem = F.binary_cross_entropy(torch.sigmoid(iou_preds), iou_cls_labels.float(), reduction='none')
    valid_mask = (iou_cls_labels >= 0).float()
    return (loss_per_elem * valid_mask).sum() / torch.clamp(valid_mask.sum(), min=1.0)
def get_rot_binres_loss(pred_reg, reg_label, num_head_bin, get_ry_fine=False):
    """
    Bin-based rotation regression loss: cross entropy over angle bins plus a
    smooth-L1 on the normalized in-bin residual.
    See https://arxiv.org/abs/1812.04244 for more details.
    :param pred_reg: (N, C) predicted channels, laid out as
        [bin logits (num_head_bin) | bin residuals (num_head_bin)]
    :param reg_label: (N, 1), ry heading angle in radians
    :param num_head_bin: constant, number of discrete heading bins
    :param get_ry_fine: False for one-stage use (the True branch is disabled
        by an assert and would encode only a pi/2 range around the anchor)
    :return: (angle_loss, reg_loss_dict)
    """
    reg_loss_dict = {}
    # angle loss: channel layout offsets for bin logits and residuals
    start_offset = 0
    ry_bin_l, ry_bin_r = start_offset, start_offset + num_head_bin
    ry_res_l, ry_res_r = ry_bin_r, ry_bin_r + num_head_bin
    start_offset = ry_res_r

    # (N, 1) -> (N,)
    ry_label = reg_label.squeeze(dim=-1)

    if get_ry_fine:
        assert False, "one-stage should not get_ry_fine."
        # divide pi/2 into several bins
        angle_per_class = (np.pi / 2) / num_head_bin

        ry_label = ry_label % (2 * np.pi)  # 0 ~ 2pi
        # fold opposite headings back so the encoded range stays (0~pi/2, 3pi/2~2pi)
        opposite_flag = (ry_label > np.pi * 0.5) & (ry_label < np.pi * 1.5)
        ry_label[opposite_flag] = (ry_label[opposite_flag] + np.pi) % (2 * np.pi)  # (0 ~ pi/2, 3pi/2 ~ 2pi)
        shift_angle = (ry_label + np.pi * 0.5) % (2 * np.pi)  # (0 ~ pi)

        shift_angle = torch.clamp(shift_angle - np.pi * 0.25, min=1e-3, max=np.pi * 0.5 - 1e-3)  # (0, pi/2)

        # bin center is (5, 10, 15, ..., 85)
        ry_bin_label = (shift_angle / angle_per_class).floor().long()
        ry_res_label = shift_angle - (ry_bin_label.float() * angle_per_class + angle_per_class / 2)
        ry_res_norm_label = ry_res_label / (angle_per_class / 2)

    else:
        # divide 2pi into several bins
        angle_per_class = (2 * np.pi) / num_head_bin
        heading_angle = ry_label % (2 * np.pi)  # 0 ~ 2pi

        # shift by half a bin so bin centers sit on multiples of angle_per_class
        shift_angle = (heading_angle + angle_per_class / 2) % (2 * np.pi)
        ry_bin_label = (shift_angle / angle_per_class).floor().long()
        # residual inside the assigned bin, normalized to [-1, 1]
        ry_res_label = shift_angle - (ry_bin_label.float() * angle_per_class + angle_per_class / 2)
        ry_res_norm_label = ry_res_label / (angle_per_class / 2)

    # one-hot of the GT bin, used to pick out the residual of the GT bin only
    # NOTE(review): torch.cuda.FloatTensor makes this loss CUDA-only
    ry_bin_onehot = torch.cuda.FloatTensor(ry_bin_label.size(0), num_head_bin).zero_()
    ry_bin_onehot.scatter_(1, ry_bin_label.view(-1, 1).long(), 1)
    loss_ry_bin = F.cross_entropy(pred_reg[:, ry_bin_l:ry_bin_r], ry_bin_label)
    loss_ry_res = F.smooth_l1_loss((pred_reg[:, ry_res_l: ry_res_r] * ry_bin_onehot).sum(dim=1), ry_res_norm_label)

    reg_loss_dict['loss_ry_bin'] = loss_ry_bin.item()
    reg_loss_dict['loss_ry_res'] = loss_ry_res.item()
    angle_loss = loss_ry_bin + loss_ry_res

    # Total regression loss
    reg_loss_dict['loss_angle'] = angle_loss

    return angle_loss, reg_loss_dict
class CenterNetFocalLoss(nn.Module):
    """Penalty-reduced focal loss for CenterNet-style heatmaps (CornerNet variant)."""

    def __init__(self, gamma=4, alpha=2):
        super(CenterNetFocalLoss, self).__init__()
        self.gamma = gamma  # exponent of the (1 - gt) penalty reduction on negatives
        self.alpha = alpha  # focal exponent applied to the prediction

    def _sigmoid(self, x):
        # Clamp away from exactly 0/1 so the logs in _neg_loss stay finite.
        # Do not use the in-place sigmoid_ here; clamping tighter than 1e-4
        # can make the loss NaN.
        return torch.clamp(torch.sigmoid(x), min=1e-4, max=1 - 1e-4)

    def _neg_loss(self, pred, gt):
        """Modified focal loss, exactly as in CornerNet.

        Arguments:
            pred: (batch x c x h x w) probabilities, already clamped to (0, 1).
            gt: (batch x c x h x w) target heatmap; peaks are exactly 1.
        """
        pos_mask = gt.eq(1).float()
        neg_mask = gt.lt(1).float()
        neg_weights = torch.pow(1 - gt, self.gamma)

        pos_term = torch.log(pred) * torch.pow(1 - pred, self.alpha) * pos_mask
        neg_term = torch.log(1 - pred) * torch.pow(pred, self.alpha) * neg_weights * neg_mask

        num_pos = pos_mask.float().sum()
        pos_sum = pos_term.sum()
        neg_sum = neg_term.sum()

        # Normalize by the number of positive (peak) locations; when there are
        # none, only the negative part contributes.
        if num_pos == 0:
            return -neg_sum
        return -(pos_sum + neg_sum) / num_pos

    def forward(self, out, target):
        return self._neg_loss(self._sigmoid(out), target)
class CenterNetResLoss(nn.Module):
    """Residual regression loss evaluated at object-center locations."""

    def __init__(self, cfg):
        super(CenterNetResLoss, self).__init__()
        # one of: 'smooth-l1' | 'l1' | 'balanced_l1'
        self.res_func_type = cfg['res_func']

    def forward(self, output, mask, ind, target):
        """
        Args:
            output: (B, C, H, W) dense prediction map.
            mask: (B, max_objs) validity mask over gathered objects.
            ind: (B, max_objs) flattened spatial indices of object centers.
            target: (B, max_objs, C) regression targets.
        Returns:
            scalar loss (0. when no valid object is present).
        """
        pred = center_utils._transpose_and_gather_feat(output, ind)  # (B, max_objs, C)

        if not mask.sum():
            return 0.

        # Keep only the entries marked valid by the mask.
        flat_mask = mask.view(-1).bool()                            # (B*max_objs)
        valid_pred = pred.view(-1, pred.shape[-1])[flat_mask]       # (num_valid, C)
        valid_target = target.view(-1, target.shape[-1])[flat_mask] # (num_valid, C)

        if self.res_func_type == 'smooth-l1':
            loss = F.smooth_l1_loss(valid_pred, valid_target, reduction='none')
        elif self.res_func_type == 'l1':
            loss = F.l1_loss(valid_pred, valid_target, reduction='none')
        elif self.res_func_type == 'balanced_l1':
            loss = get_balanced_l1_loss(valid_pred, valid_target)
        else:
            raise NotImplementedError

        # mean over valid objects, sum over channels: (num_valid, C) -> ()
        return loss.mean(dim=0).sum()
class CenterNetRotBinResLoss(nn.Module):
    """Bin/residual heading loss evaluated at object-center locations."""

    def __init__(self, cfg):
        super(CenterNetRotBinResLoss, self).__init__()
        self.num_head_bin = cfg['num_bins']

    def forward(self, output, mask, ind, target):
        """
        Args:
            output: (B, C, H, W) dense rotation prediction map.
            mask: (B, max_objs) validity mask over gathered objects.
            ind: (B, max_objs) flattened spatial indices of object centers.
            target: (B, max_objs, 1) GT heading angles.
        Returns:
            scalar loss (0. when no valid object is present).
        """
        pred = center_utils._transpose_and_gather_feat(output, ind)  # (B, max_objs, C)

        if not mask.sum():
            return 0.

        flat_mask = mask.view(-1).bool()                             # (B*max_objs)
        valid_pred = pred.view(-1, pred.shape[-1])[flat_mask]        # (num_valid, C)
        valid_target = target.view(-1, target.shape[-1])[flat_mask]  # (num_valid, 1)

        # Reduced bin-classification + residual loss over the valid objects.
        loss, _ = get_rot_binres_loss(valid_pred, valid_target, num_head_bin=self.num_head_bin)
        return loss
def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None):
    """
    Multi-class Lovasz-Softmax loss.
    NOTE: probas must already be softmax probabilities.
    probas: [B, C, H, W] class probabilities in [0, 1]; a sigmoid output of
        size [B, H, W] is interpreted as binary.
    labels: [B, H, W] ground truth labels (between 0 and C - 1)
    classes: 'all' for all, 'present' for classes present in labels, or an
        explicit list of classes to average
    per_image: compute the loss per image instead of per batch
    ignore: void class label to exclude
    """
    if not per_image:
        return lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), classes=classes)

    per_image_losses = (
        lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes)
        for prob, lab in zip(probas, labels)
    )
    return mean(per_image_losses)
def lovasz_softmax_flat(probas, labels, classes='present'):
    """
    Multi-class Lovasz-Softmax loss on flattened predictions.
    probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
    labels: [P] Tensor, ground truth labels (between 0 and C - 1)
    classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
    """
    if probas.numel() == 0:
        # only void pixels, the gradients should be 0
        return probas * 0.
    C = probas.size(1)
    losses = []
    class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
    for c in class_to_sum:
        fg = (labels == c).float()  # foreground mask for class c
        # BUGFIX: compare strings with '==' instead of 'is'. Identity of string
        # literals is interpreter-dependent and emits SyntaxWarning on 3.8+.
        if (classes == 'present' and fg.sum() == 0):
            continue
        if C == 1:
            if len(classes) > 1:
                raise ValueError('Sigmoid output possible only with 1 class')
            class_pred = probas[:, 0]
        else:
            class_pred = probas[:, c]
        errors = (Variable(fg) - class_pred).abs()
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        perm = perm.data
        fg_sorted = fg[perm]
        losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
    return mean(losses)
def lovasz_grad(gt_sorted):
    """
    Gradient of the Lovasz extension w.r.t. sorted errors (Alg. 1 in the
    Lovasz-Softmax paper).
    gt_sorted: 1D {0, 1} ground-truth tensor sorted by decreasing error.
    """
    num_elems = len(gt_sorted)
    total_fg = gt_sorted.sum()
    fg_cum = gt_sorted.float().cumsum(0)
    bg_cum = (1 - gt_sorted).float().cumsum(0)
    # 1 - IoU at each prefix length
    jaccard = 1. - (total_fg - fg_cum) / (total_fg + bg_cum)
    if num_elems > 1:  # cover 1-pixel case
        jaccard[1:num_elems] = jaccard[1:num_elems] - jaccard[0:-1]
    return jaccard
def flatten_probas(probas, labels, ignore=None):
    """
    Flatten predictions and labels in the batch to [P, C] / [P], optionally
    dropping pixels whose label equals *ignore*.
    """
    if probas.dim() == 2:
        # already [P, C]: 3D segmentation for a sparse tensor, nothing to do
        pass
    elif probas.dim() == 3:
        # assumes output of a sigmoid layer: [B, H, W] -> [B*H*W, 1]
        # BUGFIX: the original referenced an undefined C here (NameError);
        # for a single-channel map the flattened channel count is 1.
        B, H, W = probas.size()
        probas = probas.contiguous().view(-1, 1)  # B * H * W, 1 = P, C
    elif probas.dim() == 5:
        # 3D segmentation for a dense tensor
        B, C, L, H, W = probas.size()
        probas = probas.contiguous().view(B, C, L, H * W)
        probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)  # B * L * H * W, C = P, C
    labels = labels.view(-1)
    if ignore is None:
        return probas, labels
    valid = (labels != ignore)
    # torch.nonzero with as_tuple=False for newer pytorch
    vprobas = probas[torch.nonzero(valid, as_tuple=False).squeeze()]
    vlabels = labels[valid]
    return vprobas, vlabels
# --------------------------- HELPER FUNCTIONS ---------------------------
def isnan(x):
    """True iff *x* is NaN (NaN is the only value unequal to itself)."""
    return x != x
def mean(l, ignore_nan=False, empty=0):
    """
    nanmean compatible with generators.

    Returns *empty* for an empty iterable, unless empty == 'raise', in which
    case a ValueError is raised.
    """
    it = iter(l)
    if ignore_nan:
        it = ifilterfalse(isnan, it)
    try:
        total = next(it)
    except StopIteration:
        if empty == 'raise':
            raise ValueError('Empty mean')
        return empty
    # enumerate starts at 2 because the first element was already consumed.
    count = 1
    for count, value in enumerate(it, 2):
        total += value
    if count == 1:
        return total
    return total / count
| 35.418021 | 118 | 0.594378 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from . import box_utils
from . import center_utils
try:
from itertools import ifilterfalse
except ImportError:
from itertools import filterfalse as ifilterfalse
class SigmoidFocalClassificationLoss(nn.Module):
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
super(SigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
target = torch.where(torch.isnan(target), input, target)
diff = input - target
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
target = torch.where(torch.isnan(target), input, target)
diff = input - target
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
    """Corner-alignment loss between predicted and ground-truth 3D boxes.

    Args:
        pred_bbox3d: (N, 7) predicted boxes; index 6 is the heading angle
            (full encoding assumed [x, y, z, dx, dy, dz, ry] -- TODO confirm
            against box_utils.boxes_to_corners_3d).
        gt_bbox3d: (N, 7) ground-truth boxes in the same encoding.

    Returns:
        (N,) per-box loss: smooth-L1 of the corner distances, averaged over
        the box corners.
    """
    assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
    pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
    gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
    # Headings ry and ry + pi yield the same box footprint, so also compare
    # against the heading-flipped ground truth and keep the smaller distance.
    gt_bbox3d_flip = gt_bbox3d.clone()
    gt_bbox3d_flip[:, 6] += np.pi
    gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
    corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
                            torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
    # beta=1.0: standard smooth-L1 transition point.
    corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
    return corner_loss.mean(dim=1)
def get_corner_loss_mse(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
    """MSE variant of the corner loss (no heading-flip handling).

    Args:
        pred_bbox3d: (N, 7) predicted boxes.
        gt_bbox3d: (N, 7) ground-truth boxes in the same encoding.

    Returns:
        Scalar tensor: sum of the mean-squared corner errors along x, y and z
        (each term is already averaged over boxes and corners by F.mse_loss's
        default reduction='mean').
    """
    assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
    pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
    gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
    # Per-axis MSE over all corners of all boxes.
    corner_loss_x = F.mse_loss(input=pred_box_corners[:,:,0], target=gt_box_corners[:,:,0])
    corner_loss_y = F.mse_loss(input=pred_box_corners[:,:,1], target=gt_box_corners[:,:,1])
    corner_loss_z = F.mse_loss(input=pred_box_corners[:,:,2], target=gt_box_corners[:,:,2])
    corner_loss = corner_loss_x + corner_loss_y + corner_loss_z
    return corner_loss
def get_iouscore_loss_bce(iou_preds, iou_gts, iou_fg_thresh=0.75, iou_bg_thresh=0.25):
    """Binary cross-entropy between predicted IoU scores and soft IoU labels.

    Ground-truth IoUs above ``iou_fg_thresh`` are labelled 1, below
    ``iou_bg_thresh`` labelled 0, and values in between are mapped linearly
    onto (0, 1).

    Args:
        iou_preds: Raw (pre-sigmoid) IoU score predictions.
        iou_gts: Ground-truth IoU values.
        iou_fg_thresh: IoUs above this count as foreground.
        iou_bg_thresh: IoUs below this count as background.

    Returns:
        Scalar mean BCE loss over all valid entries.
    """
    is_fg = iou_gts > iou_fg_thresh
    is_bg = iou_gts < iou_bg_thresh
    in_between = ~is_fg & ~is_bg
    # Hard labels for clear fore-/background, linear soft labels in between.
    soft_labels = is_fg.float()
    span = iou_fg_thresh - iou_bg_thresh
    soft_labels[in_between] = (iou_gts[in_between] - iou_bg_thresh) / span
    raw_bce = F.binary_cross_entropy(torch.sigmoid(iou_preds), soft_labels.float(), reduction='none')
    valid = (soft_labels >= 0).float()
    return (raw_bce * valid).sum() / torch.clamp(valid.sum(), min=1.0)
def get_rot_binres_loss(pred_reg, reg_label, num_head_bin, get_ry_fine=False):
    """Bin-based heading (yaw) loss: bin classification + in-bin residual.

    The orientation is encoded as a classification over ``num_head_bin``
    angle bins plus a smooth-L1 regression of the normalized residual within
    the ground-truth bin (cf. PointRCNN / Frustum-PointNets).

    Args:
        pred_reg: (N, C) predictions; channels [0, num_head_bin) are bin
            logits and [num_head_bin, 2*num_head_bin) are per-bin residuals.
        reg_label: (N, 1) ground-truth heading angles in radians.
        num_head_bin: Number of heading bins.
        get_ry_fine: Two-stage fine encoding; unsupported in this one-stage
            code path (asserts if set).

    Returns:
        (angle_loss, reg_loss_dict) where reg_loss_dict holds scalar
        'loss_ry_bin', 'loss_ry_res' and the combined 'loss_angle'.
    """
    # BUGFIX: restore the garbled `reg_loss_dict = {}` / `if get_ry_fine:`
    # lines (reg_loss_dict is populated below; the assert message documents
    # the get_ry_fine branch).
    reg_loss_dict = {}
    # Channel layout inside pred_reg.
    start_offset = 0
    ry_bin_l, ry_bin_r = start_offset, start_offset + num_head_bin
    ry_res_l, ry_res_r = ry_bin_r, ry_bin_r + num_head_bin
    start_offset = ry_res_r
    ry_label = reg_label.squeeze(dim=-1)
    if get_ry_fine:
        assert False, "one-stage should not get_ry_fine."
        # Fine encoding (unreachable, kept for reference): bins only cover a
        # pi/2 range around the canonical orientation.
        angle_per_class = (np.pi / 2) / num_head_bin
        ry_label = ry_label % (2 * np.pi)
        opposite_flag = (ry_label > np.pi * 0.5) & (ry_label < np.pi * 1.5)
        ry_label[opposite_flag] = (ry_label[opposite_flag] + np.pi) % (2 * np.pi)
        shift_angle = (ry_label + np.pi * 0.5) % (2 * np.pi)
        shift_angle = torch.clamp(shift_angle - np.pi * 0.25, min=1e-3, max=np.pi * 0.5 - 1e-3)
        ry_bin_label = (shift_angle / angle_per_class).floor().long()
        ry_res_label = shift_angle - (ry_bin_label.float() * angle_per_class + angle_per_class / 2)
        ry_res_norm_label = ry_res_label / (angle_per_class / 2)
    else:
        # Coarse encoding: bins evenly cover [0, 2*pi); the half-bin shift
        # centers bin 0 on angle 0.
        angle_per_class = (2 * np.pi) / num_head_bin
        heading_angle = ry_label % (2 * np.pi)
        shift_angle = (heading_angle + angle_per_class / 2) % (2 * np.pi)
        ry_bin_label = (shift_angle / angle_per_class).floor().long()
        ry_res_label = shift_angle - (ry_bin_label.float() * angle_per_class + angle_per_class / 2)
        ry_res_norm_label = ry_res_label / (angle_per_class / 2)
    # One-hot bin mask, built on the input's device so CPU tensors work too
    # (the original hard-coded torch.cuda.FloatTensor).
    ry_bin_onehot = torch.zeros(ry_bin_label.size(0), num_head_bin, device=pred_reg.device)
    ry_bin_onehot.scatter_(1, ry_bin_label.view(-1, 1).long(), 1)
    loss_ry_bin = F.cross_entropy(pred_reg[:, ry_bin_l:ry_bin_r], ry_bin_label)
    # Only the residual channel of the ground-truth bin contributes.
    loss_ry_res = F.smooth_l1_loss((pred_reg[:, ry_res_l: ry_res_r] * ry_bin_onehot).sum(dim=1), ry_res_norm_label)
    reg_loss_dict['loss_ry_bin'] = loss_ry_bin.item()
    reg_loss_dict['loss_ry_res'] = loss_ry_res.item()
    angle_loss = loss_ry_bin + loss_ry_res
    reg_loss_dict['loss_angle'] = angle_loss
    return angle_loss, reg_loss_dict
class CenterNetFocalLoss(nn.Module):
    """Penalty-reduced pixel-wise focal loss for CenterNet heatmaps.

    Reference: "Objects as Points" (Zhou et al., 2019), Eq. (1).
    """
    def __init__(self, gamma=4, alpha=2):
        super(CenterNetFocalLoss, self).__init__()
        self.gamma = gamma  # down-weights negatives near ground-truth peaks
        self.alpha = alpha  # focal exponent on the prediction error
    def _sigmoid(self, x):
        # Clamp away from 0/1 so the log terms below stay finite.
        return torch.clamp(torch.sigmoid(x), min=1e-4, max=1 - 1e-4)
    def _neg_loss(self, pred, gt):
        """Focal loss on probabilities; gt is a Gaussian-bumped heatmap
        where exactly-1 pixels are positives."""
        positives = gt.eq(1).float()
        negatives = gt.lt(1).float()
        neg_weights = torch.pow(1 - gt, self.gamma)
        pos_term = torch.log(pred) * torch.pow(1 - pred, self.alpha) * positives
        neg_term = torch.log(1 - pred) * torch.pow(pred, self.alpha) * neg_weights * negatives
        num_pos = positives.sum()
        pos_sum = pos_term.sum()
        neg_sum = neg_term.sum()
        # Normalize by the positive count when there are positives.
        if num_pos == 0:
            return -neg_sum
        return -(pos_sum + neg_sum) / num_pos
    def forward(self, out, target):
        return self._neg_loss(self._sigmoid(out), target)
class CenterNetResLoss(nn.Module):
    """Regression loss on CenterNet head outputs at ground-truth centers.

    Gathers the dense predictions at the ground-truth object indices and
    applies the configured residual loss (smooth-L1, L1 or balanced-L1).
    """
    def __init__(self, cfg):
        super(CenterNetResLoss, self).__init__()
        # One of 'smooth-l1', 'l1', 'balanced_l1' (see forward()).
        self.res_func_type = cfg['res_func']
    def forward(self, output, mask, ind, target):
        """
        Args:
            output: Dense prediction map (assumed (B, C, H, W) -- TODO
                confirm against center_utils._transpose_and_gather_feat).
            mask: (B, max_objs) validity mask for ground-truth slots.
            ind: (B, max_objs) flattened spatial indices of object centers.
            target: (B, max_objs, C) regression targets.

        Returns:
            Scalar loss; 0. when there is no valid object.
        """
        # (B, max_objs, C): predictions at the ground-truth center locations.
        pred = center_utils._transpose_and_gather_feat(output, ind)
        if mask.sum():
            # Flatten batch/object dims and keep only valid slots.
            pred_flat = pred.view(-1, pred.shape[-1])
            target_flat = target.view(-1, target.shape[-1])
            mask_flat = mask.view(-1).bool()
            pred_valid = pred_flat[mask_flat]
            target_valid = target_flat[mask_flat]
            if self.res_func_type == 'smooth-l1':
                loss = F.smooth_l1_loss(pred_valid, target_valid, reduction='none')
            elif self.res_func_type == 'l1':
                loss = F.l1_loss(pred_valid, target_valid, reduction='none')
            elif self.res_func_type == 'balanced_l1':
                loss = get_balanced_l1_loss(pred_valid, target_valid)
            else:
                raise NotImplementedError
            # Mean over valid objects, summed over regression channels.
            loss = loss.mean(dim=0).sum()
        else:
            loss = 0.
        return loss
class CenterNetRotBinResLoss(nn.Module):
    """Bin/residual orientation loss applied at ground-truth center indices.

    Delegates the actual angle loss to get_rot_binres_loss.
    """
    def __init__(self, cfg):
        super(CenterNetRotBinResLoss, self).__init__()
        # Number of discrete heading bins for the bin+residual encoding.
        self.num_head_bin = cfg['num_bins']
    def forward(self, output, mask, ind, target):
        """
        Args:
            output: Dense prediction map with 2 * num_bins orientation
                channels (assumed channel-first -- TODO confirm).
            mask: (B, max_objs) validity mask for ground-truth slots.
            ind: (B, max_objs) flattened spatial indices of object centers.
            target: (B, max_objs, 1) ground-truth heading angles.

        Returns:
            Scalar angle loss; 0. when there is no valid object.
        """
        # Predictions at the ground-truth center locations.
        pred = center_utils._transpose_and_gather_feat(output, ind)
        if mask.sum():
            # Flatten batch/object dims and keep only valid slots.
            pred_flat = pred.view(-1, pred.shape[-1])
            target_flat = target.view(-1, target.shape[-1])
            mask_flat = mask.view(-1).bool()
            pred_valid = pred_flat[mask_flat]
            target_valid = target_flat[mask_flat]
            loss, _ = get_rot_binres_loss(pred_valid, target_valid, num_head_bin=self.num_head_bin)
        else:
            loss = 0.
        return loss
def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None):
    """Multi-class Lovasz-Softmax loss (Berman et al., CVPR 2018).

    Args:
        probas: Class probabilities; see flatten_probas for accepted shapes.
        labels: Ground-truth labels.
        classes: 'all', 'present' (only classes present in labels), or an
            explicit list of class ids to average over.
        per_image: Compute the loss image-by-image and average, instead of
            over the whole batch at once.
        ignore: Optional void label to exclude from the loss.

    Returns:
        Scalar Lovasz-Softmax loss.
    """
    if per_image:
        loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes)
                          for prob, lab in zip(probas, labels))
    else:
        loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), classes=classes)
    return loss
def lovasz_softmax_flat(probas, labels, classes='present'):
    """Multi-class Lovasz-Softmax loss on flattened predictions.

    Args:
        probas: (P, C) class probabilities for each prediction.
        labels: (P,) ground-truth labels (0 .. C-1).
        classes: 'all', 'present' (only classes present in labels), or a
            list of class ids to average over.

    Returns:
        Mean of the per-class Lovasz extensions of the Jaccard loss.
    """
    if probas.numel() == 0:
        # only void pixels -- the gradients should be 0
        return probas * 0.
    C = probas.size(1)
    losses = []
    class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
    for c in class_to_sum:
        fg = (labels == c).float()  # foreground mask for class c
        # BUGFIX: was `classes is 'present'` -- identity comparison with a
        # string literal is implementation-dependent (SyntaxWarning on
        # CPython >= 3.8); use value equality.
        if (classes == 'present' and fg.sum() == 0):
            continue
        if C == 1:
            if len(classes) > 1:
                raise ValueError('Sigmoid output possible only with 1 class')
            class_pred = probas[:, 0]
        else:
            class_pred = probas[:, c]
        # Errors sorted descending drive the Lovasz extension.
        errors = (Variable(fg) - class_pred).abs()
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        perm = perm.data
        fg_sorted = fg[perm]
        losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
    return mean(losses)
def lovasz_grad(gt_sorted):
    """Gradient of the Lovasz extension w.r.t. sorted errors.

    See Alg. 1 in "The Lovasz-Softmax loss" (Berman et al., CVPR 2018).

    Args:
        gt_sorted: 1-D binary ground-truth vector, sorted by decreasing
            prediction error.

    Returns:
        1-D tensor of Jaccard-index increments.
    """
    num = len(gt_sorted)
    total_fg = gt_sorted.sum()
    # Cumulative intersection / union with the ground-truth foreground.
    inter = total_fg - gt_sorted.float().cumsum(0)
    union = total_fg + (1 - gt_sorted).float().cumsum(0)
    iou = 1. - inter / union
    if num > 1:
        # Discrete difference turns the cumulative IoU into per-step increments.
        iou[1:num] = iou[1:num] - iou[0:-1]
    return iou
def flatten_probas(probas, labels, ignore=None):
    """Flatten predictions and labels, optionally dropping 'ignore' pixels.

    Args:
        probas: Class probabilities, one of: (P, C) already flat,
            (B, H, W) binary/no-channel, (B, C, H, W) 2D segmentation, or
            (B, C, L, H, W) 3D segmentation.
        labels: Ground-truth labels with one entry per prediction.
        ignore: Optional label value to exclude from the output.

    Returns:
        (vprobas, vlabels): (P', C) probabilities and (P',) labels.
    """
    if probas.dim() == 2:
        # already flattened to (P, C)
        pass
    elif probas.dim() == 3:
        # Binary/no-channel case: insert a singleton class dimension.
        B, H, W = probas.size()
        probas = probas.view(B, 1, H, W)
        # BUGFIX: C was used below without ever being assigned (NameError).
        B, C, H, W = probas.size()
        probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)
    elif probas.dim() == 4:
        # Standard 2D segmentation output (previously unhandled, which left
        # probas unflattened and broke the ignore-indexing below).
        B, C, H, W = probas.size()
        probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)
    elif probas.dim() == 5:
        # 3D segmentation: fold the depth dimension into the spatial one.
        B, C, L, H, W = probas.size()
        probas = probas.contiguous().view(B, C, L, H*W)
        probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)
    labels = labels.view(-1)
    if ignore is not None:
        valid = (labels != ignore)
        vprobas = probas[torch.nonzero(valid, as_tuple=False).squeeze()]
        vlabels = labels[valid]
        return vprobas, vlabels
    else:
        return probas, labels
def isnan(x):
    # NaN is the only value that compares unequal to itself.
    return x != x
def mean(l, ignore_nan=False, empty=0):
    """Average of an iterable, tolerating NaNs and empty input.

    Args:
        l: Iterable of values supporting + and /.
        ignore_nan: Skip NaN entries instead of letting them propagate.
        empty: Value returned for an empty iterable, or the string 'raise'
            to raise ValueError instead.

    Returns:
        The arithmetic mean (or ``empty`` for empty input).
    """
    values = iter(l)
    if ignore_nan:
        values = ifilterfalse(isnan, values)
    try:
        total = next(values)
    except StopIteration:
        if empty == 'raise':
            raise ValueError('Empty mean')
        return empty
    count = 1
    for count, item in enumerate(values, 2):
        total += item
    if count == 1:
        # Single element: return it as-is (no division).
        return total
    return total / count
| true | true |
f72ff95e1b300049408b355850be32b4312a414b | 10,905 | py | Python | wsd/graph_wsd_test_v1.py | Bharat-Runwal/path2vec | f99188b882752ff9aa2c87334979b75483940ae0 | [
"Apache-2.0"
] | 31 | 2018-08-19T22:34:53.000Z | 2022-03-23T13:39:48.000Z | wsd/graph_wsd_test_v1.py | Bharat-Runwal/path2vec | f99188b882752ff9aa2c87334979b75483940ae0 | [
"Apache-2.0"
] | 21 | 2018-08-24T11:52:59.000Z | 2021-01-30T18:39:47.000Z | wsd/graph_wsd_test_v1.py | Bharat-Runwal/path2vec | f99188b882752ff9aa2c87334979b75483940ae0 | [
"Apache-2.0"
] | 11 | 2018-08-20T05:34:06.000Z | 2021-12-07T06:53:23.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon May 7 17:13:25 2018
@author: dorgham
"""
import networkx as nx
from nltk.corpus import wordnet as wn
from nltk.corpus import wordnet_ic
from nltk.stem import WordNetLemmatizer
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
from collections import OrderedDict
import codecs
import string
from nltk.corpus import stopwords
from sklearn.metrics import f1_score, precision_score, recall_score
#algorithm parameters
USE_POS_INFO = True
USE_LESK = False
USE_PAGERANK = True
AVG_METHOD = 'micro'
MAX_DEPTH = 3
LESK_NORM_FACTOR = 20 #this value is emperical
senseval_fpath = 'WSD_Unified_Evaluation_Datasets/senseval2/senseval2.data.xml'
gold_tags_fpath = 'WSD_Unified_Evaluation_Datasets/senseval2/senseval2.gold.key.txt'
info_content = wordnet_ic.ic('ic-semcor.dat')
wnlemmatizer = WordNetLemmatizer()
pywsd_stopwords = [u"'s", u"``", u"`"]
STOPWORDS = set(stopwords.words('english') + list(string.punctuation) + pywsd_stopwords)
def lch_similarity(synset1, synset2):
    # Leacock-Chodorow similarity: shortest taxonomy path scaled by depth.
    return wn.lch_similarity(synset1, synset2)
def jcn_similarity(synset1, synset2):
    # Jiang-Conrath similarity using the SemCor information-content corpus.
    return wn.jcn_similarity(synset1, synset2, info_content)
def _signature_words(synset):
    """Build the normalized Lesk 'signature' word set of a synset.

    The signature contains the lowercased, stopword-free, lemmatized words
    of the synset's definition and examples, plus its lemma names and the
    lemma names of its direct hypernyms/hyponyms (including instance
    relations).
    """
    text = str(synset.definition()).translate(str.maketrans('','',string.punctuation))
    for example in synset.examples():
        text += ' ' + str(example).translate(str.maketrans('','',string.punctuation))
    lemmatized = ''
    for word in set(text.split()):
        lemmatized += wnlemmatizer.lemmatize(word) + ' '
    for lemma in synset.lemma_names():
        lemmatized += ' ' + lemma
    related = set(synset.hyponyms() + synset.hypernyms() + synset.instance_hyponyms() + synset.instance_hypernyms())
    for rel in related:
        for lemma in rel.lemma_names():
            lemmatized += ' ' + lemma
    words = set(w.lower() for w in lemmatized.split())
    return words.difference(STOPWORDS)
def lesk_similarity(synset1, synset2):
    """Extended-Lesk similarity of two synsets.

    Counts the overlap between the expanded gloss signatures of the two
    synsets (Banerjee & Pedersen, 2002). The original duplicated the whole
    signature-building code for each synset; it is factored into
    _signature_words.
    """
    return len(_signature_words(synset1).intersection(_signature_words(synset2)))
def convert_to_wordnet_pos(senseval_pos):
    """Map a SensEval POS tag to the corresponding WordNet POS constant.

    Returns None for tags that have no WordNet synsets (e.g. pronouns),
    which makes wn.synsets search all parts of speech.
    """
    tag_map = {
        'VERB': wn.VERB,
        'NOUN': wn.NOUN,
        'ADV': wn.ADV,
        'ADJ': wn.ADJ,
    }
    return tag_map.get(senseval_pos)
def sentence_wsd(sentences, poses):
    """Disambiguate every target word of each sentence via graph centrality.

    For each sentence, builds a graph whose nodes are the candidate WordNet
    synsets of every target word and whose edges are weighted by synset
    similarity (LCH for verb-verb pairs, JCN for noun-noun pairs, optionally
    Lesk otherwise), then picks for each word the synset with the highest
    centrality (PageRank or weighted degree, per USE_PAGERANK).

    Args:
        sentences: List of dicts mapping instance id -> token text.
        poses: Parallel list of dicts mapping instance id -> POS tag.

    Returns:
        Dict mapping instance id -> ';'-joined WordNet sense keys of the
        selected synset ('' when no synset was found).
    """
    counter=0
    output_dict = dict()
    for sentence in sentences:
        G=nx.Graph()
        sent_len = len(sentence.keys())
        G_pos = dict() #used for aligning the nodes when drawing the graph
        pos_idx=1
        token_nodeNames_map = dict()
        pos_dict = poses[counter]
        #construct the nodes of the graph
        for i, _id in enumerate(sentence.keys()):
            if USE_POS_INFO: #restrict the retrieved snysets from wordnet to the target pos
                wn_pos = convert_to_wordnet_pos(pos_dict[_id])
            else:
                wn_pos = None
            synsets_list = list(wn.synsets(sentence[_id], pos=wn_pos))
            if len(synsets_list) > 0:
                node_names = []
                for synset in synsets_list:
                    node_name = str(i) + ' ' + synset.name()
                    #adding the index to the node name is important in the case of
                    #having a word that is repeated in the sentence but with
                    #different sense each time, so we want unique node for each one.
                    G.add_node(node_name)
                    node_names.append(node_name)
                token_nodeNames_map[_id] = node_names
                G_pos.update( (label, (pos_idx, j)) for j, label in enumerate(node_names) )
                pos_idx+=1
        #compute word similarity
        ids_list = list(sentence.keys())
        lch_sim_dict = dict()
        jcn_sim_dict = dict()
        lesk_sim_dict = dict()
        #print sentence.values()
        for idx, key in enumerate(ids_list):
            if USE_POS_INFO:
                wn_pos = convert_to_wordnet_pos(pos_dict[ids_list[idx]])
            else:
                wn_pos = None
            synsets_list = list(wn.synsets(sentence[ids_list[idx]], pos=wn_pos))
            if len(synsets_list) > 0:
                i = 1
                # Only connect words at most MAX_DEPTH positions apart.
                while i<=MAX_DEPTH and idx+i<sent_len:
                    if USE_POS_INFO:
                        wn_pos = convert_to_wordnet_pos(pos_dict[ids_list[idx+i]])
                    else:
                        wn_pos = None
                    next_synsets_list = list(wn.synsets(sentence[ids_list[idx+i]], pos=wn_pos))
                    if len(next_synsets_list) > 0:
                        for current_synset in synsets_list:
                            for neighbor_synset in next_synsets_list:
                                # Edge key: "<idx> <synset>;<idx+i> <synset>".
                                nodes = str(idx) + ' ' + current_synset.name() + ';'
                                nodes += str(idx+i) + ' ' + neighbor_synset.name()
                                if current_synset.pos() == 'v' and neighbor_synset.pos() == 'v':
                                    sim_weight = lch_similarity(current_synset, neighbor_synset)
                                    lch_sim_dict[nodes] = sim_weight
                                elif current_synset.pos() == 'n' and neighbor_synset.pos() == 'n':
                                    sim_weight = jcn_similarity(current_synset, neighbor_synset)
                                    jcn_sim_dict[nodes] = sim_weight
                                elif USE_LESK:
                                    sim_weight = lesk_similarity(current_synset, neighbor_synset)
                                    lesk_sim_dict[nodes] = sim_weight
                    i+=1
        #normalize the similarity weights and build edges
        if lch_sim_dict:
            max_lch_score = max(lch_sim_dict.values())
            for key in lch_sim_dict:
                nodeIds = key.split(';')
                G.add_edge(nodeIds[0],nodeIds[1], weight=(lch_sim_dict[key]/max_lch_score))
        if jcn_sim_dict:
            max_jcn_score = max(jcn_sim_dict.values())
            for key in jcn_sim_dict:
                nodeIds = key.split(';')
                G.add_edge(nodeIds[0],nodeIds[1], weight=(jcn_sim_dict[key]/max_jcn_score))
        if USE_LESK:
            if lesk_sim_dict:
                max_lesk_score = max(lesk_sim_dict.values())
                if max_lesk_score > 0:
                    # Lesk scores use the fixed empirical LESK_NORM_FACTOR
                    # rather than the per-sentence maximum.
                    for key in lesk_sim_dict:
                        nodeIds = key.split(';')
                        G.add_edge(nodeIds[0],nodeIds[1], weight=(lesk_sim_dict[key]/LESK_NORM_FACTOR))
        #compute graph centrality
        node_scores = dict()
        if USE_PAGERANK:
            node_scores = nx.pagerank(G)
        else:
            node_scores = G.degree(G.nodes(), "weight")
        # Pick, per word, the candidate synset with the highest score and
        # emit its WordNet sense keys.
        for token_id in ids_list:
            nodeNames = token_nodeNames_map.get(token_id)
            scores = []
            max_label = ""
            wordnet_key = ""
            if nodeNames:
                for nodeName in nodeNames:
                    scores.append(node_scores[nodeName])
            if scores:
                max_index = max(range(len(scores)), key=scores.__getitem__)
                max_label = nodeNames[max_index]
            if max_label:
                # Strip the "<idx> " prefix to recover the synset name.
                i = max_label.find(' ')
                lemmas = wn.synset(max_label[i+1:]).lemmas()
                for lemma in lemmas:
                    wordnet_key += lemma.key()+';'
                wordnet_key = wordnet_key[0:-1]
            output_dict[token_id] = wordnet_key
        #add the weight as attribute to the nodes of the graph
        #for node in node_scores.keys():
        #    G.node[node]['weight']=node_scores[node]
        counter += 1
        if counter==1: #draw the graph of the first sentence
            plt.close()
            nx.draw(G, pos=G_pos, with_labels = True)
            plt.show()
        G.clear()
    return output_dict
def load_senseval_data(file_path):
    """Parse a SensEval/SemEval ``*.data.xml`` file.

    Only <instance> elements (the words to disambiguate) are kept; plain
    <wf> word forms are skipped.

    Args:
        file_path: Path to the senseval XML file.

    Returns:
        (sentences, pos_list): parallel lists with one OrderedDict per
        non-empty sentence, mapping instance id -> token text and
        instance id -> POS tag respectively.
    """
    sentences = []
    pos_list = []
    tree = ET.parse(file_path)
    root = tree.getroot()
    for text in root:
        for sentence in text:
            # CONSISTENCY FIX: the original reused one pre-created dict and
            # reset it to a plain dict() afterwards, so only the first
            # sentence was an OrderedDict; build a fresh OrderedDict per
            # sentence instead.
            tokens_dict = OrderedDict()
            pos_dict = OrderedDict()
            for word in sentence:
                if word.tag == 'instance' and word.attrib['id']: #only include words with the <instance> tag
                    tokens_dict[word.attrib['id']] = word.text
                    pos_dict[word.attrib['id']] = word.attrib['pos']
            if tokens_dict:
                sentences.append(tokens_dict)
                pos_list.append(pos_dict)
    return sentences, pos_list
if __name__ == "__main__":
    # Run WSD over the SensEval corpus and score it against the gold keys.
    sents, poses = load_senseval_data(senseval_fpath)
    output_dict = sentence_wsd(sents, poses)
    #load the gold results
    with codecs.open(gold_tags_fpath, 'r', 'utf-8') as f:
        lines = f.readlines()
    wsd_output = []
    gold_output = []
    for line in lines:
        # Gold line format: "<instance-id> <sense-key> [<sense-key> ...]".
        id_key_pair = line.split()
        predicted_keys = output_dict[id_key_pair[0]].split(';')
        gold_keys_set = set(id_key_pair[1:])
        predected_keys_set = set(predicted_keys)
        # If any predicted key matches a gold key, align the labels so the
        # instance counts as correct; otherwise pair the first prediction
        # with the first gold key so it counts as an error.
        if len(predected_keys_set.intersection(gold_keys_set)) > 0:
            wsd_output.append(predicted_keys[0])
            gold_output.append(predicted_keys[0])
        else:
            wsd_output.append(predicted_keys[0])
            gold_output.append(id_key_pair[1])
    assert len(wsd_output) == len(gold_output)
    # AVG_METHOD ('micro') makes precision == recall == accuracy here.
    f1 = f1_score(gold_output, wsd_output, average=AVG_METHOD)
    precision = precision_score(gold_output, wsd_output, average=AVG_METHOD)
    recall = recall_score(gold_output, wsd_output, average=AVG_METHOD)
    print ('F-score: %1.4f' % f1, ' Precision: %1.4f' % precision, ' Recall: %1.4f' % recall)
| 40.239852 | 123 | 0.584411 |
import networkx as nx
from nltk.corpus import wordnet as wn
from nltk.corpus import wordnet_ic
from nltk.stem import WordNetLemmatizer
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
from collections import OrderedDict
import codecs
import string
from nltk.corpus import stopwords
from sklearn.metrics import f1_score, precision_score, recall_score
USE_POS_INFO = True
USE_LESK = False
USE_PAGERANK = True
AVG_METHOD = 'micro'
MAX_DEPTH = 3
LESK_NORM_FACTOR = 20
senseval_fpath = 'WSD_Unified_Evaluation_Datasets/senseval2/senseval2.data.xml'
gold_tags_fpath = 'WSD_Unified_Evaluation_Datasets/senseval2/senseval2.gold.key.txt'
info_content = wordnet_ic.ic('ic-semcor.dat')
wnlemmatizer = WordNetLemmatizer()
pywsd_stopwords = [u"'s", u"``", u"`"]
STOPWORDS = set(stopwords.words('english') + list(string.punctuation) + pywsd_stopwords)
def lch_similarity(synset1, synset2):
return wn.lch_similarity(synset1, synset2)
def jcn_similarity(synset1, synset2):
return wn.jcn_similarity(synset1, synset2, info_content)
def lesk_similarity(synset1, synset2):
str1 = str(synset1.definition()).translate(str.maketrans('','',string.punctuation))
for example in synset1.examples():
str1 += ' ' + str(example).translate(str.maketrans('','',string.punctuation))
lemmatized_str1=''
for word in set(str1.split()):
lemmatized_str1 += wnlemmatizer.lemmatize(word) + ' '
for lemma in synset1.lemma_names():
lemmatized_str1 += ' ' + lemma
hyper_hypo = set(synset1.hyponyms() + synset1.hypernyms() + synset1.instance_hyponyms() + synset1.instance_hypernyms())
for hh in hyper_hypo:
for lemma in hh.lemma_names():
lemmatized_str1 += ' ' + lemma
current_set = set(lemmatized_str1.split())
current_set = set(cs.lower() for cs in current_set)
current_set = current_set.difference(STOPWORDS)
#print (current_set)
str2 = str(synset2.definition()).translate(str.maketrans('','',string.punctuation))
for example in synset2.examples():
str2 += ' ' + str(example).translate(str.maketrans('','',string.punctuation))
lemmatized_str2=''
for word in set(str2.split()):
lemmatized_str2 += wnlemmatizer.lemmatize(word) + ' '
for lemma in synset2.lemma_names():
lemmatized_str2 += ' ' + lemma
hyper_hypo = set(synset2.hyponyms() + synset2.hypernyms() + synset2.instance_hyponyms() + synset2.instance_hypernyms())
for hh in hyper_hypo:
for lemma in hh.lemma_names():
lemmatized_str2 += ' ' + lemma
neighbor_set = set(lemmatized_str2.split())
neighbor_set = set(ns.lower() for ns in neighbor_set)
neighbor_set = neighbor_set.difference(STOPWORDS)
#print (neighbor_set)
return len(current_set.intersection(neighbor_set))
def convert_to_wordnet_pos(senseval_pos):
if senseval_pos == 'VERB':
return wn.VERB
elif senseval_pos == 'NOUN':
return wn.NOUN
elif senseval_pos == 'ADV':
return wn.ADV
elif senseval_pos == 'ADJ':
return wn.ADJ
else:
return None
def sentence_wsd(sentences, poses):
counter=0
output_dict = dict()
for sentence in sentences:
G=nx.Graph()
sent_len = len(sentence.keys())
G_pos = dict() #used for aligning the nodes when drawing the graph
pos_idx=1
token_nodeNames_map = dict()
pos_dict = poses[counter]
#construct the nodes of the graph
for i, _id in enumerate(sentence.keys()):
if USE_POS_INFO: #restrict the retrieved snysets from wordnet to the target pos
wn_pos = convert_to_wordnet_pos(pos_dict[_id])
else:
wn_pos = None
synsets_list = list(wn.synsets(sentence[_id], pos=wn_pos))
if len(synsets_list) > 0:
node_names = []
for synset in synsets_list:
node_name = str(i) + ' ' + synset.name()
#adding the index to the node name is important in the case of
#having a word that is repeated in the sentence but with
#different sense each time, so we want unique node for each one.
G.add_node(node_name)
node_names.append(node_name)
token_nodeNames_map[_id] = node_names
G_pos.update( (label, (pos_idx, j)) for j, label in enumerate(node_names) )
pos_idx+=1
#compute word similarity
ids_list = list(sentence.keys())
lch_sim_dict = dict()
jcn_sim_dict = dict()
lesk_sim_dict = dict()
#print sentence.values()
for idx, key in enumerate(ids_list):
if USE_POS_INFO:
wn_pos = convert_to_wordnet_pos(pos_dict[ids_list[idx]])
else:
wn_pos = None
synsets_list = list(wn.synsets(sentence[ids_list[idx]], pos=wn_pos))
if len(synsets_list) > 0:
i = 1
while i<=MAX_DEPTH and idx+i<sent_len:
if USE_POS_INFO:
wn_pos = convert_to_wordnet_pos(pos_dict[ids_list[idx+i]])
else:
wn_pos = None
next_synsets_list = list(wn.synsets(sentence[ids_list[idx+i]], pos=wn_pos))
if len(next_synsets_list) > 0:
for current_synset in synsets_list:
for neighbor_synset in next_synsets_list:
nodes = str(idx) + ' ' + current_synset.name() + ';'
nodes += str(idx+i) + ' ' + neighbor_synset.name()
if current_synset.pos() == 'v' and neighbor_synset.pos() == 'v':
sim_weight = lch_similarity(current_synset, neighbor_synset)
lch_sim_dict[nodes] = sim_weight
elif current_synset.pos() == 'n' and neighbor_synset.pos() == 'n':
sim_weight = jcn_similarity(current_synset, neighbor_synset)
jcn_sim_dict[nodes] = sim_weight
elif USE_LESK:
sim_weight = lesk_similarity(current_synset, neighbor_synset)
lesk_sim_dict[nodes] = sim_weight
i+=1
#normalize the similarity weights and build edges
if lch_sim_dict:
max_lch_score = max(lch_sim_dict.values())
for key in lch_sim_dict:
nodeIds = key.split(';')
G.add_edge(nodeIds[0],nodeIds[1], weight=(lch_sim_dict[key]/max_lch_score))
if jcn_sim_dict:
max_jcn_score = max(jcn_sim_dict.values())
for key in jcn_sim_dict:
nodeIds = key.split(';')
G.add_edge(nodeIds[0],nodeIds[1], weight=(jcn_sim_dict[key]/max_jcn_score))
if USE_LESK:
if lesk_sim_dict:
max_lesk_score = max(lesk_sim_dict.values())
if max_lesk_score > 0:
for key in lesk_sim_dict:
nodeIds = key.split(';')
G.add_edge(nodeIds[0],nodeIds[1], weight=(lesk_sim_dict[key]/LESK_NORM_FACTOR))
#compute graph centrality
node_scores = dict()
if USE_PAGERANK:
node_scores = nx.pagerank(G)
else:
node_scores = G.degree(G.nodes(), "weight")
for token_id in ids_list:
nodeNames = token_nodeNames_map.get(token_id)
scores = []
max_label = ""
wordnet_key = ""
if nodeNames:
for nodeName in nodeNames:
scores.append(node_scores[nodeName])
if scores:
max_index = max(range(len(scores)), key=scores.__getitem__)
max_label = nodeNames[max_index]
if max_label:
i = max_label.find(' ')
lemmas = wn.synset(max_label[i+1:]).lemmas()
for lemma in lemmas:
wordnet_key += lemma.key()+';'
wordnet_key = wordnet_key[0:-1]
output_dict[token_id] = wordnet_key
#add the weight as attribute to the nodes of the graph
#for node in node_scores.keys():
# G.node[node]['weight']=node_scores[node]
counter += 1
if counter==1: #draw the graph of the first sentence
plt.close()
nx.draw(G, pos=G_pos, with_labels = True)
plt.show()
G.clear()
return output_dict
def load_senseval_data(file_path):
tokens_dict = OrderedDict()
pos_dict = OrderedDict()
sentences = []
pos_list = []
tree = ET.parse(file_path)
root = tree.getroot()
for text in root:
for sentence in text:
for word in sentence:
if word.tag == 'instance' and word.attrib['id']: #only include words with the <instance> tag
tokens_dict[word.attrib['id']] = word.text
pos_dict[word.attrib['id']] = word.attrib['pos']
if tokens_dict:
sentences.append(tokens_dict)
pos_list.append(pos_dict)
tokens_dict = dict()
pos_dict = dict()
return sentences, pos_list
if __name__ == "__main__":
sents, poses = load_senseval_data(senseval_fpath)
output_dict = sentence_wsd(sents, poses)
#load the gold results
with codecs.open(gold_tags_fpath, 'r', 'utf-8') as f:
lines = f.readlines()
wsd_output = []
gold_output = []
for line in lines:
id_key_pair = line.split()
predicted_keys = output_dict[id_key_pair[0]].split(';')
gold_keys_set = set(id_key_pair[1:])
predected_keys_set = set(predicted_keys)
if len(predected_keys_set.intersection(gold_keys_set)) > 0:
wsd_output.append(predicted_keys[0])
gold_output.append(predicted_keys[0])
else:
wsd_output.append(predicted_keys[0])
gold_output.append(id_key_pair[1])
assert len(wsd_output) == len(gold_output)
f1 = f1_score(gold_output, wsd_output, average=AVG_METHOD)
precision = precision_score(gold_output, wsd_output, average=AVG_METHOD)
recall = recall_score(gold_output, wsd_output, average=AVG_METHOD)
print ('F-score: %1.4f' % f1, ' Precision: %1.4f' % precision, ' Recall: %1.4f' % recall)
| true | true |
f72ffbe57a59f3600333518d118a3d3eda4b5c23 | 5,497 | py | Python | python_src/mysetup.py | softmatterlab/DeepTrack-2.0-app | 3bc661987cba53519ebefcc0b7221994a6e2d317 | [
"MIT"
] | null | null | null | python_src/mysetup.py | softmatterlab/DeepTrack-2.0-app | 3bc661987cba53519ebefcc0b7221994a6e2d317 | [
"MIT"
] | 6 | 2020-10-27T15:50:49.000Z | 2021-10-19T14:37:47.000Z | python_src/mysetup.py | softmatterlab/DeepTrack-2.0-app | 3bc661987cba53519ebefcc0b7221994a6e2d317 | [
"MIT"
] | 3 | 2020-10-16T11:04:42.000Z | 2021-10-19T14:26:52.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by: python.exe -m py2exe myscript.py -W mysetup.py
from distutils.core import setup
import py2exe
class Target(object):
    """Base class for all executables that are created.

    A simple attribute bag: py2exe reads build settings (``script``,
    ``other_resources``, optional VersionInfo fields such as ``version``,
    ``company_name``, ``copyright``, ``product_name``, ...) straight off the
    instance. The ``version`` attribute must be set for a versioninfo
    resource to be built.
    """
    def __init__(self, **kw):
        # Store every keyword argument directly as an instance attribute.
        self.__dict__.update(kw)
    def copy(self):
        # Shallow copy: a new Target carrying the same attribute set.
        return Target(**self.__dict__)
    def __setitem__(self, name, value):
        # Allow dict-style assignment: target["key"] = value.
        self.__dict__[name] = value
RT_BITMAP = 2
RT_MANIFEST = 24
# A manifest which specifies the executionlevel
# and windows common-controls library version 6
manifest_template = '''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="*"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s</description>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel
level="%(level)s"
uiAccess="false">
</requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="*"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
myscript = Target(
# We can extend or override the VersionInfo of the base class:
# version = "1.0",
# file_description = "File Description",
# comments = "Some Comments",
# internal_name = "spam",
script="server.py", # path of the main script
# Allows to specify the basename of the executable, if different from 'myscript'
# dest_base = "myscript",
# Icon resources:[(resource_id, path to .ico file), ...]
# icon_resources=[(1, r"myscript.ico")]
other_resources = [(RT_MANIFEST, 1, (manifest_template % dict(prog="server", level="asInvoker")).encode("utf-8")),
# for bitmap resources, the first 14 bytes must be skipped when reading the file:
# (RT_BITMAP, 1, open("bitmap.bmp", "rb").read()[14:]),
]
)
# ``zipfile`` and ``bundle_files`` options explained:
# ===================================================
#
# zipfile is the Python runtime library for your exe/dll-files; it
# contains in a ziparchive the modules needed as compiled bytecode.
#
# If 'zipfile=None' is used, the runtime library is appended to the
# exe/dll-files (which will then grow quite large), otherwise the
# zipfile option should be set to a pathname relative to the exe/dll
# files, and a library-file shared by all executables will be created.
#
# The py2exe runtime *can* use extension module by directly importing
# the from a zip-archive - without the need to unpack them to the file
# system. The bundle_files option specifies where the extension modules,
# the python dll itself, and other needed dlls are put.
#
# bundle_files == 3:
# Extension modules, the Python dll and other needed dlls are
# copied into the directory where the zipfile or the exe/dll files
# are created, and loaded in the normal way.
#
# bundle_files == 2:
# Extension modules are put into the library ziparchive and loaded
# from it directly.
# The Python dll and any other needed dlls are copied into the
# directory where the zipfile or the exe/dll files are created,
# and loaded in the normal way.
#
# bundle_files == 1:
# Extension modules and the Python dll are put into the zipfile or
# the exe/dll files, and everything is loaded without unpacking to
# the file system. This does not work for some dlls, so use with
# caution.
#
# bundle_files == 0:
# Extension modules, the Python dll, and other needed dlls are put
# into the zipfile or the exe/dll files, and everything is loaded
# without unpacking to the file system. This does not work for
# some dlls, so use with caution.
# Build configuration passed to py2exe (see the bundle_files/zipfile
# explanation in the comment block above).
py2exe_options = dict(
    packages = [],
##    excludes = "tof_specials Tkinter".split(),
##    ignores = "dotblas gnosis.xml.pickle.parsers._cexpat mx.DateTime".split(),
##    dll_excludes = "MSVCP90.dll mswsock.dll powrprof.dll".split(),
    optimize=0,
    compressed=False,    # uncompressed may or may not have a faster startup
    bundle_files=3,      # 3: extensions/dlls copied next to the exe (safest)
    dist_dir='dist',
    )
# Some options can be overridden by command line options...
setup(name="name",
      # console based executables
      console=[myscript],
      # windows subsystem executables (no console)
      windows=[],
      # py2exe options
      zipfile=None,      # None: append the runtime library to the exe itself
      options={"py2exe": py2exe_options},
      )
| 32.720238 | 118 | 0.65272 |
from distutils.core import setup
import py2exe
class Target(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def copy(self):
return Target(**self.__dict__)
def __setitem__(self, name, value):
self.__dict__[name] = value
RT_BITMAP = 2
RT_MANIFEST = 24
manifest_template = '''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="*"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s</description>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel
level="%(level)s"
uiAccess="false">
</requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="*"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
myscript = Target(
script="server.py",
other_resources = [(RT_MANIFEST, 1, (manifest_template % dict(prog="server", level="asInvoker")).encode("utf-8")),
]
)
py2exe_options = dict(
packages = [],
None,
options={"py2exe": py2exe_options},
)
| true | true |
f72ffc1214ecd15928542a7ea9a9182e72d89e05 | 798 | py | Python | create_tables.py | RammySekham/Creating-Cloud-Datawarehouse | 62a92a225c3b59d0fed118453651159ccdf8ff38 | [
"MIT"
] | null | null | null | create_tables.py | RammySekham/Creating-Cloud-Datawarehouse | 62a92a225c3b59d0fed118453651159ccdf8ff38 | [
"MIT"
] | null | null | null | create_tables.py | RammySekham/Creating-Cloud-Datawarehouse | 62a92a225c3b59d0fed118453651159ccdf8ff38 | [
"MIT"
] | null | null | null | import configparser
import psycopg2
from sql_queries import create_table_queries, drop_table_queries
def drop_tables(cur, conn):
    """Drop all existing tables, committing after each DROP statement.

    cur/conn: an open psycopg2 cursor and its connection.
    """
    for drop_sql in drop_table_queries:
        cur.execute(drop_sql)
        conn.commit()
def create_tables(cur, conn):
    """Create every table listed in create_table_queries, committing per statement.

    cur/conn: an open psycopg2 cursor and its connection.
    """
    for create_sql in create_table_queries:
        cur.execute(create_sql)
        conn.commit()
def main():
    """Read cluster credentials from dwh.cfg, rebuild all tables, then disconnect."""
    parser = configparser.ConfigParser()
    parser.read('dwh.cfg')

    # The [CLUSTER] section is expected to hold host, dbname, user, password, port
    # in that order -- TODO confirm against dwh.cfg.
    dsn = "host={} dbname={} user={} password={} port={}".format(*parser['CLUSTER'].values())
    conn = psycopg2.connect(dsn)
    cur = conn.cursor()

    drop_tables(cur, conn)
    create_tables(cur, conn)

    conn.close()
# Script entry point: connect to the cluster and recreate all tables.
if __name__ == "__main__":
    main()
import psycopg2
from sql_queries import create_table_queries, drop_table_queries
def drop_tables(cur, conn):
for query in drop_table_queries:
cur.execute(query)
conn.commit()
def create_tables(cur, conn):
for query in create_table_queries:
cur.execute(query)
conn.commit()
def main():
config = configparser.ConfigParser()
config.read('dwh.cfg')
conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
cur = conn.cursor()
drop_tables(cur, conn)
create_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main() | true | true |
f72ffc4b88ea7b559670f1d9a1678141ddbe338d | 4,559 | py | Python | selfdrive/controls/lib/latcontrol_torque.py | salah608/OPENPILOT | be214b44947d2a52571b1031c25dde5d54a5fe10 | [
"MIT"
] | 1 | 2022-03-31T05:07:44.000Z | 2022-03-31T05:07:44.000Z | selfdrive/controls/lib/latcontrol_torque.py | salah608/OPENPILOT | be214b44947d2a52571b1031c25dde5d54a5fe10 | [
"MIT"
] | null | null | null | selfdrive/controls/lib/latcontrol_torque.py | salah608/OPENPILOT | be214b44947d2a52571b1031c25dde5d54a5fe10 | [
"MIT"
] | 1 | 2019-07-04T05:35:42.000Z | 2019-07-04T05:35:42.000Z | import math
from cereal import log
from common.numpy_fast import interp
from selfdrive.controls.lib.latcontrol import LatControl, MIN_STEER_SPEED
from selfdrive.controls.lib.pid import PIDController
from selfdrive.controls.lib.drive_helpers import apply_deadzone
from selfdrive.controls.lib.vehicle_model import ACCELERATION_DUE_TO_GRAVITY
# At higher speeds (25+mph) we can assume:
# Lateral acceleration achieved by a specific car correlates to
# torque applied to the steering rack. It does not correlate to
# wheel slip, or to speed.
# This controller applies torque to achieve desired lateral
# accelerations. To compensate for the low speed effects we
# use a LOW_SPEED_FACTOR in the error. Additionally, there is
# friction in the steering wheel that needs to be overcome to
# move it at all, this is compensated for too.
FRICTION_THRESHOLD = 0.2
def set_torque_tune(tune, MAX_LAT_ACCEL=2.5, FRICTION=0.01, steering_angle_deadzone_deg=0.0):
  """Populate a lateralTuning struct with torque-controller gains.

  The kp/ki/kf gains are expressed per unit of lateral acceleration, so
  dividing by MAX_LAT_ACCEL normalizes the output to the car's torque range.
  """
  tune.init('torque')
  torque = tune.torque
  torque.useSteeringAngle = True
  torque.kp = 1.0 / MAX_LAT_ACCEL
  torque.kf = 1.0 / MAX_LAT_ACCEL
  torque.ki = 0.1 / MAX_LAT_ACCEL
  torque.friction = FRICTION
  torque.steeringAngleDeadzoneDeg = steering_angle_deadzone_deg
class LatControlTorque(LatControl):
  """Lateral controller that commands steering torque to track a desired lateral acceleration.

  A PID controller acts on the lateral-acceleration error.  Per the module
  header comment, the accel<->torque correlation only holds at higher speeds,
  so at low speed the error is augmented with a curvature term and friction
  in the steering rack is compensated via the feedforward path.
  """

  def __init__(self, CP, CI):
    super().__init__(CP, CI)
    # Gains come from the car's torque tune; output is clamped to +/- steer_max.
    self.pid = PIDController(CP.lateralTuning.torque.kp, CP.lateralTuning.torque.ki,
                             k_f=CP.lateralTuning.torque.kf, pos_limit=self.steer_max, neg_limit=-self.steer_max)
    self.get_steer_feedforward = CI.get_steer_feedforward_function()
    self.use_steering_angle = CP.lateralTuning.torque.useSteeringAngle
    self.friction = CP.lateralTuning.torque.friction
    self.kf = CP.lateralTuning.torque.kf
    self.steering_angle_deadzone_deg = CP.lateralTuning.torque.steeringAngleDeadzoneDeg

  def update(self, active, CS, VM, params, last_actuators, desired_curvature, desired_curvature_rate, llk):
    """Compute the steering torque command for one control step.

    Returns (torque, angle, pid_log); the angle slot is always 0.0 for this
    controller, and the torque sign is flipped on return (see TODO below).
    """
    pid_log = log.ControlsState.LateralTorqueState.new_message()

    if CS.vEgo < MIN_STEER_SPEED or not active:
      # Below the minimum steering speed, or when inactive, command no torque.
      output_torque = 0.0
      pid_log.active = False
    else:
      if self.use_steering_angle:
        # Curvature measured from the steering angle via the vehicle model.
        actual_curvature = -VM.calc_curvature(math.radians(CS.steeringAngleDeg - params.angleOffsetDeg), CS.vEgo, params.roll)
        # The angle deadzone is converted to an equivalent curvature deadzone.
        curvature_deadzone = abs(VM.calc_curvature(math.radians(self.steering_angle_deadzone_deg), CS.vEgo, 0.0))
      else:
        # Blend the vehicle-model estimate with a yaw-rate-based estimate
        # (angular velocity / speed) between 2 and 5 m/s.
        actual_curvature_vm = -VM.calc_curvature(math.radians(CS.steeringAngleDeg - params.angleOffsetDeg), CS.vEgo, params.roll)
        actual_curvature_llk = llk.angularVelocityCalibrated.value[2] / CS.vEgo
        actual_curvature = interp(CS.vEgo, [2.0, 5.0], [actual_curvature_vm, actual_curvature_llk])
        curvature_deadzone = 0.0
      desired_lateral_accel = desired_curvature * CS.vEgo ** 2

      # desired rate is the desired rate of change in the setpoint, not the absolute desired curvature
      #desired_lateral_jerk = desired_curvature_rate * CS.vEgo ** 2
      actual_lateral_accel = actual_curvature * CS.vEgo ** 2
      lateral_accel_deadzone = curvature_deadzone * CS.vEgo ** 2

      # Mix a curvature term into both setpoint and measurement; its weight
      # fades out from 500 at standstill to 0 at 15 m/s.
      low_speed_factor = interp(CS.vEgo, [0, 15], [500, 0])
      setpoint = desired_lateral_accel + low_speed_factor * desired_curvature
      measurement = actual_lateral_accel + low_speed_factor * actual_curvature
      error = apply_deadzone(setpoint - measurement, lateral_accel_deadzone)
      pid_log.error = error

      # Feedforward: desired accel minus the gravity component due to road roll.
      ff = desired_lateral_accel - params.roll * ACCELERATION_DUE_TO_GRAVITY
      # convert friction into lateral accel units for feedforward
      friction_compensation = interp(error, [-FRICTION_THRESHOLD, FRICTION_THRESHOLD], [-self.friction, self.friction])
      ff += friction_compensation / self.kf
      # Do not wind up the integrator while the driver overrides or at very low speed.
      freeze_integrator = CS.steeringRateLimited or CS.steeringPressed or CS.vEgo < 5
      output_torque = self.pid.update(error,
                                      feedforward=ff,
                                      speed=CS.vEgo,
                                      freeze_integrator=freeze_integrator)
      pid_log.active = True
      pid_log.p = self.pid.p
      pid_log.i = self.pid.i
      pid_log.d = self.pid.d
      pid_log.f = self.pid.f
      pid_log.output = -output_torque
      pid_log.saturated = self._check_saturation(self.steer_max - abs(output_torque) < 1e-3, CS)
      pid_log.actualLateralAccel = actual_lateral_accel
      pid_log.desiredLateralAccel = desired_lateral_accel

    # TODO left is positive in this convention
    return -output_torque, 0.0, pid_log
| 47 | 129 | 0.733055 | import math
from cereal import log
from common.numpy_fast import interp
from selfdrive.controls.lib.latcontrol import LatControl, MIN_STEER_SPEED
from selfdrive.controls.lib.pid import PIDController
from selfdrive.controls.lib.drive_helpers import apply_deadzone
from selfdrive.controls.lib.vehicle_model import ACCELERATION_DUE_TO_GRAVITY
FRICTION_THRESHOLD = 0.2
def set_torque_tune(tune, MAX_LAT_ACCEL=2.5, FRICTION=0.01, steering_angle_deadzone_deg=0.0):
tune.init('torque')
tune.torque.useSteeringAngle = True
tune.torque.kp = 1.0 / MAX_LAT_ACCEL
tune.torque.kf = 1.0 / MAX_LAT_ACCEL
tune.torque.ki = 0.1 / MAX_LAT_ACCEL
tune.torque.friction = FRICTION
tune.torque.steeringAngleDeadzoneDeg = steering_angle_deadzone_deg
class LatControlTorque(LatControl):
def __init__(self, CP, CI):
super().__init__(CP, CI)
self.pid = PIDController(CP.lateralTuning.torque.kp, CP.lateralTuning.torque.ki,
k_f=CP.lateralTuning.torque.kf, pos_limit=self.steer_max, neg_limit=-self.steer_max)
self.get_steer_feedforward = CI.get_steer_feedforward_function()
self.use_steering_angle = CP.lateralTuning.torque.useSteeringAngle
self.friction = CP.lateralTuning.torque.friction
self.kf = CP.lateralTuning.torque.kf
self.steering_angle_deadzone_deg = CP.lateralTuning.torque.steeringAngleDeadzoneDeg
def update(self, active, CS, VM, params, last_actuators, desired_curvature, desired_curvature_rate, llk):
pid_log = log.ControlsState.LateralTorqueState.new_message()
if CS.vEgo < MIN_STEER_SPEED or not active:
output_torque = 0.0
pid_log.active = False
else:
if self.use_steering_angle:
actual_curvature = -VM.calc_curvature(math.radians(CS.steeringAngleDeg - params.angleOffsetDeg), CS.vEgo, params.roll)
curvature_deadzone = abs(VM.calc_curvature(math.radians(self.steering_angle_deadzone_deg), CS.vEgo, 0.0))
else:
actual_curvature_vm = -VM.calc_curvature(math.radians(CS.steeringAngleDeg - params.angleOffsetDeg), CS.vEgo, params.roll)
actual_curvature_llk = llk.angularVelocityCalibrated.value[2] / CS.vEgo
actual_curvature = interp(CS.vEgo, [2.0, 5.0], [actual_curvature_vm, actual_curvature_llk])
curvature_deadzone = 0.0
desired_lateral_accel = desired_curvature * CS.vEgo ** 2
actual_lateral_accel = actual_curvature * CS.vEgo ** 2
lateral_accel_deadzone = curvature_deadzone * CS.vEgo ** 2
low_speed_factor = interp(CS.vEgo, [0, 15], [500, 0])
setpoint = desired_lateral_accel + low_speed_factor * desired_curvature
measurement = actual_lateral_accel + low_speed_factor * actual_curvature
error = apply_deadzone(setpoint - measurement, lateral_accel_deadzone)
pid_log.error = error
ff = desired_lateral_accel - params.roll * ACCELERATION_DUE_TO_GRAVITY
friction_compensation = interp(error, [-FRICTION_THRESHOLD, FRICTION_THRESHOLD], [-self.friction, self.friction])
ff += friction_compensation / self.kf
freeze_integrator = CS.steeringRateLimited or CS.steeringPressed or CS.vEgo < 5
output_torque = self.pid.update(error,
feedforward=ff,
speed=CS.vEgo,
freeze_integrator=freeze_integrator)
pid_log.active = True
pid_log.p = self.pid.p
pid_log.i = self.pid.i
pid_log.d = self.pid.d
pid_log.f = self.pid.f
pid_log.output = -output_torque
pid_log.saturated = self._check_saturation(self.steer_max - abs(output_torque) < 1e-3, CS)
pid_log.actualLateralAccel = actual_lateral_accel
pid_log.desiredLateralAccel = desired_lateral_accel
return -output_torque, 0.0, pid_log
| true | true |
f72ffc7299c2bfe078237263c5c34c71dccfc1d9 | 250 | py | Python | manage.py | muhiza/bestb | 3c25db0b31c736a59e6a6623615da50a1ab5f196 | [
"MIT"
] | 110 | 2016-11-25T14:25:10.000Z | 2022-02-16T08:25:57.000Z | manage.py | muhiza/bestb | 3c25db0b31c736a59e6a6623615da50a1ab5f196 | [
"MIT"
] | 86 | 2016-11-13T10:04:07.000Z | 2022-03-11T23:14:01.000Z | manage.py | muhiza/bestb | 3c25db0b31c736a59e6a6623615da50a1ab5f196 | [
"MIT"
] | 21 | 2016-12-06T15:03:44.000Z | 2021-12-30T11:38:19.000Z | #!/usr/bin/env python
import os
import sys
# Standard Django management entry point: point Django at this project's
# settings module, then hand the command-line arguments off to Django.
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sciblog.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| 22.727273 | 71 | 0.772 |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sciblog.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| true | true |
f72ffcc2bb7815a6350f846fc32861403f679efd | 7 | py | Python | tests/unit/conftest.py | bernease/whylogs-python | cfd2a2f71280537aae584cbd40a752fbe7da647b | [
"Apache-2.0"
] | null | null | null | tests/unit/conftest.py | bernease/whylogs-python | cfd2a2f71280537aae584cbd40a752fbe7da647b | [
"Apache-2.0"
] | null | null | null | tests/unit/conftest.py | bernease/whylogs-python | cfd2a2f71280537aae584cbd40a752fbe7da647b | [
"Apache-2.0"
] | null | null | null | # | 7 | 7 | 0 | true | true | |
f72ffd82aa9586a214a2d9cf5db0f17af8e80cc5 | 1,026 | py | Python | tests/helpers.py | der-gabe/pynonymizer | 3e53bb1f27c2446672f7c2794009354dc8d95ace | [
"MIT"
] | null | null | null | tests/helpers.py | der-gabe/pynonymizer | 3e53bb1f27c2446672f7c2794009354dc8d95ace | [
"MIT"
] | null | null | null | tests/helpers.py | der-gabe/pynonymizer | 3e53bb1f27c2446672f7c2794009354dc8d95ace | [
"MIT"
] | null | null | null | import re
import pytest
from contextlib import contextmanager
class AnyObject:
def __eq__(self, actual):
return True
def __ne__(self, other):
return False
class SuperdictOf:
def __init__(self, required_dict):
self.required_dict = required_dict
def __eq__(self, actual):
return self.required_dict.items() <= actual.items()
def __ne__(self, actual):
return not(self.required_dict.items() <= actual.items())
class ComparableRegex:
    """Equality-comparable regex wrapper: ``obj == s`` iff the pattern matches *s*.

    Note: uses ``match()``, so the pattern only needs to match at the start
    of the string, not the whole string.
    """

    def __init__(self, pattern, flags=0):
        self._regex = re.compile(pattern, flags)

    def __eq__(self, actual):
        return self._regex.match(actual) is not None

    def __repr__(self):
        return self._regex.pattern
@contextmanager
def not_raises(exception):
    """Context manager asserting that the wrapped block does NOT raise *exception*.

    Mirror image of pytest.raises: control is yielded to the block, and if the
    given exception type propagates out of it, the test is failed explicitly.
    """
    try:
        yield
    except exception:
        # pytest.fail() raises its own Failed exception and never returns, so
        # the original `raise pytest.fail(...)` keyword was dead code.
        pytest.fail("DID RAISE {0}".format(exception))
def list_rindex(alist, value):
    """Return the index of the LAST occurrence of *value* in *alist*.

    Raises ValueError if *value* is absent (mirrors list.index).
    """
    offset_from_end = alist[::-1].index(value)
    return len(alist) - 1 - offset_from_end
import pytest
from contextlib import contextmanager
class AnyObject:
def __eq__(self, actual):
return True
def __ne__(self, other):
return False
class SuperdictOf:
def __init__(self, required_dict):
self.required_dict = required_dict
def __eq__(self, actual):
return self.required_dict.items() <= actual.items()
def __ne__(self, actual):
return not(self.required_dict.items() <= actual.items())
class ComparableRegex:
def __init__(self, pattern, flags=0):
self._regex = re.compile(pattern, flags)
def __eq__(self, actual):
return bool(self._regex.match(actual))
def __repr__(self):
return self._regex.pattern
@contextmanager
def not_raises(exception):
try:
yield
except exception:
raise pytest.fail("DID RAISE {0}".format(exception))
def list_rindex(alist, value):
return len(alist) - alist[-1::-1].index(value) - 1 | true | true |
f72ffe71d2e1e836e254e19dc8302d96f10fbef4 | 12,747 | py | Python | Examples/Tests/PythonWrappers/PICMI_inputs_2d.py | oshapoval/WarpX | 84d687da21ee93db67fdc43efec8a9cc80d0e6f9 | [
"BSD-3-Clause-LBNL"
] | null | null | null | Examples/Tests/PythonWrappers/PICMI_inputs_2d.py | oshapoval/WarpX | 84d687da21ee93db67fdc43efec8a9cc80d0e6f9 | [
"BSD-3-Clause-LBNL"
] | null | null | null | Examples/Tests/PythonWrappers/PICMI_inputs_2d.py | oshapoval/WarpX | 84d687da21ee93db67fdc43efec8a9cc80d0e6f9 | [
"BSD-3-Clause-LBNL"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from pywarpx import picmi
# Number of time steps
max_steps = 100
# Grid
nx = 128
nz = 128
# Domain
xmin = 0.e-6
zmin = 0.e-6
xmax = 50.e-6
zmax = 50.e-6
# Cell size
dx = (xmax - xmin) / nx
dz = (zmax - zmin) / nz
# Domain decomposition
max_grid_size_x = 64
max_grid_size_z = 64
# PML
nxpml = 10
nzpml = 10
field_boundary = ['open', 'open']
# Spectral order
nox = 8
noz = 8
# Guard cells
nxg = 8
nzg = 8
# Initialize grid
grid = picmi.Cartesian2DGrid(number_of_cells = [nx,nz],
lower_bound = [xmin,zmin],
upper_bound = [xmax,zmax],
lower_boundary_conditions = field_boundary,
upper_boundary_conditions = field_boundary,
guard_cells = [nxg,nzg],
moving_window_velocity = [0.,0.,0],
warpx_max_grid_size_x = max_grid_size_x,
warpx_max_grid_size_y = max_grid_size_z)
# Initialize field solver
solver = picmi.ElectromagneticSolver(grid=grid, cfl=0.95, method='PSATD',
stencil_order = [nox,noz],
divE_cleaning = 1,
divB_cleaning = 1,
pml_divE_cleaning = 1,
pml_divB_cleaning = 1,
warpx_psatd_update_with_rho = True)
# Initialize diagnostics
diag_field_list = ["E", "B"]
field_diag = picmi.FieldDiagnostic(name = 'diag1',
grid = grid,
period = 10,
write_dir = '.',
warpx_file_prefix = 'Python_wrappers_plt',
data_list = diag_field_list)
# Initialize simulation
sim = picmi.Simulation(solver = solver,
max_steps = max_steps,
verbose = 1,
particle_shape = 'cubic',
warpx_current_deposition_algo = 'direct',
warpx_particle_pusher_algo = 'boris',
warpx_field_gathering_algo = 'energy-conserving',
warpx_use_filter = 1)
# Add diagnostics to simulation
sim.add_diagnostic(field_diag)
# Write input file to run with compiled version
sim.write_input_file(file_name = 'inputs_2d')
# Whether to include guard cells in data returned by Python wrappers
include_ghosts = 1
# Compute min and max of fields data
def compute_minmax(data):
    """Return a symmetric (vmin, vmax) color range centered on zero."""
    bound = np.abs(data).max()
    return -bound, bound
# Plot fields data either in valid domain or in PML
def plot_data(data, pml, title, name):
    """Render a 2D field slice as an image and save it to 'figure_<name>.png'.

    When *pml* is True the plot extent covers the PML and PML-ghost regions
    and their boundaries are drawn and labelled; otherwise only the valid
    domain plus ghost cells is shown.  Relies on the module-level grid
    parameters (nx, nz, nxg, nzg, nxpml, nzpml).
    """
    fig, ax = plt.subplots(nrows = 1, ncols = 1, gridspec_kw = dict(wspace = 0.5), figsize = [6,5])
    cax = make_axes_locatable(ax).append_axes('right', size='5%', pad='5%')
    # Shared line width/style for all region-boundary lines.
    lw = 0.8
    ls = '--'
    if pml:
        # Draw PMLs and ghost regions
        ax.axvline(x = 0       , linewidth = lw, linestyle = ls)
        ax.axvline(x = 0+nxg   , linewidth = lw, linestyle = ls)
        ax.axvline(x = -nxpml  , linewidth = lw, linestyle = ls)
        ax.axvline(x = nx      , linewidth = lw, linestyle = ls)
        ax.axvline(x = nx-nxg  , linewidth = lw, linestyle = ls)
        ax.axvline(x = nx+nxpml, linewidth = lw, linestyle = ls)
        ax.axhline(y = 0       , linewidth = lw, linestyle = ls)
        ax.axhline(y = 0+nzg   , linewidth = lw, linestyle = ls)
        ax.axhline(y = -nzpml  , linewidth = lw, linestyle = ls)
        ax.axhline(y = nz      , linewidth = lw, linestyle = ls)
        ax.axhline(y = nz-nzg  , linewidth = lw, linestyle = ls)
        ax.axhline(y = nz+nzpml, linewidth = lw, linestyle = ls)
        # Annotations
        ax.annotate('PML', xy = (-nxpml//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')
        ax.annotate('PML', xy = (nx+nxpml//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')
        ax.annotate('PML', xy = (nx//2,-nzpml//2), rotation = 'horizontal', ha = 'center', va = 'center')
        ax.annotate('PML', xy = (nx//2,nz+nzpml//2), rotation = 'horizontal', ha = 'center', va = 'center')
        ax.annotate('PML ghost', xy = (nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')
        ax.annotate('PML ghost', xy = (-nxpml-nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')
        ax.annotate('PML ghost', xy = (nx-nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')
        ax.annotate('PML ghost', xy = (nx+nxpml+nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')
        ax.annotate('PML ghost', xy = (nx//2,nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')
        ax.annotate('PML ghost', xy = (nx//2,-nzpml-nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')
        ax.annotate('PML ghost', xy = (nx//2,nz-nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')
        ax.annotate('PML ghost', xy = (nx//2,nz+nzpml+nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')
        # Set extent and sliced data
        extent = np.array([-nxg-nxpml, nx+nxpml+nxg, -nzg-nzpml, nz+nzpml+nzg])
    else:
        # Draw ghost regions
        ax.axvline(x = 0 , linewidth = lw, linestyle = ls)
        ax.axvline(x = nx, linewidth = lw, linestyle = ls)
        ax.axhline(y = 0 , linewidth = lw, linestyle = ls)
        ax.axhline(y = nz, linewidth = lw, linestyle = ls)
        # Annotations
        ax.annotate('ghost', xy = (-nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')
        ax.annotate('ghost', xy = (nx+nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')
        ax.annotate('ghost', xy = (nx//2,-nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')
        ax.annotate('ghost', xy = (nx//2,nz+nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')
        # Set extent and sliced data
        extent = np.array([-nxg, nx+nxg, -nzg, nz+nzg])
    # Transpose so x runs along the horizontal image axis and z along the vertical.
    X = data[:,:].transpose()
    # Min and max for colorbar (symmetric around zero)
    vmin, vmax = compute_minmax(X)
    # Display data as image
    im = ax.imshow(X = X, origin = 'lower', extent = extent, vmin = vmin, vmax = vmax, cmap = 'seismic')
    # Add colorbar to plot
    fig.colorbar(im, cax = cax)
    # Set label for x- and y-axis, set title
    ax.set_xlabel('x')
    ax.set_ylabel('z')
    ax.set_title(title)
    # Set plot title
    suptitle = 'PML in (x,z), 4 grids 64 x 64'
    plt.suptitle(suptitle)
    # Save figure
    figname = 'figure_' + name + '.png'
    fig.savefig(figname, dpi = 100)
# Initialize fields data (unit pulse) and apply smoothing
def init_data(data):
    """Write a 3x3 smoothed unit impulse into the center of the valid domain.

    Uses the module-level grid sizes nx and nz to locate the center cell.
    """
    smoothing_1d = np.array([1./4., 1./2., 1./4.])
    data[nx//2-1:nx//2+2, nz//2-1:nz//2+2] = np.outer(smoothing_1d, smoothing_1d)
# Initialize inputs and WarpX instance
sim.initialize_inputs()
sim.initialize_warpx()
# Get fields data using Python wrappers
import pywarpx.fields as pwxf
Ex = pwxf.ExFPWrapper(include_ghosts = include_ghosts)
Ey = pwxf.EyFPWrapper(include_ghosts = include_ghosts)
Ez = pwxf.EzFPWrapper(include_ghosts = include_ghosts)
Bx = pwxf.BxFPWrapper(include_ghosts = include_ghosts)
By = pwxf.ByFPWrapper(include_ghosts = include_ghosts)
Bz = pwxf.BzFPWrapper(include_ghosts = include_ghosts)
F = pwxf.FFPWrapper(include_ghosts = include_ghosts)
G = pwxf.GFPWrapper(include_ghosts = include_ghosts)
Expml = pwxf.ExFPPMLWrapper(include_ghosts = include_ghosts)
Eypml = pwxf.EyFPPMLWrapper(include_ghosts = include_ghosts)
Ezpml = pwxf.EzFPPMLWrapper(include_ghosts = include_ghosts)
Bxpml = pwxf.BxFPPMLWrapper(include_ghosts = include_ghosts)
Bypml = pwxf.ByFPPMLWrapper(include_ghosts = include_ghosts)
Bzpml = pwxf.BzFPPMLWrapper(include_ghosts = include_ghosts)
Fpml = pwxf.FFPPMLWrapper(include_ghosts = include_ghosts)
Gpml = pwxf.GFPPMLWrapper(include_ghosts = include_ghosts)
# Initialize fields data in valid domain
init_data(Ex)
init_data(Ey)
init_data(Ez)
init_data(Bx)
init_data(By)
init_data(Bz)
init_data(F)
init_data(G)
# Advance simulation until last time step
sim.step(max_steps)
# Plot E
plot_data(Ex, pml = False, title = 'Ex', name = 'Ex')
plot_data(Ey, pml = False, title = 'Ey', name = 'Ey')
plot_data(Ez, pml = False, title = 'Ez', name = 'Ez')
# Plot B
plot_data(Bx, pml = False, title = 'Bx', name = 'Bx')
plot_data(By, pml = False, title = 'By', name = 'By')
plot_data(Bz, pml = False, title = 'Bz', name = 'Bz')
# F and G
plot_data(F, pml = False, title = 'F', name = 'F')
plot_data(G, pml = False, title = 'G', name = 'G')
# Plot E in PML
plot_data(Expml[:,:,0], pml = True, title = 'Exy in PML', name = 'Exy')
plot_data(Expml[:,:,1], pml = True, title = 'Exz in PML', name = 'Exz')
plot_data(Expml[:,:,2], pml = True, title = 'Exx in PML', name = 'Exx')
plot_data(Eypml[:,:,0], pml = True, title = 'Eyz in PML', name = 'Eyz')
plot_data(Eypml[:,:,1], pml = True, title = 'Eyx in PML', name = 'Eyx')
plot_data(Eypml[:,:,2], pml = True, title = 'Eyy in PML', name = 'Eyy') # zero
plot_data(Ezpml[:,:,0], pml = True, title = 'Ezx in PML', name = 'Ezx')
plot_data(Ezpml[:,:,1], pml = True, title = 'Ezy in PML', name = 'Ezy') # zero
plot_data(Ezpml[:,:,2], pml = True, title = 'Ezz in PML', name = 'Ezz')
# Plot B in PML
plot_data(Bxpml[:,:,0], pml = True, title = 'Bxy in PML', name = 'Bxy')
plot_data(Bxpml[:,:,1], pml = True, title = 'Bxz in PML', name = 'Bxz')
plot_data(Bxpml[:,:,2], pml = True, title = 'Bxx in PML', name = 'Bxx')
plot_data(Bypml[:,:,0], pml = True, title = 'Byz in PML', name = 'Byz')
plot_data(Bypml[:,:,1], pml = True, title = 'Byx in PML', name = 'Byx')
plot_data(Bypml[:,:,2], pml = True, title = 'Byy in PML', name = 'Byy') # zero
plot_data(Bzpml[:,:,0], pml = True, title = 'Bzx in PML', name = 'Bzx')
plot_data(Bzpml[:,:,1], pml = True, title = 'Bzy in PML', name = 'Bzy') # zero
plot_data(Bzpml[:,:,2], pml = True, title = 'Bzz in PML', name = 'Bzz')
# Plot F and G in PML
plot_data(Fpml[:,:,0], pml = True, title = 'Fx in PML', name = 'Fx')
plot_data(Fpml[:,:,1], pml = True, title = 'Fy in PML', name = 'Fy')
plot_data(Fpml[:,:,2], pml = True, title = 'Fz in PML', name = 'Fz')
plot_data(Gpml[:,:,0], pml = True, title = 'Gx in PML', name = 'Gx')
plot_data(Gpml[:,:,1], pml = True, title = 'Gy in PML', name = 'Gy')
plot_data(Gpml[:,:,2], pml = True, title = 'Gz in PML', name = 'Gz')
# Check values with benchmarks (precomputed from the same Python arrays)
def check_values(benchmark, data, rtol, atol):
    """Assert that the L1 norm (sum of absolute values) of *data* matches *benchmark*.

    Comparison uses numpy.allclose with the given relative/absolute tolerances.
    """
    total = np.sum(np.abs(data[:,:]))
    assert np.allclose(benchmark, total, rtol=rtol, atol=atol)
rtol = 1e-09
atol = 1e-12
# E
check_values(1013263608.6369569, Ex[:,:], rtol, atol)
check_values(717278253.4505507 , Ey[:,:], rtol, atol)
check_values(717866566.5718911 , Ez[:,:], rtol, atol)
# B
check_values(3.0214509313437636, Bx[:,:], rtol, atol)
check_values(3.0242765102729985, By[:,:], rtol, atol)
check_values(3.0214509326970465, Bz[:,:], rtol, atol)
# F and G
check_values(3.0188584528062377, F[:,:], rtol, atol)
check_values(1013672631.8764204, G[:,:], rtol, atol)
# E in PML
check_values(364287936.1526477 , Expml[:,:,0], rtol, atol)
check_values(183582351.3212558 , Expml[:,:,1], rtol, atol)
check_values(190065766.41491824, Expml[:,:,2], rtol, atol)
check_values(440581905.9336025 , Eypml[:,:,0], rtol, atol)
check_values(178117293.6629357 , Eypml[:,:,1], rtol, atol)
check_values(0.0 , Eypml[:,:,2], rtol, atol)
check_values(430277101.26568377, Ezpml[:,:,0], rtol, atol)
check_values(0.0 , Ezpml[:,:,1], rtol, atol)
check_values(190919663.2167449 , Ezpml[:,:,2], rtol, atol)
# B in PML
check_values(1.0565189315366146 , Bxpml[:,:,0], rtol, atol)
check_values(0.4618191395098556 , Bxpml[:,:,1], rtol, atol)
check_values(0.6849858273929585 , Bxpml[:,:,2], rtol, atol)
check_values(1.7228584190213505 , Bypml[:,:,0], rtol, atol)
check_values(0.47697331996765685, Bypml[:,:,1], rtol, atol)
check_values(0.0 , Bypml[:,:,2], rtol, atol)
check_values(1.5183380774611628 , Bzpml[:,:,0], rtol, atol)
check_values(0.0 , Bzpml[:,:,1], rtol, atol)
check_values(0.6849858291863835 , Bzpml[:,:,2], rtol, atol)
# F and G in PML
check_values(1.7808748509425263, Fpml[:,:,0], rtol, atol)
check_values(0.0 , Fpml[:,:,1], rtol, atol)
check_values(0.4307845604625681, Fpml[:,:,2], rtol, atol)
check_values(536552745.42701197, Gpml[:,:,0], rtol, atol)
check_values(0.0 , Gpml[:,:,1], rtol, atol)
check_values(196016270.97767758, Gpml[:,:,2], rtol, atol)
| 43.65411 | 117 | 0.607045 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from pywarpx import picmi
max_steps = 100
nx = 128
nz = 128
xmin = 0.e-6
zmin = 0.e-6
xmax = 50.e-6
zmax = 50.e-6
dx = (xmax - xmin) / nx
dz = (zmax - zmin) / nz
max_grid_size_x = 64
max_grid_size_z = 64
nxpml = 10
nzpml = 10
field_boundary = ['open', 'open']
nox = 8
noz = 8
nxg = 8
nzg = 8
grid = picmi.Cartesian2DGrid(number_of_cells = [nx,nz],
lower_bound = [xmin,zmin],
upper_bound = [xmax,zmax],
lower_boundary_conditions = field_boundary,
upper_boundary_conditions = field_boundary,
guard_cells = [nxg,nzg],
moving_window_velocity = [0.,0.,0],
warpx_max_grid_size_x = max_grid_size_x,
warpx_max_grid_size_y = max_grid_size_z)
solver = picmi.ElectromagneticSolver(grid=grid, cfl=0.95, method='PSATD',
stencil_order = [nox,noz],
divE_cleaning = 1,
divB_cleaning = 1,
pml_divE_cleaning = 1,
pml_divB_cleaning = 1,
warpx_psatd_update_with_rho = True)
diag_field_list = ["E", "B"]
field_diag = picmi.FieldDiagnostic(name = 'diag1',
grid = grid,
period = 10,
write_dir = '.',
warpx_file_prefix = 'Python_wrappers_plt',
data_list = diag_field_list)
sim = picmi.Simulation(solver = solver,
max_steps = max_steps,
verbose = 1,
particle_shape = 'cubic',
warpx_current_deposition_algo = 'direct',
warpx_particle_pusher_algo = 'boris',
warpx_field_gathering_algo = 'energy-conserving',
warpx_use_filter = 1)
sim.add_diagnostic(field_diag)
sim.write_input_file(file_name = 'inputs_2d')
include_ghosts = 1
def compute_minmax(data):
vmax = np.abs(data).max()
vmin = -vmax
return vmin, vmax
def plot_data(data, pml, title, name):
fig, ax = plt.subplots(nrows = 1, ncols = 1, gridspec_kw = dict(wspace = 0.5), figsize = [6,5])
cax = make_axes_locatable(ax).append_axes('right', size='5%', pad='5%')
lw = 0.8
ls = '--'
if pml:
ax.axvline(x = 0 , linewidth = lw, linestyle = ls)
ax.axvline(x = 0+nxg , linewidth = lw, linestyle = ls)
ax.axvline(x = -nxpml , linewidth = lw, linestyle = ls)
ax.axvline(x = nx , linewidth = lw, linestyle = ls)
ax.axvline(x = nx-nxg , linewidth = lw, linestyle = ls)
ax.axvline(x = nx+nxpml, linewidth = lw, linestyle = ls)
ax.axhline(y = 0 , linewidth = lw, linestyle = ls)
ax.axhline(y = 0+nzg , linewidth = lw, linestyle = ls)
ax.axhline(y = -nzpml , linewidth = lw, linestyle = ls)
ax.axhline(y = nz , linewidth = lw, linestyle = ls)
ax.axhline(y = nz-nzg , linewidth = lw, linestyle = ls)
ax.axhline(y = nz+nzpml, linewidth = lw, linestyle = ls)
ax.annotate('PML', xy = (-nxpml//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')
ax.annotate('PML', xy = (nx+nxpml//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')
ax.annotate('PML', xy = (nx//2,-nzpml//2), rotation = 'horizontal', ha = 'center', va = 'center')
ax.annotate('PML', xy = (nx//2,nz+nzpml//2), rotation = 'horizontal', ha = 'center', va = 'center')
ax.annotate('PML ghost', xy = (nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')
ax.annotate('PML ghost', xy = (-nxpml-nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')
ax.annotate('PML ghost', xy = (nx-nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')
ax.annotate('PML ghost', xy = (nx+nxpml+nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')
ax.annotate('PML ghost', xy = (nx//2,nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')
ax.annotate('PML ghost', xy = (nx//2,-nzpml-nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')
ax.annotate('PML ghost', xy = (nx//2,nz-nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')
ax.annotate('PML ghost', xy = (nx//2,nz+nzpml+nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')
extent = np.array([-nxg-nxpml, nx+nxpml+nxg, -nzg-nzpml, nz+nzpml+nzg])
else:
ax.axvline(x = 0 , linewidth = lw, linestyle = ls)
ax.axvline(x = nx, linewidth = lw, linestyle = ls)
ax.axhline(y = 0 , linewidth = lw, linestyle = ls)
ax.axhline(y = nz, linewidth = lw, linestyle = ls)
ax.annotate('ghost', xy = (-nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')
ax.annotate('ghost', xy = (nx+nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')
ax.annotate('ghost', xy = (nx//2,-nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')
ax.annotate('ghost', xy = (nx//2,nz+nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')
extent = np.array([-nxg, nx+nxg, -nzg, nz+nzg])
X = data[:,:].transpose()
vmin, vmax = compute_minmax(X)
im = ax.imshow(X = X, origin = 'lower', extent = extent, vmin = vmin, vmax = vmax, cmap = 'seismic')
fig.colorbar(im, cax = cax)
ax.set_xlabel('x')
ax.set_ylabel('z')
ax.set_title(title)
suptitle = 'PML in (x,z), 4 grids 64 x 64'
plt.suptitle(suptitle)
figname = 'figure_' + name + '.png'
fig.savefig(figname, dpi = 100)
def init_data(data):
impulse_1d = np.array([1./4., 1./2., 1./4.])
impulse = np.outer(impulse_1d, impulse_1d)
data[nx//2-1:nx//2+2,nz//2-1:nz//2+2] = impulse
sim.initialize_inputs()
sim.initialize_warpx()
import pywarpx.fields as pwxf
Ex = pwxf.ExFPWrapper(include_ghosts = include_ghosts)
Ey = pwxf.EyFPWrapper(include_ghosts = include_ghosts)
Ez = pwxf.EzFPWrapper(include_ghosts = include_ghosts)
Bx = pwxf.BxFPWrapper(include_ghosts = include_ghosts)
By = pwxf.ByFPWrapper(include_ghosts = include_ghosts)
Bz = pwxf.BzFPWrapper(include_ghosts = include_ghosts)
F = pwxf.FFPWrapper(include_ghosts = include_ghosts)
G = pwxf.GFPWrapper(include_ghosts = include_ghosts)
Expml = pwxf.ExFPPMLWrapper(include_ghosts = include_ghosts)
Eypml = pwxf.EyFPPMLWrapper(include_ghosts = include_ghosts)
Ezpml = pwxf.EzFPPMLWrapper(include_ghosts = include_ghosts)
Bxpml = pwxf.BxFPPMLWrapper(include_ghosts = include_ghosts)
Bypml = pwxf.ByFPPMLWrapper(include_ghosts = include_ghosts)
Bzpml = pwxf.BzFPPMLWrapper(include_ghosts = include_ghosts)
Fpml = pwxf.FFPPMLWrapper(include_ghosts = include_ghosts)
Gpml = pwxf.GFPPMLWrapper(include_ghosts = include_ghosts)
init_data(Ex)
init_data(Ey)
init_data(Ez)
init_data(Bx)
init_data(By)
init_data(Bz)
init_data(F)
init_data(G)
sim.step(max_steps)
plot_data(Ex, pml = False, title = 'Ex', name = 'Ex')
plot_data(Ey, pml = False, title = 'Ey', name = 'Ey')
plot_data(Ez, pml = False, title = 'Ez', name = 'Ez')
plot_data(Bx, pml = False, title = 'Bx', name = 'Bx')
plot_data(By, pml = False, title = 'By', name = 'By')
plot_data(Bz, pml = False, title = 'Bz', name = 'Bz')
plot_data(F, pml = False, title = 'F', name = 'F')
plot_data(G, pml = False, title = 'G', name = 'G')
plot_data(Expml[:,:,0], pml = True, title = 'Exy in PML', name = 'Exy')
plot_data(Expml[:,:,1], pml = True, title = 'Exz in PML', name = 'Exz')
plot_data(Expml[:,:,2], pml = True, title = 'Exx in PML', name = 'Exx')
plot_data(Eypml[:,:,0], pml = True, title = 'Eyz in PML', name = 'Eyz')
plot_data(Eypml[:,:,1], pml = True, title = 'Eyx in PML', name = 'Eyx')
plot_data(Eypml[:,:,2], pml = True, title = 'Eyy in PML', name = 'Eyy')
plot_data(Ezpml[:,:,0], pml = True, title = 'Ezx in PML', name = 'Ezx')
plot_data(Ezpml[:,:,1], pml = True, title = 'Ezy in PML', name = 'Ezy')
plot_data(Ezpml[:,:,2], pml = True, title = 'Ezz in PML', name = 'Ezz')
plot_data(Bxpml[:,:,0], pml = True, title = 'Bxy in PML', name = 'Bxy')
plot_data(Bxpml[:,:,1], pml = True, title = 'Bxz in PML', name = 'Bxz')
plot_data(Bxpml[:,:,2], pml = True, title = 'Bxx in PML', name = 'Bxx')
plot_data(Bypml[:,:,0], pml = True, title = 'Byz in PML', name = 'Byz')
plot_data(Bypml[:,:,1], pml = True, title = 'Byx in PML', name = 'Byx')
plot_data(Bypml[:,:,2], pml = True, title = 'Byy in PML', name = 'Byy')
plot_data(Bzpml[:,:,0], pml = True, title = 'Bzx in PML', name = 'Bzx')
plot_data(Bzpml[:,:,1], pml = True, title = 'Bzy in PML', name = 'Bzy')
plot_data(Bzpml[:,:,2], pml = True, title = 'Bzz in PML', name = 'Bzz')
plot_data(Fpml[:,:,0], pml = True, title = 'Fx in PML', name = 'Fx')
plot_data(Fpml[:,:,1], pml = True, title = 'Fy in PML', name = 'Fy')
plot_data(Fpml[:,:,2], pml = True, title = 'Fz in PML', name = 'Fz')
plot_data(Gpml[:,:,0], pml = True, title = 'Gx in PML', name = 'Gx')
plot_data(Gpml[:,:,1], pml = True, title = 'Gy in PML', name = 'Gy')
plot_data(Gpml[:,:,2], pml = True, title = 'Gz in PML', name = 'Gz')
def check_values(benchmark, data, rtol, atol):
    """Assert that the summed absolute amplitude of *data* matches *benchmark*.

    The checksum is ``sum(abs(data[:, :]))``, compared against the recorded
    reference with ``np.allclose`` using the given relative/absolute
    tolerances.

    Parameters
    ----------
    benchmark : float
        Reference checksum recorded for this field.
    data : array-like (2-D)
        Field values on the grid.
    rtol, atol : float
        Tolerances forwarded to ``np.allclose``.

    Raises
    ------
    AssertionError
        If the checksum deviates from the benchmark; the message now reports
        both values (the original bare ``assert(passed)`` gave no diagnostic).
    """
    observed = np.sum(np.abs(data[:, :]))
    assert np.allclose(benchmark, observed, rtol=rtol, atol=atol), (
        "checksum mismatch: benchmark=%r observed=%r" % (benchmark, observed)
    )
# Tolerances for comparing each field's integrated amplitude against the
# reference checksums recorded for this benchmark run.
rtol = 1e-09
atol = 1e-12

# (reference checksum, field slice) pairs: regular-domain fields first, then
# every split-field PML component, in the original verification order.
_REFERENCE_CHECKS = [
    (1013263608.6369569, Ex[:, :]),
    (717278253.4505507, Ey[:, :]),
    (717866566.5718911, Ez[:, :]),
    (3.0214509313437636, Bx[:, :]),
    (3.0242765102729985, By[:, :]),
    (3.0214509326970465, Bz[:, :]),
    (3.0188584528062377, F[:, :]),
    (1013672631.8764204, G[:, :]),
    (364287936.1526477, Expml[:, :, 0]),
    (183582351.3212558, Expml[:, :, 1]),
    (190065766.41491824, Expml[:, :, 2]),
    (440581905.9336025, Eypml[:, :, 0]),
    (178117293.6629357, Eypml[:, :, 1]),
    (0.0, Eypml[:, :, 2]),
    (430277101.26568377, Ezpml[:, :, 0]),
    (0.0, Ezpml[:, :, 1]),
    (190919663.2167449, Ezpml[:, :, 2]),
    (1.0565189315366146, Bxpml[:, :, 0]),
    (0.4618191395098556, Bxpml[:, :, 1]),
    (0.6849858273929585, Bxpml[:, :, 2]),
    (1.7228584190213505, Bypml[:, :, 0]),
    (0.47697331996765685, Bypml[:, :, 1]),
    (0.0, Bypml[:, :, 2]),
    (1.5183380774611628, Bzpml[:, :, 0]),
    (0.0, Bzpml[:, :, 1]),
    (0.6849858291863835, Bzpml[:, :, 2]),
    (1.7808748509425263, Fpml[:, :, 0]),
    (0.0, Fpml[:, :, 1]),
    (0.4307845604625681, Fpml[:, :, 2]),
    (536552745.42701197, Gpml[:, :, 0]),
    (0.0, Gpml[:, :, 1]),
    (196016270.97767758, Gpml[:, :, 2]),
]
for benchmark, field_slice in _REFERENCE_CHECKS:
    check_values(benchmark, field_slice, rtol, atol)
# ---------------------------------------------------------------------------
# Source: Script/test.py — hlebars/YoutubeDataAnalysis (MIT license).
# (Dataset-export metadata that had been fused into the file at this point
# was removed; the line below restores the script's first import.)
# ---------------------------------------------------------------------------
import pandas as pd
import datetime
import numpy as np
import os
import re
import matplotlib.pyplot as plot
import pytz
# @timeit (repeat=3,number=10)
def EclatedSubPlot(SerieAfterGrpBy, ActivatePlotting, ListOfDateAndTime, Abbreviation):
    """Draw a grid of bar-chart subplots from a grouped count Series.

    The first index level of *SerieAfterGrpBy* is unstacked into columns and
    each column becomes one subplot.  "WeekDay" and "M" grouping keys get
    their zero-padded numeric labels ("00".."06" / "01".."12") replaced by
    day/month names.

    Parameters
    ----------
    SerieAfterGrpBy : pandas.Series
        Count series produced by a (possibly multi-key) groupby whose key
        labels are zero-padded strings.
    ActivatePlotting : bool
        Unused; kept only for interface compatibility with existing callers.
    ListOfDateAndTime : list[str]
        Grouping keys; the first entry selects the subplot grid layout.
    Abbreviation : bool
        True -> abbreviated labels ('Mon', 'Jan'); False -> full names.

    Returns
    -------
    pandas.DataFrame
        The unstacked (and relabelled) frame that was plotted.
    """
    # Zero-padded key -> (abbreviated, full) display label.
    DicoDayOfWeek = {
        "00": ('Mon', 'Monday'), "01": ('Tue', 'Tuesday'), "02": ('Wed', 'Wednesday'),
        "03": ('Thu', 'Thursday'), "04": ('Fri', 'Friday'), "05": ('Sat', 'Saturday'),
        "06": ('Sun', 'Sunday')
    }
    DicoMonthOfTheYear = {
        "01": ("Jan", "January"), "02": ("Feb", "February"), "03": ("Mar", "March"),
        "04": ("Apr", "April"), "05": ("May", "May"), "06": ("Jun", "June"),
        "07": ("Jul", "July"), "08": ("Aug", "August"), "09": ("Sep", "September"),
        "10": ("Oct", "October"), "11": ("Nov", "November"), "12": ("Dec", "December")
    }
    # Pick the label variant once instead of duplicating every branch below.
    label_idx = 0 if Abbreviation else 1
    label_maps = {"WeekDay": DicoDayOfWeek, "M": DicoMonthOfTheYear}

    df_unstack = SerieAfterGrpBy.unstack(level=0)
    if df_unstack.index.nlevels != 1:
        # Multi-key grouping: relabel whichever axis still carries the key.
        for ColumnsName in ListOfDateAndTime:
            mapper = label_maps.get(ColumnsName)
            if mapper is None:
                continue  # only "WeekDay"/"M" have display labels
            ListMultiIndexName = df_unstack.index.names
            if ColumnsName in ListMultiIndexName:
                level_index = ListMultiIndexName.index(ColumnsName)
                # Fix: the original passed the label dict as Index.map's
                # second positional argument (na_action), which is invalid.
                df_unstack.index = df_unstack.index.set_levels(
                    df_unstack.index.levels[level_index].map(lambda x: mapper[x][label_idx]),
                    level=level_index)
            else:
                # This key was unstacked into the columns.
                df_unstack.columns = df_unstack.columns.map(lambda x: mapper[x][label_idx])
    else:
        # Single remaining index level: only the columns (the unstacked first
        # key) may need relabelling.
        mapper = label_maps.get(ListOfDateAndTime[0])
        if mapper is not None:
            df_unstack.columns = df_unstack.columns.map(lambda x: mapper[x][label_idx])

    # (rows, cols) of the subplot grid per leading grouping key.
    DicoConfigRowColumsSubPlot = {"Y": (4, 3), "M": (4, 3), "W": (13, 4), "D": (8, 4),
                                  "WeekDay": (4, 2), "h": (6, 4), "m": (10, 6), "s": (10, 6)}
    n_rows, n_cols = DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]]
    axes = df_unstack.plot(subplots=True, figsize=(70, 60), layout=(n_rows, n_cols),
                           kind="bar", sharex=True, sharey=True, legend=False)
    # Axis labels: x-label on every subplot; the y-label only on the first
    # subplot of selected rows (heuristic kept unchanged from the original:
    # roughly the middle rows of odd/even grids, or every row of 4-row grids).
    for Row in range(n_rows):
        FigRow = axes[Row].flatten()
        if n_rows % 2 != 0 and Row % 3 == 1 and Row != n_rows:
            FigRow[0].set_ylabel("Nb. Video Trending")
        elif n_rows % 2 == 0 and Row % 2 == 1 and Row != n_rows:
            FigRow[0].set_ylabel("Nb. Video Trending")
        elif n_rows == 4:
            FigRow[0].set_ylabel("Nb. Video Trending")
        for Column in range(len(FigRow)):
            FigRow[Column].set_xlabel("Time")
    plot.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.2, hspace=0.5)
    plot.show()
    return df_unstack
def testtemps():
    """Exploratory scratch analysis of a Kaggle "trending YouTube videos" CSV.

    Loads ``<Country>videos.csv``, de-duplicates videos, indexes the frame by
    publish time, and produces several matplotlib bar charts: trending counts
    per (weekday, hour) via ``EclatedSubPlot``, per resampled period, per year
    for a title keyword, per weekday, and a publish-time-of-day histogram.
    Everything (country, grouping keys, filters) is hard-coded.

    NOTE(review): this is a throwaway experiment driver.  Several later
    sections reuse ``df`` after earlier sections have already replaced its
    index/columns, so parts of it would raise if executed end to end.  Code
    kept byte-identical; comments only.
    """
    # Sanity print: pytz time zones available for Japan.
    print(pytz.country_timezones('JP'))
    # 30-minute grid over one day (scaffolding; overwritten before use).
    Hours=pd.date_range('00:00:00', '23:59:00',freq=str(30)+'T').time
    df_NumberHours=pd.DataFrame(0,index=Hours,columns=["Number","Label"])
    # Hard-coded dataset selection: the alpha-3 country code picks the CSV.
    Country="FRA"
    PathToInputData=os.path.join("Script","Data","Data_IN","Youtube_CSV__And_JSON",Country+"videos.csv")
    df=pd.read_csv(PathToInputData)
    # Keep only the columns used below.
    df=df.drop(columns=['channel_title','category_id','tags','thumbnail_link','comments_disabled','ratings_disabled','video_error_or_removed','description'])
    # Parse the ISO-8601 publish timestamp.
    df['publish_time'] = pd.to_datetime(df['publish_time'], format='%Y-%m-%dT%H:%M:%S.%fZ')
    # Optionally convert UTC publish times to one representative local zone
    # per country (disabled by default).
    LocalTime=False
    if LocalTime==True:
        if Country=="USA":
            df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('US/Central')
        elif Country=="MEX":
            df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('America/Mexico_City')
        elif Country=="FRA":
            df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Europe/Paris')
        elif Country=="DEU":
            df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Europe/Berlin')
        elif Country=="GBR":
            df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Europe/London')
        elif Country=="IND":
            df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Asia/Kolkata')
        elif Country=="CAN":
            df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('America/Winnipeg')
        elif Country=="KOR":
            df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Asia/Seoul')
        elif Country=="RUS":
            df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Asia/Krasnoyarsk')
        elif Country=="JPN":
            df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Asia/Tokyo')
    # A video can trend on several days; keep only its first appearance.
    print(df["video_id"].nunique())
    df = df.drop_duplicates(subset = 'video_id', keep = 'first')
    print(df)
    # Index by publish time so the DatetimeIndex accessors below work.
    df.set_index( df['publish_time'], inplace=True)
    df_grp=df.groupby([df.index.weekday,df.index.hour])
    ser=df_grp["views"].count()  # NOTE(review): unused below
    # Lookup tables: zero-padded key -> (abbreviated, full) display label.
    DicoDayOfWeek={
        "00":('Mon','Monday'), "01":('Tue','Tuesday'), "02":('Wed','Wednesday'), "03":('Thu','Thursday'),
        "04":('Fri','Friday'), "05":('Sat','Saturday'), "06":('Sun','Sunday')
        }
    DicoMonthOfTheYear = {
        "01":("Jan", "January"),"02":("Feb","February"),"03":("Mar","March"),"04":("Apr","April"),"05":("May","May"),
        "06":("Jun","June"),"07":("Jul","July"),"08":("Aug","August"),"09":("Sep","September"),"10":("Oct","October"),
        "11":("Nov","November"),"12":("Dec","December")
        }
    # Maps a short grouping code to the matching index accessor.
    # NOTE(review): DatetimeIndex.week is deprecated/removed in recent pandas
    # (use .isocalendar().week) — confirm against the pinned pandas version.
    DicoGroubyPossibility={
        "Y":df.index.year,
        "M":df.index.month,
        "W":df.index.week,
        "D":df.index.day,
        "h":df.index.hour,
        "m":df.index.minute,
        "s":df.index.second,
        "time":df.index.time,
        "date":df.index.date,
        "WeekDay":df.index.weekday,
        }
    # Grouping keys for this run (upper-case codes are date-like, lower-case
    # time-like).
    ListOfDateAndTime=["WeekDay"]
    # NOTE(review): the branch comments are inverted w.r.t. this condition —
    # this branch runs for a single key but is written for the multi-key
    # case, while the `else` below claims to handle the single-key case.
    if len(ListOfDateAndTime)==1:
        # Classify each key as date-like or time-like and materialise it as a
        # zero-padded string column on df.
        ListOfDate=[]
        ListOfTime=[]
        for i in ListOfDateAndTime:
            if i.isupper() or i=="date" or i=="WeekDay":
                ListOfDate.append(i)
            else:
                ListOfTime.append(i)
            SegmentOfDateOrTime=DicoGroubyPossibility[i].astype(str).tolist()
            # Zero-pad single digits so "0" labels/sorts as "00".
            for DateOrTime in range(len(SegmentOfDateOrTime)):
                if len(SegmentOfDateOrTime[DateOrTime])==1:
                    SegmentOfDateOrTime[DateOrTime]=str(0)+SegmentOfDateOrTime[DateOrTime]
            df.loc[:,i]=SegmentOfDateOrTime
        Abbreviation=True
        # Count videos per (weekday, hour) and draw the subplot grid.
        df_grp=df.groupby([df.index.weekday,df.index.hour])
        df=df_grp["views"].count()
        EclatedSubPlot(df,True,ListOfDateAndTime,Abbreviation)
        plot.show()
        # Flatten the grouped counts back into a frame with one count column.
        df=df.to_frame(name = 'Number Of Video Trending').reset_index()
        if "WeekDay" in ListOfDateAndTime:
            # Replace padded weekday codes by names.
            # NOTE(review): after the groupby above the reset columns are
            # named after the index levels, so 'WeekDay' may not exist here.
            dayOfWeek={"00":'Monday', "01":'Tuesday', "02":'Wednesday', "03":'Thursday', "04":'Friday', "05":'Saturday', "06":'Sunday'}
            df['WeekDay'] = df['WeekDay'].map(dayOfWeek)
        # Build a single 'Time' label column: date parts joined by '-',
        # time parts by ':'.
        if len(ListOfDate)>0 and len(ListOfTime)>0:
            df['Time'] = df[ListOfDate].astype(str).agg('-'.join, axis=1)+" "+df[ListOfTime].astype(str).agg(':'.join, axis=1)
        elif len(ListOfDate)>0 and len(ListOfTime)==0:
            df['Time'] = df[ListOfDate].astype(str).agg('-'.join, axis=1)
        elif len(ListOfDate)==0 and len(ListOfTime)>0:
            df['Time'] = df[ListOfTime].astype(str).agg(':'.join, axis=1)
        # Index by the label and drop both the key columns and the helper
        # 'Time' column, leaving only the count column.
        df.set_index( df['Time'], inplace=True)
        ListOfDateAndTime.append('Time')
        df=df.drop(ListOfDateAndTime,axis=1)
    else:
        # Single grouping key: group directly on the padded string labels.
        SegmentOfDateOrTime=DicoGroubyPossibility[ListOfDateAndTime[0]].astype(str).tolist()
        for DateOrTime in range(len(SegmentOfDateOrTime)):
            if len(SegmentOfDateOrTime[DateOrTime])==1:
                SegmentOfDateOrTime[DateOrTime]=str(0)+SegmentOfDateOrTime[DateOrTime]
        df=df.groupby(SegmentOfDateOrTime)["views"].count()
        df=df.to_frame(name = 'Number Of Video Trending')
        df.index=df.index.rename('Time')
    df.plot(kind="bar")
    plot.show()
    # --- keyword-per-period experiment -----------------------------------
    FindText=" !"
    filtre="Minute"
    NumberOfVideoTrendingByCountry="Number Of Video "+Country
    # Resample rule and matching strftime pattern per granularity.
    # NOTE(review): "%H:%m" repeats the month code where minutes ('%M') were
    # almost certainly intended.
    DicoResampleAndGraph={"Year":("Y","%y"),"Month":("M","%y/%m"),"Day":("D","%y/%m/%d"),"Hour":("H","%y/%m/%d %H"),"Minute":("m","%y/%m/%d %H:%m")}
    # NOTE(review): df was re-indexed by 'Time' label strings above, so
    # .index.month/.day and .resample below would raise if this section ran.
    filt=(df.index.month==12) | (df.index.day==25)
    df=df[filt]
    if FindText!="":
        # Flag titles containing the keyword, then sum flags per period.
        df["result"]=df["title"].apply(lambda x: 1 if x.find(FindText)!=-1 else 0)
        df_FiltResult=df["result"].resample(DicoResampleAndGraph[filtre][0]).sum()
    else:
        df_FiltResult=df["views"].resample(DicoResampleAndGraph[filtre][0]).count()
    df_FiltResult.columns=["Label",NumberOfVideoTrendingByCountry]
    df_FiltResult.index=df_FiltResult.index.strftime(DicoResampleAndGraph[filtre][1])
    print(df_FiltResult)
    df_FiltResult.plot(y=0,kind="bar")
    plot.show()
    # --- per-year keyword counts ------------------------------------------
    NumberOfVideoTrendingByCountry="Number Of Video "+Country
    # NOTE(review): September is missing from this list (unused below anyway).
    Months=["January","February","March","April","May","June","July","August","October","November","December"]
    Years=[]
    for Year in range(min(df.publish_time.dt.year),max(df.publish_time.dt.year)+1):
        Years.append(Year)
    df_VideoCountForDayOfTheWeek=pd.DataFrame(0,index=Years,columns=[NumberOfVideoTrendingByCountry])
    print(min(df.publish_time.dt.year))
    print(max(df.publish_time.dt.year))
    # Keyword searched in titles (French "Noël" = Christmas).
    sub=" Noël "
    for Year in Years:
        filtervalue=(df.publish_time.dt.year==Year) & (df.title.str.find(sub)!=-1)
        df_VideoCountForDayOfTheWeek.loc[Year,NumberOfVideoTrendingByCountry]=max(df[filtervalue].count())
    print(df_VideoCountForDayOfTheWeek)
    # --- per-weekday totals -----------------------------------------------
    WeekDays=["Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday"]
    df_VideoCountForDayOfTheWeek=pd.DataFrame(0,index=WeekDays,columns=["Number Of Videos"])
    for WeekDay in WeekDays:
        df_VideoCountForDayOfTheWeek.loc[WeekDay,"Number Of Videos"]=max(df[df.publish_time.dt.day_name()==WeekDay].count())
    print(df_VideoCountForDayOfTheWeek)
    df_VideoCountForDayOfTheWeek.plot(y="Number Of Videos",kind="bar")
    plot.show()
    # --- publish-time-of-day histogram on a fixed grid ---------------------
    # Split publish_time into separate date and time-of-day pieces.
    df.insert(5, 'publish_date', df['publish_time'].dt.date)
    df['publish_time'] = df['publish_time'].dt.time
    # Trending date arrives as 'yy.dd.mm'.
    df['trending_date'] = pd.to_datetime(df['trending_date'], format='%y.%d.%m')
    # Normalise both to day precision before subtracting.
    df["trending_date"]=df["trending_date"].values.astype('datetime64[D]')
    df["publish_date"]=df["publish_date"].values.astype('datetime64[D]')
    # Bin width in minutes; 1/60 selects the one-second grid branch.
    IntervalMinute=1/60
    if IntervalMinute==1/60:
        counttotal=0
        countindex=0
        # One label per second over the whole day.
        HoursForLabels=pd.date_range('00:00:00', '23:59:59',freq=str(IntervalMinute)+'T').strftime('%H:%M:%S').tolist()
        NumberOfVideoTrendingByCountry="Number Of Video "+Country
        df_NumberHours=pd.DataFrame(0,index=HoursForLabels,columns=["Label",NumberOfVideoTrendingByCountry])
        df_NumberHours["Label"]=HoursForLabels
        # Count rows whose index time falls in [label[i], label[i+1]); the
        # final bucket is inclusive up to 23:59:59.
        for index in range(len(HoursForLabels)):
            if index<(len(HoursForLabels)-1):
                df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df["views"].between_time(start_time=HoursForLabels[index],end_time=HoursForLabels[index+1],include_end=False).count()
            else:
                df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df["views"].between_time(start_time=HoursForLabels[index],end_time="23:59:59",include_start=True,include_end=True).count()
    else:
        # Coarser grid: count exact publish times, then fill missing labels.
        df.insert(5, 'publish_date', df['publish_time'].dt.date)
        df['publish_time'] = df['publish_time'].dt.time
        df['trending_date'] = pd.to_datetime(df['trending_date'], format='%y.%d.%m')
        df["trending_date"]=df["trending_date"].values.astype('datetime64[D]')
        df["publish_date"]=df["publish_date"].values.astype('datetime64[D]')
        df["weekday_publish_date"] = df["publish_date"].dt.day_name()
        # Days elapsed between publication and trending.
        df["Time_Before_Trending"]=df["trending_date"].sub(df["publish_date"],axis=0)
        # Multiplicity of each exact publish time.
        df_NumberHours=df['publish_time'].value_counts()
        df_NumberHours.sort_values(0,ascending=True)  # NOTE(review): result discarded (not in place)
        df_NumberHours=df_NumberHours.sort_index()
        HoursForLabels=pd.date_range('00:00:00', '23:59:59',freq=str(IntervalMinute)+'T').strftime('%H:%M:%S').tolist()
        for time in HoursForLabels:
            if time not in df_NumberHours.index:
                df_NumberHours.set_value(time,0)  # NOTE(review): Series.set_value was removed in pandas 1.0
        df_NumberHours.index=df_NumberHours.index.time
        # Drop the empty trailing 23:59:59 bucket (everything wraps to
        # 00:00:00).
        df_NumberHours.drop(df_NumberHours.tail(1).index,inplace=True)
    def anepasutiliser():
        # French for "do not use": dead exploration kept for reference only.
        # NOTE(review): df_NumberHours is assigned below, making it local, so
        # the first print would raise UnboundLocalError if this were called;
        # it never is.
        print(df_NumberHours[NumberOfVideoTrendingByCountry].sum())
        print(df_NumberHours)
        df_NumberHours=pd.DataFrame(0,index=HoursForLabels,columns=["Label",NumberOfVideoTrendingByCountry])
        df.insert(5, 'publish_date', df['publish_time'].dt.date)
        df['publish_time'] = pd.DatetimeIndex(df['publish_time'])
        df['publish_time']=df['publish_time'].dt.time
        print(df['publish_time'])
        # Multiplicity of each exact publish time.
        df["Count"]=df['publish_time'].value_counts()
        df.sort_values('Count',ascending=True)
        print(df)
        pd.to_timedelta(df['publish_time'])
        df.set_index(pd.to_datetime(df['publish_time'],"hh:mm:ss"), inplace=True)
        print(df.index.time)
        print(df.index)
        print(df['views'].resample('T').sum())
        df['publish_time'] = df['publish_time']
        df['trending_date'] = pd.to_datetime(df['trending_date'], format='%y.%d.%m')
        df["trending_date"]=df["trending_date"].values.astype('datetime64[D]')
        df["publish_date"]=df["publish_date"].values.astype('datetime64[D]')
        df["weekday_publish_date"] = df["publish_date"].dt.day_name()
        print(df)
        df["Time_Before_Trending"]=df["trending_date"].sub(df["publish_date"],axis=0)
        Df_TimeAndNumberOfPublication=df['publish_time'].value_counts()
        Df_TimeAndNumberOfPublication.sort_values(0,ascending=True)
        print(df_NumberHours.tail(5))
        # Original observation: 40562 via the function vs 40723 via the table;
        # buckets should be centred on the interval (± half-interval), and
        # when minutes are zero an hour has to be subtracted.
        print(df_NumberHours["Number Of Video"].sum())
# Run the exploratory analysis only when this file is executed as a script,
# not as a side effect of importing it.
if __name__ == "__main__":
    testtemps()
def NumberOfVideoFilterByPublishTime(df,Country,IntervalMinute):
    """Build a per-time-of-day histogram of trending-video publish times.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw trending-videos table; must contain 'publish_time',
        'trending_date' and 'views' columns.
    Country : str
        Country code, used only to label the output column.
    IntervalMinute : float
        Requested bin width in minutes.

    Returns
    -------
    pandas.DataFrame or pandas.Series
        Counts per time bucket (the shape differs between the two branches).

    NOTE(review): kept byte-identical — this function has several apparent
    defects to confirm with the author: the branch condition looks inverted
    relative to the twin code in ``testtemps`` (there ``== 1/60`` selects the
    per-second grid); the first branch immediately overwrites the caller's
    ``IntervalMinute`` with 1/60; and the else branch calls
    ``GetDFFromWeekDay``/``DayOfTheWeek``, which are not defined anywhere in
    this module's visible scope.
    """
    if IntervalMinute!=1/60:
        # Per-second grid: index by full publish timestamp, then count rows
        # falling inside each one-second bucket.
        df.set_index( df['publish_time'], inplace=True)
        counttotal=0
        countindex=0
        IntervalMinute=1/60  # NOTE(review): clobbers the caller's argument
        HoursForLabels=pd.date_range('00:00:00', '23:59:59',freq=str(IntervalMinute)+'T').strftime('%H:%M:%S').tolist()
        NumberOfVideoTrendingByCountry="Number Of Video "+Country
        df_NumberHours=pd.DataFrame(0,index=HoursForLabels,columns=["Label",NumberOfVideoTrendingByCountry])
        df_NumberHours["Label"]=HoursForLabels
        # Count rows in [label[i], label[i+1]); final bucket is inclusive up
        # to 23:59:59.
        for index in range(len(HoursForLabels)):
            if index<(len(HoursForLabels)-1):
                df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df["views"].between_time(start_time=HoursForLabels[index],end_time=HoursForLabels[index+1],include_end=False).count()
            else:
                df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df["views"].between_time(start_time=HoursForLabels[index],end_time="23:59:59",include_start=True,include_end=True).count()
    else:
        # Exact-time counts: split publish_time into date and time-of-day.
        df.insert(5, 'publish_date', df['publish_time'].dt.date)
        df['publish_time'] = df['publish_time'].dt.time
        # Trending date arrives as 'yy.dd.mm'.
        df['trending_date'] = pd.to_datetime(df['trending_date'], format='%y.%d.%m')
        # Normalise both to day precision before subtracting.
        df["trending_date"]=df["trending_date"].values.astype('datetime64[D]')
        df["publish_date"]=df["publish_date"].values.astype('datetime64[D]')
        # Keep every weekday unless a specific day is requested.
        df["weekday_publish_date"] = df["publish_date"].dt.day_name()
        df=GetDFFromWeekDay(df,DayOfTheWeek)  # NOTE(review): undefined names in this scope
        # Days elapsed between publication and trending.
        df["Time_Before_Trending"]=df["trending_date"].sub(df["publish_date"],axis=0)
        # Multiplicity of each exact publish time.
        df_NumberHours=df['publish_time'].value_counts()
        # Drop the empty trailing 23:59:59 bucket (wraps to 00:00:00).
        df_NumberHours.drop(df_NumberHours.tail(1).index,inplace=True)
return df_NumberHours | 42.777246 | 213 | 0.625605 | import pandas as pd
import datetime
import numpy as np
import os
import re
import matplotlib.pyplot as plot
import pytz
def EclatedSubPlot(SerieAfterGrpBy,ActivatePlotting,ListOfDateAndTime,Abbreviation):
DicoDayOfWeek={
"00":('Mon','Monday'), "01":('Tue','Tuesday'), "02":('Wed','Wednesday'), "03":('Thu','Thursday'),
"04":('Fri','Friday'), "05":('Sat','Saturday'), "06":('Sun','Sunday')
}
DicoMonthOfTheYear = {
"01":("Jan", "January"),"02":("Feb","February"),"03":("Mar","March"),"04":("Apr","April"),"05":("May","May"),
"06":("Jun","June"),"07":("Jul","July"),"08":("Aug","August"),"09":("Sep","September"),"10":("Oct","October"),
"11":("Nov","November"),"12":("Dec","December")
}
df_unstack=SerieAfterGrpBy.unstack(level=0)
nblevels = df_unstack.index.nlevels
if nblevels!=1:
for ColumnsName in ListOfDateAndTime:
ListMultiIndexName=df_unstack.index.names
if ColumnsName in ListMultiIndexName:
level_index=ListMultiIndexName.index(ColumnsName)
if Abbreviation==True:
if ColumnsName=="WeekDay":
df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek), level=level_index)
elif ColumnsName=="M":
df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoMonthOfTheYear[x][0],DicoDayOfWeek), level=level_index)
elif Abbreviation==False:
if ColumnsName=="WeekDay":
df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek), level=level_index)
elif ColumnsName=="M":
df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoMonthOfTheYear[x][1],DicoDayOfWeek), level=level_index)
else:
if Abbreviation==True:
if ColumnsName=="WeekDay":
df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek)
elif ColumnsName=="M":
df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][0],DicoMonthOfTheYear)
elif Abbreviation==False:
if ColumnsName=="WeekDay":
df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek)
elif ColumnsName=="M":
df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][1],DicoMonthOfTheYear)
else:
if "WeekDay" in ListOfDateAndTime and "WeekDay"==ListOfDateAndTime[0]:
if Abbreviation==True:
df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek)
else:
df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek)
if "M" in ListOfDateAndTime and "M"==ListOfDateAndTime[0]:
if Abbreviation==True:
df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][0],DicoMonthOfTheYear)
elif Abbreviation==False:
df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][1],DicoMonthOfTheYear)
DicoConfigRowColumsSubPlot={"Y":(4,3),"M":(4,3),"W":(13,4),"D":(8,4),"WeekDay":(4,2),"h":(6,4),"m":(10,6),"s":(10,6)}
fig=df_unstack.plot(subplots=True,figsize=(70, 60), layout=DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]],kind="bar",sharex=True,sharey=True,legend=False,)msSubPlot[ListOfDateAndTime[0]][0]):
FigRow=fig[Row].flatten()
if DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]%2!=0 and Row%3==1 and Row!=DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]:
FigRow[0].set_ylabel("Nb. Video Trending")
elif DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]%2==0 and Row%2==1 and Row!=DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]:
FigRow[0].set_ylabel("Nb. Video Trending")
elif DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]==4:
FigRow[0].set_ylabel("Nb. Video Trending")
for Column in range(len(FigRow)):
FigRow[Column].set_xlabel("Time")
plot.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.2, hspace=0.5)
plot.show()
return df_unstack
def testtemps():
print(pytz.country_timezones('JP'))
Hours=pd.date_range('00:00:00', '23:59:00',freq=str(30)+'T').time
df_NumberHours=pd.DataFrame(0,index=Hours,columns=["Number","Label"])
Country="FRA"
PathToInputData=os.path.join("Script","Data","Data_IN","Youtube_CSV__And_JSON",Country+"videos.csv")
df=pd.read_csv(PathToInputData)
df=df.drop(columns=['channel_title','category_id','tags','thumbnail_link','comments_disabled','ratings_disabled','video_error_or_removed','description'])
df['publish_time'] = pd.to_datetime(df['publish_time'], format='%Y-%m-%dT%H:%M:%S.%fZ')
LocalTime=False
if LocalTime==True:
if Country=="USA":
df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('US/Central')
elif Country=="MEX":
df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('America/Mexico_City')
elif Country=="FRA":
df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Europe/Paris')
elif Country=="DEU":
df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Europe/Berlin')
elif Country=="GBR":
df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Europe/London')
elif Country=="IND":
df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Asia/Kolkata')
elif Country=="CAN":
df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('America/Winnipeg')
elif Country=="KOR":
df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Asia/Seoul')
elif Country=="RUS":
df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Asia/Krasnoyarsk')
elif Country=="JPN":
df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Asia/Tokyo')
print(df["video_id"].nunique())
df = df.drop_duplicates(subset = 'video_id', keep = 'first')
print(df)
df.set_index( df['publish_time'], inplace=True)
df_grp=df.groupby([df.index.weekday,df.index.hour])
ser=df_grp["views"].count()
DicoDayOfWeek={
"00":('Mon','Monday'), "01":('Tue','Tuesday'), "02":('Wed','Wednesday'), "03":('Thu','Thursday'),
"04":('Fri','Friday'), "05":('Sat','Saturday'), "06":('Sun','Sunday')
}
"01":("Jan", "January"),"02":("Feb","February"),"03":("Mar","March"),"04":("Apr","April"),"05":("May","May"),
"06":("Jun","June"),"07":("Jul","July"),"08":("Aug","August"),"09":("Sep","September"),"10":("Oct","October"),
"11":("Nov","November"),"12":("Dec","December")
}
DicoGroubyPossibility={
"Y":df.index.year,
"M":df.index.month,
"W":df.index.week,
"D":df.index.day,
"h":df.index.hour,
"m":df.index.minute,
"s":df.index.second,
"time":df.index.time,
"date":df.index.date,
"WeekDay":df.index.weekday,
}
fDateAndTime=["WeekDay"]
if len(ListOfDateAndTime)==1:
ListOfDate=[]
ListOfTime=[]
for i in ListOfDateAndTime:
if i.isupper() or i=="date" or i=="WeekDay":
ListOfDate.append(i)
else:
ListOfTime.append(i)
SegmentOfDateOrTime=DicoGroubyPossibility[i].astype(str).tolist()
for DateOrTime in range(len(SegmentOfDateOrTime)):
if len(SegmentOfDateOrTime[DateOrTime])==1:
SegmentOfDateOrTime[DateOrTime]=str(0)+SegmentOfDateOrTime[DateOrTime]
df.loc[:,i]=SegmentOfDateOrTime
ion=True
df_grp=df.groupby([df.index.weekday,df.index.hour])
df=df_grp["views"].count()
EclatedSubPlot(df,True,ListOfDateAndTime,Abbreviation)
ay', "05":'Saturday', "06":'Sunday'}
df['WeekDay'] = df['WeekDay'].map(dayOfWeek)
if len(ListOfDate)>0 and len(ListOfTime)>0:
df['Time'] = df[ListOfDate].astype(str).agg('-'.join, axis=1)+" "+df[ListOfTime].astype(str).agg(':'.join, axis=1)
elif len(ListOfDate)>0 and len(ListOfTime)==0:
df['Time'] = df[ListOfDate].astype(str).agg('-'.join, axis=1)
elif len(ListOfDate)==0 and len(ListOfTime)>0:
df['Time'] = df[ListOfTime].astype(str).agg(':'.join, axis=1)
df.set_index( df['Time'], inplace=True)
ListOfDateAndTime.append('Time')
df=df.drop(ListOfDateAndTime,axis=1)
else:
SegmentOfDateOrTime=DicoGroubyPossibility[ListOfDateAndTime[0]].astype(str).tolist()
for DateOrTime in range(len(SegmentOfDateOrTime)):
if len(SegmentOfDateOrTime[DateOrTime])==1:
SegmentOfDateOrTime[DateOrTime]=str(0)+SegmentOfDateOrTime[DateOrTime]
df=df.groupby(SegmentOfDateOrTime)["views"].count()
df=df.to_frame(name = 'Number Of Video Trending')
df.index=df.index.rename('Time')
FindText=" !"
filtre="Minute"
NumberOfVideoTrendingByCountry="Number Of Video "+Country
DicoResampleAndGraph={"Year":("Y","%y"),"Month":("M","%y/%m"),"Day":("D","%y/%m/%d"),"Hour":("H","%y/%m/%d %H"),"Minute":("m","%y/%m/%d %H:%m")}
filt=(df.index.month==12) | (df.index.day==25)
df=df[filt]
if FindText!="":
df["result"]=df["title"].apply(lambda x: 1 if x.find(FindText)!=-1 else 0)
df_FiltResult=df["result"].resample(DicoResampleAndGraph[filtre][0]).sum()
else:
df_FiltResult=df["views"].resample(DicoResampleAndGraph[filtre][0]).count()
df_FiltResult.columns=["Label",NumberOfVideoTrendingByCountry]
df_FiltResult.index=df_FiltResult.index.strftime(DicoResampleAndGraph[filtre][1])
print(df_FiltResult)
df_FiltResult.plot(y=0,kind="bar")
plot.show()
NumberOfVideoTrendingByCountry="Number Of Video "+Country
Months=["January","February","March","April","May","June","July","August","October","November","December"]
Years=[]
for Year in range(min(df.publish_time.dt.year),max(df.publish_time.dt.year)+1):
Years.append(Year)
df_VideoCountForDayOfTheWeek=pd.DataFrame(0,index=Years,columns=[NumberOfVideoTrendingByCountry])
print(min(df.publish_time.dt.year))
print(max(df.publish_time.dt.year))
sub=" Noël "
for Year in Years:
filtervalue=(df.publish_time.dt.year==Year) & (df.title.str.find(sub)!=-1)
df_VideoCountForDayOfTheWeek.loc[Year,NumberOfVideoTrendingByCountry]=max(df[filtervalue].count())
print(df_VideoCountForDayOfTheWeek)
WeekDays=["Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday"]
df_VideoCountForDayOfTheWeek=pd.DataFrame(0,index=WeekDays,columns=["Number Of Videos"])
for WeekDay in WeekDays:
df_VideoCountForDayOfTheWeek.loc[WeekDay,"Number Of Videos"]=max(df[df.publish_time.dt.day_name()==WeekDay].count())
print(df_VideoCountForDayOfTheWeek)
df_VideoCountForDayOfTheWeek.plot(y="Number Of Videos",kind="bar")
plot.show()
df.insert(5, 'publish_date', df['publish_time'].dt.date)
df['publish_time'] = df['publish_time'].dt.time
df['trending_date'] = pd.to_datetime(df['trending_date'], format='%y.%d.%m')
df["trending_date"]=df["trending_date"].values.astype('datetime64[D]')
df["publish_date"]=df["publish_date"].values.astype('datetime64[D]')
IntervalMinute=1/60
if IntervalMinute==1/60:
counttotal=0
countindex=0
HoursForLabels=pd.date_range('00:00:00', '23:59:59',freq=str(IntervalMinute)+'T').strftime('%H:%M:%S').tolist()
NumberOfVideoTrendingByCountry="Number Of Video "+Country
df_NumberHours=pd.DataFrame(0,index=HoursForLabels,columns=["Label",NumberOfVideoTrendingByCountry])
df_NumberHours["Label"]=HoursForLabels
for index in range(len(HoursForLabels)):
if index<(len(HoursForLabels)-1):
df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df["views"].between_time(start_time=HoursForLabels[index],end_time=HoursForLabels[index+1],include_end=False).count()
else:
df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df["views"].between_time(start_time=HoursForLabels[index],end_time="23:59:59",include_start=True,include_end=True).count()
else:
df.insert(5, 'publish_date', df['publish_time'].dt.date)
df['publish_time'] = df['publish_time'].dt.time
df['trending_date'] = pd.to_datetime(df['trending_date'], format='%y.%d.%m')
df["trending_date"]=df["trending_date"].values.astype('datetime64[D]')
df["publish_date"]=df["publish_date"].values.astype('datetime64[D]')
df["weekday_publish_date"] = df["publish_date"].dt.day_name()
df["Time_Before_Trending"]=df["trending_date"].sub(df["publish_date"],axis=0)
df_NumberHours=df['publish_time'].value_counts()
df_NumberHours.sort_values(0,ascending=True)
df_NumberHours=df_NumberHours.sort_index()
HoursForLabels=pd.date_range('00:00:00', '23:59:59',freq=str(IntervalMinute)+'T').strftime('%H:%M:%S').tolist()
for time in HoursForLabels:
if time not in df_NumberHours.index:
df_NumberHours.set_value(time,0)
df_NumberHours.index=df_NumberHours.index.time
df_NumberHours.drop(df_NumberHours.tail(1).index,inplace=True)
| true | true |
f73000570bed55023fdcd7e0333417e05cb7f21a | 297 | py | Python | einops/__init__.py | ductm104/einops | a9e3f6b0d18e01e326f74bd9861288aff94e3b2c | [
"MIT"
] | 2 | 2021-07-17T09:30:42.000Z | 2021-12-10T07:42:21.000Z | einops/__init__.py | ductm104/einops | a9e3f6b0d18e01e326f74bd9861288aff94e3b2c | [
"MIT"
] | null | null | null | einops/__init__.py | ductm104/einops | a9e3f6b0d18e01e326f74bd9861288aff94e3b2c | [
"MIT"
] | null | null | null | __author__ = 'Alex Rogozhnikov'
__version__ = '0.3.0'
class EinopsError(RuntimeError):
""" Runtime error thrown by einops """
pass
__all__ = ['rearrange', 'reduce', 'repeat', 'parse_shape', 'asnumpy', 'EinopsError']
from .einops import rearrange, reduce, repeat, parse_shape, asnumpy
| 22.846154 | 84 | 0.703704 | __author__ = 'Alex Rogozhnikov'
__version__ = '0.3.0'
class EinopsError(RuntimeError):
pass
__all__ = ['rearrange', 'reduce', 'repeat', 'parse_shape', 'asnumpy', 'EinopsError']
from .einops import rearrange, reduce, repeat, parse_shape, asnumpy
| true | true |
f730008035e6d577e29225eff7316628cc5ad753 | 68 | py | Python | inference_converter/__init__.py | mzeynali/dl-model-converter | 3adff16661254f29a4e9b2d76402ba9b064d3d97 | [
"Apache-2.0"
] | null | null | null | inference_converter/__init__.py | mzeynali/dl-model-converter | 3adff16661254f29a4e9b2d76402ba9b064d3d97 | [
"Apache-2.0"
] | null | null | null | inference_converter/__init__.py | mzeynali/dl-model-converter | 3adff16661254f29a4e9b2d76402ba9b064d3d97 | [
"Apache-2.0"
] | null | null | null | import os
import sys
sys.path.append(os.path.dirname(__file__))
| 8.5 | 42 | 0.75 | import os
import sys
sys.path.append(os.path.dirname(__file__))
| true | true |
f7300106c5722946f16c6d7a68325a64b58c05ce | 787 | py | Python | tests/test_convention.py | henhans/TBmodels | 7424acaea8d91850d80bb48898af875430f25fa0 | [
"Apache-2.0"
] | 1 | 2021-01-18T13:55:40.000Z | 2021-01-18T13:55:40.000Z | tests/test_convention.py | henhans/TBmodels | 7424acaea8d91850d80bb48898af875430f25fa0 | [
"Apache-2.0"
] | null | null | null | tests/test_convention.py | henhans/TBmodels | 7424acaea8d91850d80bb48898af875430f25fa0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
import numpy as np
import pythtb as pt
import tbmodels as tb
def test_compare_pythtb():
pt_model = pt.tb_model(1, 1, lat=[[1]], orb=[[0], [0.2]])
tb_model = tb.Model(dim=1, pos=[[0], [0.2]], uc=[[1]])
pt_model.set_hop(3j, 0, 1, [1])
tb_model.add_hop(3j, 0, 1, [1])
assert np.isclose(pt_model._gen_ham([0]), tb_model.hamilton([0])).all()
assert np.isclose(pt_model._gen_ham([0]), tb_model.hamilton([0], convention=1)).all()
assert np.isclose(pt_model._gen_ham([1]), tb_model.hamilton([1], convention=1)).all()
assert np.isclose(pt_model._gen_ham([0.2]), tb_model.hamilton(0.2, convention=1)).all()
| 32.791667 | 91 | 0.655654 |
import numpy as np
import pythtb as pt
import tbmodels as tb
def test_compare_pythtb():
pt_model = pt.tb_model(1, 1, lat=[[1]], orb=[[0], [0.2]])
tb_model = tb.Model(dim=1, pos=[[0], [0.2]], uc=[[1]])
pt_model.set_hop(3j, 0, 1, [1])
tb_model.add_hop(3j, 0, 1, [1])
assert np.isclose(pt_model._gen_ham([0]), tb_model.hamilton([0])).all()
assert np.isclose(pt_model._gen_ham([0]), tb_model.hamilton([0], convention=1)).all()
assert np.isclose(pt_model._gen_ham([1]), tb_model.hamilton([1], convention=1)).all()
assert np.isclose(pt_model._gen_ham([0.2]), tb_model.hamilton(0.2, convention=1)).all()
| true | true |
f73001805276877dee1c73c528d58c9860590720 | 10,586 | py | Python | google/cloud/aiplatform_v1/types/featurestore_online_service.py | lclc19/python-aiplatform | d8da2e365277441abadb04328943f23345d72b0e | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform_v1/types/featurestore_online_service.py | lclc19/python-aiplatform | d8da2e365277441abadb04328943f23345d72b0e | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform_v1/types/featurestore_online_service.py | lclc19/python-aiplatform | d8da2e365277441abadb04328943f23345d72b0e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1.types import feature_selector as gca_feature_selector
from google.cloud.aiplatform_v1.types import types
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1",
manifest={
"ReadFeatureValuesRequest",
"ReadFeatureValuesResponse",
"StreamingReadFeatureValuesRequest",
"FeatureValue",
"FeatureValueList",
},
)
class ReadFeatureValuesRequest(proto.Message):
r"""Request message for
[FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues].
Attributes:
entity_type (str):
Required. The resource name of the EntityType for the entity
being read. Value format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``.
For example, for a machine learning model predicting user
clicks on a website, an EntityType ID could be ``user``.
entity_id (str):
Required. ID for a specific entity. For example, for a
machine learning model predicting user clicks on a website,
an entity ID could be ``user_123``.
feature_selector (google.cloud.aiplatform_v1.types.FeatureSelector):
Required. Selector choosing Features of the
target EntityType.
"""
entity_type = proto.Field(proto.STRING, number=1,)
entity_id = proto.Field(proto.STRING, number=2,)
feature_selector = proto.Field(
proto.MESSAGE, number=3, message=gca_feature_selector.FeatureSelector,
)
class ReadFeatureValuesResponse(proto.Message):
r"""Response message for
[FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues].
Attributes:
header (google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse.Header):
Response header.
entity_view (google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse.EntityView):
Entity view with Feature values. This may be
the entity in the Featurestore if values for all
Features were requested, or a projection of the
entity in the Featurestore if values for only
some Features were requested.
"""
class FeatureDescriptor(proto.Message):
r"""Metadata for requested Features.
Attributes:
id (str):
Feature ID.
"""
id = proto.Field(proto.STRING, number=1,)
class Header(proto.Message):
r"""Response header with metadata for the requested
[ReadFeatureValuesRequest.entity_type][google.cloud.aiplatform.v1.ReadFeatureValuesRequest.entity_type]
and Features.
Attributes:
entity_type (str):
The resource name of the EntityType from the
[ReadFeatureValuesRequest][google.cloud.aiplatform.v1.ReadFeatureValuesRequest].
Value format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``.
feature_descriptors (Sequence[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse.FeatureDescriptor]):
List of Feature metadata corresponding to each piece of
[ReadFeatureValuesResponse.data][].
"""
entity_type = proto.Field(proto.STRING, number=1,)
feature_descriptors = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="ReadFeatureValuesResponse.FeatureDescriptor",
)
class EntityView(proto.Message):
r"""Entity view with Feature values.
Attributes:
entity_id (str):
ID of the requested entity.
data (Sequence[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse.EntityView.Data]):
Each piece of data holds the k requested values for one
requested Feature. If no values for the requested Feature
exist, the corresponding cell will be empty. This has the
same size and is in the same order as the features from the
header
[ReadFeatureValuesResponse.header][google.cloud.aiplatform.v1.ReadFeatureValuesResponse.header].
"""
class Data(proto.Message):
r"""Container to hold value(s), successive in time, for one
Feature from the request.
Attributes:
value (google.cloud.aiplatform_v1.types.FeatureValue):
Feature value if a single value is requested.
values (google.cloud.aiplatform_v1.types.FeatureValueList):
Feature values list if values, successive in
time, are requested. If the requested number of
values is greater than the number of existing
Feature values, nonexistent values are omitted
instead of being returned as empty.
"""
value = proto.Field(
proto.MESSAGE, number=1, oneof="data", message="FeatureValue",
)
values = proto.Field(
proto.MESSAGE, number=2, oneof="data", message="FeatureValueList",
)
entity_id = proto.Field(proto.STRING, number=1,)
data = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="ReadFeatureValuesResponse.EntityView.Data",
)
header = proto.Field(proto.MESSAGE, number=1, message=Header,)
entity_view = proto.Field(proto.MESSAGE, number=2, message=EntityView,)
class StreamingReadFeatureValuesRequest(proto.Message):
r"""Request message for
[FeaturestoreOnlineServingService.StreamingFeatureValuesRead][].
Attributes:
entity_type (str):
Required. The resource name of the entities' type. Value
format:
``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``.
For example, for a machine learning model predicting user
clicks on a website, an EntityType ID could be ``user``.
entity_ids (Sequence[str]):
Required. IDs of entities to read Feature values of. The
maximum number of IDs is 100. For example, for a machine
learning model predicting user clicks on a website, an
entity ID could be ``user_123``.
feature_selector (google.cloud.aiplatform_v1.types.FeatureSelector):
Required. Selector choosing Features of the
target EntityType. Feature IDs will be
deduplicated.
"""
entity_type = proto.Field(proto.STRING, number=1,)
entity_ids = proto.RepeatedField(proto.STRING, number=2,)
feature_selector = proto.Field(
proto.MESSAGE, number=3, message=gca_feature_selector.FeatureSelector,
)
class FeatureValue(proto.Message):
r"""Value for a feature.
NEXT ID: 15
Attributes:
bool_value (bool):
Bool type feature value.
double_value (float):
Double type feature value.
int64_value (int):
Int64 feature value.
string_value (str):
String feature value.
bool_array_value (google.cloud.aiplatform_v1.types.BoolArray):
A list of bool type feature value.
double_array_value (google.cloud.aiplatform_v1.types.DoubleArray):
A list of double type feature value.
int64_array_value (google.cloud.aiplatform_v1.types.Int64Array):
A list of int64 type feature value.
string_array_value (google.cloud.aiplatform_v1.types.StringArray):
A list of string type feature value.
bytes_value (bytes):
Bytes feature value.
metadata (google.cloud.aiplatform_v1.types.FeatureValue.Metadata):
Metadata of feature value.
"""
class Metadata(proto.Message):
r"""Metadata of feature value.
Attributes:
generate_time (google.protobuf.timestamp_pb2.Timestamp):
Feature generation timestamp. Typically, it
is provided by user at feature ingestion time.
If not, feature store will use the system
timestamp when the data is ingested into feature
store.
"""
generate_time = proto.Field(
proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,
)
bool_value = proto.Field(proto.BOOL, number=1, oneof="value",)
double_value = proto.Field(proto.DOUBLE, number=2, oneof="value",)
int64_value = proto.Field(proto.INT64, number=5, oneof="value",)
string_value = proto.Field(proto.STRING, number=6, oneof="value",)
bool_array_value = proto.Field(
proto.MESSAGE, number=7, oneof="value", message=types.BoolArray,
)
double_array_value = proto.Field(
proto.MESSAGE, number=8, oneof="value", message=types.DoubleArray,
)
int64_array_value = proto.Field(
proto.MESSAGE, number=11, oneof="value", message=types.Int64Array,
)
string_array_value = proto.Field(
proto.MESSAGE, number=12, oneof="value", message=types.StringArray,
)
bytes_value = proto.Field(proto.BYTES, number=13, oneof="value",)
metadata = proto.Field(proto.MESSAGE, number=14, message=Metadata,)
class FeatureValueList(proto.Message):
r"""Container for list of values.
Attributes:
values (Sequence[google.cloud.aiplatform_v1.types.FeatureValue]):
A list of feature values. All of them should
be the same data type.
"""
values = proto.RepeatedField(proto.MESSAGE, number=1, message="FeatureValue",)
__all__ = tuple(sorted(__protobuf__.manifest))
| 40.250951 | 136 | 0.657472 |
import proto
from google.cloud.aiplatform_v1.types import feature_selector as gca_feature_selector
from google.cloud.aiplatform_v1.types import types
from google.protobuf import timestamp_pb2
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1",
manifest={
"ReadFeatureValuesRequest",
"ReadFeatureValuesResponse",
"StreamingReadFeatureValuesRequest",
"FeatureValue",
"FeatureValueList",
},
)
class ReadFeatureValuesRequest(proto.Message):
entity_type = proto.Field(proto.STRING, number=1,)
entity_id = proto.Field(proto.STRING, number=2,)
feature_selector = proto.Field(
proto.MESSAGE, number=3, message=gca_feature_selector.FeatureSelector,
)
class ReadFeatureValuesResponse(proto.Message):
class FeatureDescriptor(proto.Message):
id = proto.Field(proto.STRING, number=1,)
class Header(proto.Message):
entity_type = proto.Field(proto.STRING, number=1,)
feature_descriptors = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="ReadFeatureValuesResponse.FeatureDescriptor",
)
class EntityView(proto.Message):
class Data(proto.Message):
value = proto.Field(
proto.MESSAGE, number=1, oneof="data", message="FeatureValue",
)
values = proto.Field(
proto.MESSAGE, number=2, oneof="data", message="FeatureValueList",
)
entity_id = proto.Field(proto.STRING, number=1,)
data = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="ReadFeatureValuesResponse.EntityView.Data",
)
header = proto.Field(proto.MESSAGE, number=1, message=Header,)
entity_view = proto.Field(proto.MESSAGE, number=2, message=EntityView,)
class StreamingReadFeatureValuesRequest(proto.Message):
entity_type = proto.Field(proto.STRING, number=1,)
entity_ids = proto.RepeatedField(proto.STRING, number=2,)
feature_selector = proto.Field(
proto.MESSAGE, number=3, message=gca_feature_selector.FeatureSelector,
)
class FeatureValue(proto.Message):
class Metadata(proto.Message):
generate_time = proto.Field(
proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,
)
bool_value = proto.Field(proto.BOOL, number=1, oneof="value",)
double_value = proto.Field(proto.DOUBLE, number=2, oneof="value",)
int64_value = proto.Field(proto.INT64, number=5, oneof="value",)
string_value = proto.Field(proto.STRING, number=6, oneof="value",)
bool_array_value = proto.Field(
proto.MESSAGE, number=7, oneof="value", message=types.BoolArray,
)
double_array_value = proto.Field(
proto.MESSAGE, number=8, oneof="value", message=types.DoubleArray,
)
int64_array_value = proto.Field(
proto.MESSAGE, number=11, oneof="value", message=types.Int64Array,
)
string_array_value = proto.Field(
proto.MESSAGE, number=12, oneof="value", message=types.StringArray,
)
bytes_value = proto.Field(proto.BYTES, number=13, oneof="value",)
metadata = proto.Field(proto.MESSAGE, number=14, message=Metadata,)
class FeatureValueList(proto.Message):
values = proto.RepeatedField(proto.MESSAGE, number=1, message="FeatureValue",)
__all__ = tuple(sorted(__protobuf__.manifest))
| true | true |
f7300271abe53d1c530313d92118bc1bdad057e3 | 2,612 | py | Python | ooni/report/cli.py | irl/ooni-probe | c21861c28ca6bd667715872d099006fab87222fd | [
"BSD-2-Clause"
] | null | null | null | ooni/report/cli.py | irl/ooni-probe | c21861c28ca6bd667715872d099006fab87222fd | [
"BSD-2-Clause"
] | null | null | null | ooni/report/cli.py | irl/ooni-probe | c21861c28ca6bd667715872d099006fab87222fd | [
"BSD-2-Clause"
] | null | null | null | from __future__ import print_function
import os
import sys
from ooni import canonical_bouncer
from ooni.report import __version__
from ooni.report import tool
from ooni.settings import config
from twisted.python import usage
class Options(usage.Options):
synopsis = """%s [options] upload | status
""" % (os.path.basename(sys.argv[0]),)
optFlags = [
["default-collector", "d", "Upload the reports to the default "
"collector that is looked up with the "
"canonical bouncer."]
]
optParameters = [
["configfile", "f", None,
"Specify the configuration file to use."],
["collector", "c", None,
"Specify the collector to upload the result to."],
["bouncer", "b", None,
"Specify the bouncer to query for a collector."]
]
def opt_version(self):
print("oonireport version: %s" % __version__)
sys.exit(0)
def parseArgs(self, *args):
if len(args) == 0:
raise usage.UsageError(
"Must specify at least one command"
)
return
self['command'] = args[0]
if self['command'] not in ("upload", "status"):
raise usage.UsageError(
"Must specify either command upload or status"
)
if self['command'] == "upload":
try:
self['report_file'] = args[1]
except IndexError:
self['report_file'] = None
def tor_check():
if not config.tor.socks_port:
print("Currently oonireport requires that you start Tor yourself "
"and set the socks_port inside of ooniprobe.conf")
sys.exit(1)
def run():
options = Options()
try:
options.parseOptions()
except Exception as exc:
print("Error: %s" % exc)
print(options)
sys.exit(2)
config.global_options = dict(options)
config.set_paths()
config.read_config_file()
if options['default-collector']:
options['bouncer'] = canonical_bouncer
if options['command'] == "upload" and options['report_file']:
tor_check()
return tool.upload(options['report_file'],
options['collector'],
options['bouncer'])
elif options['command'] == "upload":
tor_check()
return tool.upload_all(options['collector'],
options['bouncer'])
elif options['command'] == "status":
return tool.status()
else:
print(options)
| 28.703297 | 74 | 0.561256 | from __future__ import print_function
import os
import sys
from ooni import canonical_bouncer
from ooni.report import __version__
from ooni.report import tool
from ooni.settings import config
from twisted.python import usage
class Options(usage.Options):
    """
    Command line options for the oonireport tool.

    Supports two sub-commands: ``upload`` (optionally followed by a
    report file path) and ``status``.
    """

    synopsis = """%s [options] upload | status
""" % (os.path.basename(sys.argv[0]),)

    optFlags = [
        ["default-collector", "d", "Upload the reports to the default "
                                   "collector that is looked up with the "
                                   "canonical bouncer."]
    ]

    optParameters = [
        ["configfile", "f", None,
         "Specify the configuration file to use."],
        ["collector", "c", None,
         "Specify the collector to upload the result to."],
        ["bouncer", "b", None,
         "Specify the bouncer to query for a collector."]
    ]

    def opt_version(self):
        """Print the oonireport version and exit."""
        print("oonireport version: %s" % __version__)
        sys.exit(0)

    def parseArgs(self, *args):
        """Validate the sub-command and remember an optional report file.

        Fix: removed a stray dead ``return`` statement that followed the
        empty-arguments check and could prevent the command from ever
        being recorded.
        """
        if len(args) == 0:
            raise usage.UsageError(
                "Must specify at least one command"
            )
        self['command'] = args[0]
        if self['command'] not in ("upload", "status"):
            raise usage.UsageError(
                "Must specify either command upload or status"
            )
        if self['command'] == "upload":
            try:
                self['report_file'] = args[1]
            except IndexError:
                # "upload" without a path means "upload all reports".
                self['report_file'] = None
def tor_check():
    """
    Ensure a Tor SOCKS port is configured; otherwise print an
    explanation and terminate the process with exit status 1.
    """
    if not config.tor.socks_port:
        print("Currently oonireport requires that you start Tor yourself "
              "and set the socks_port inside of ooniprobe.conf")
        sys.exit(1)
def run():
    """Command line entry point: parse options and dispatch the sub-command."""
    options = Options()
    try:
        options.parseOptions()
    except Exception as exc:
        print("Error: %s" % exc)
        print(options)
        sys.exit(2)
    # Expose the parsed options globally and load the configuration file.
    config.global_options = dict(options)
    config.set_paths()
    config.read_config_file()
    if options['default-collector']:
        options['bouncer'] = canonical_bouncer
    command = options['command']
    if command == "upload":
        # Both upload variants need a running Tor instance.
        tor_check()
        if options['report_file']:
            return tool.upload(options['report_file'],
                               options['collector'],
                               options['bouncer'])
        return tool.upload_all(options['collector'],
                               options['bouncer'])
    if command == "status":
        return tool.status()
    # Should not happen after argument validation; show the usage text.
    print(options)
| true | true |
f7300289bf48754135726dad8a8c684a9ab7d495 | 14,855 | py | Python | queryable_properties/managers.py | W1ldPo1nter/django-queryable-properties | 9bb4ecb4fbdd7a9e0f610f937c8101a643027fb1 | [
"BSD-3-Clause"
] | 36 | 2019-10-22T11:44:37.000Z | 2022-03-15T21:27:03.000Z | queryable_properties/managers.py | W1ldPo1nter/django-queryable-properties | 9bb4ecb4fbdd7a9e0f610f937c8101a643027fb1 | [
"BSD-3-Clause"
] | 6 | 2020-10-03T15:13:26.000Z | 2021-09-25T14:05:50.000Z | queryable_properties/managers.py | W1ldPo1nter/django-queryable-properties | 9bb4ecb4fbdd7a9e0f610f937c8101a643027fb1 | [
"BSD-3-Clause"
] | 3 | 2021-04-26T08:30:46.000Z | 2021-08-18T09:04:49.000Z | # encoding: utf-8
from __future__ import unicode_literals
import six
from django.db.models import Manager
from django.db.models.query import QuerySet
from .compat import (ANNOTATION_SELECT_CACHE_NAME, ANNOTATION_TO_AGGREGATE_ATTRIBUTES_MAP, chain_query, chain_queryset,
ModelIterable, ValuesQuerySet)
from .exceptions import QueryablePropertyDoesNotExist, QueryablePropertyError
from .query import QueryablePropertiesQueryMixin
from .utils import get_queryable_property
from .utils.internal import InjectableMixin, QueryPath, QueryablePropertyReference
class QueryablePropertiesIterable(InjectableMixin):
    """
    An iterable that yields the actual results of a queryset while correctly
    processing columns of queryable properties. It is closely related to
    Django's BaseIterable and will be used as a mixin for its subclasses in all
    (recent) Django versions that have it. In all other (older) versions, this
    class will be used as a standalone iterable instead.
    """

    def __init__(self, queryset, *args, **kwargs):
        """
        Initialize a new iterable for the given queryset. If an iterable is
        given it will be used to retrieve the model instances before applying
        queryable properties logic (standalone usage for older Django
        versions). Otherwise, the __iter__ implementation of the base class
        is used to get the model instances (usage as mixin).

        :param QuerySet queryset: The queryset to perform the database query
                                  for.
        :param args: Positional arguments to pass through to the base class
                     initialization when used as a mixin.
        :param kwargs: Keyword arguments to pass through to the base class
                       initialization when used as a mixin.
        :keyword collections.Iterable iterable: The optional iterable to use
                                                for standalone usage.
        """
        self.queryset = queryset
        # Only perform the super call if the class is used as a mixin
        if self.__class__.__bases__ != (InjectableMixin,):
            super(QueryablePropertiesIterable, self).__init__(queryset, *args, **kwargs)
        self.iterable = kwargs.get('iterable') or super(QueryablePropertiesIterable, self).__iter__()
        # Model instances are yielded unless this wraps a values()/values_list()
        # queryset (old Django) or a non-model iterable class (new Django).
        self.yields_model_instances = ((ModelIterable is not None and isinstance(self, ModelIterable)) or
                                       (ValuesQuerySet is not None and not isinstance(self.queryset, ValuesQuerySet)))

    def __iter__(self):
        """
        Yield the model objects for the queryset associated with this iterator
        with their correctly processed selected queryable properties.

        :return: A generator that yields the model objects.
        """
        original_query = self.queryset.query
        try:
            # Work on a clone so the alias changes performed below never leak
            # into the queryset's persistent query object.
            self.queryset.query = chain_query(original_query)
            final_aliases = self._setup_queryable_properties()
            for obj in self.iterable:
                if self.yields_model_instances:
                    # Retrieve the annotation values from each renamed
                    # attribute and use it to populate the cache for the
                    # corresponding queryable property on each object while
                    # removing the weird, renamed attributes.
                    for changed_name, property_ref in six.iteritems(final_aliases):
                        value = getattr(obj, changed_name)
                        delattr(obj, changed_name)
                        if property_ref:
                            property_ref.descriptor.set_cached_value(obj, value)
                yield obj
        finally:
            # Always restore the untouched query, even if iteration aborts.
            self.queryset.query = original_query

    def _setup_queryable_properties(self):
        """
        Perform the required setup to correctly process queryable property
        values.

        Change the internal aliases of the annotations that belong to queryable
        properties in the query of the associated queryset to something unique
        and return a dictionary mapping the queryable properties to the changed
        aliases. This is necessary to allow Django to populate the annotation
        attributes on the resulting model instances, which would otherwise call
        the setter of the queryable properties. This way, Django can populate
        attributes with different names and avoid using the setter methods.

        Also make sure that ordering by queryable properties works in older
        Django versions.

        :return: A dictionary mapping the final aliases for queryable
                 properties to the corresponding references to be able to
                 retrieve the values from the DB and apply them to the correct
                 property. The property reference may be None, indicating that
                 the retrieved value should be discarded.
        :rtype: dict[str, QueryablePropertyReference | None]
        """
        query = self.queryset.query
        final_aliases = {}
        select = dict(query.annotation_select)
        for property_ref in query._queryable_property_annotations:
            annotation_name = six.text_type(property_ref.full_path)
            # Older Django versions don't work with the annotation select dict
            # when it comes to ordering, so queryable property annotations used
            # for ordering need special treatment.
            order_by_occurrences = []
            if ANNOTATION_TO_AGGREGATE_ATTRIBUTES_MAP:  # pragma: no cover
                order_by_occurrences = [index for index, field_name in enumerate(query.order_by)
                                        if field_name in (annotation_name, '-{}'.format(annotation_name))]
                if order_by_occurrences and annotation_name not in select and annotation_name in query.annotations:
                    # Ordering-only annotation: select it, but mark its value
                    # to be discarded later (None marker).
                    select[annotation_name] = query.annotations[annotation_name]
                    final_aliases[annotation_name] = None
            if not self.yields_model_instances or annotation_name not in select:
                # The queryable property annotation does not require selection
                # or no renaming needs to occur since the queryset doesn't
                # yield model instances.
                continue
            # Suffix the original annotation name with the lookup separator to
            # create a non-clashing name: both model field and queryable
            # property names are not allowed to contain the separator and a
            # relation path ending with the separator would be invalid as well.
            # (Appending '' to the QueryPath yields the trailing separator --
            # presumably; confirm against QueryPath.__add__.)
            changed_name = six.text_type(property_ref.full_path + '')
            final_aliases[changed_name] = final_aliases.pop(annotation_name, property_ref)
            select[changed_name] = select.pop(annotation_name)
            for index in order_by_occurrences:  # pragma: no cover
                # Apply the changed names to the ORDER BY clause.
                query.order_by[index] = query.order_by[index].replace(annotation_name, changed_name)
        # Patch the correct select property on the query with the new names,
        # since this property is used by the SQL compiler to build the actual
        # SQL query (which is where the changed names should be used).
        setattr(query, ANNOTATION_SELECT_CACHE_NAME, select)
        return final_aliases
class QueryablePropertiesQuerySetMixin(InjectableMixin):
    """
    A mixin for Django's :class:`django.db.models.QuerySet` objects that allows
    to use queryable properties in filters, annotations and update queries.
    """

    def init_injected_attrs(self):
        """Set up the query object when this mixin is injected into a queryset."""
        # To work correctly, a query using the QueryablePropertiesQueryMixin is
        # required. If the current query is not using the mixin already, it
        # will be dynamically injected into the query. That way, other Django
        # extensions using custom query objects are also supported.
        class_name = 'QueryableProperties' + self.query.__class__.__name__
        self.query = QueryablePropertiesQueryMixin.inject_into_object(chain_query(self.query), class_name)

    @property
    def _iterable_class(self):
        """Return the stored iterable class with queryable properties support mixed in."""
        # Override the regular _iterable_class attribute of recent Django
        # versions with a property that also stores the value in the instance
        # dict, but automatically mixes the QueryablePropertiesModelIterable
        # into the base class on getter access if the base class yields model
        # instances. That way, the queryable properties extensions stays
        # compatible to custom iterable classes while querysets can still be
        # pickled due to the base class being in the instance dict.
        cls = self.__dict__['_iterable_class']
        return QueryablePropertiesIterable.mix_with_class(cls, 'QueryableProperties' + cls.__name__)

    @_iterable_class.setter
    def _iterable_class(self, value):
        # Store the plain base class; the getter performs the dynamic mixing.
        self.__dict__['_iterable_class'] = value

    def _clone(self, klass=None, *args, **kwargs):
        """Clone this queryset while keeping queryable properties functionality intact."""
        if klass:  # pragma: no cover
            # In older Django versions, the class of the queryset may be
            # replaced with a dynamically created class based on the current
            # class and the value of klass while cloning (e.g when using
            # .values()). Therefore this needs to be re-injected to be on top
            # of the MRO again to enable queryable properties functionality.
            klass = QueryablePropertiesQuerySetMixin.mix_with_class(klass, 'QueryableProperties' + klass.__name__)
            args = (klass,) + args
        clone = super(QueryablePropertiesQuerySetMixin, self)._clone(*args, **kwargs)
        # Since the _iterable_class property may return a dynamically created
        # class, the value of a clone must be reset to the base class.
        if '_iterable_class' in self.__dict__:
            clone._iterable_class = self.__dict__['_iterable_class']
        return clone

    def _resolve_update_kwargs(self, **kwargs):
        """
        Look for the names of queryable properties in the given keyword
        arguments for an update query and correctly resolve them into their
        actual keyword arguments.

        :param kwargs: Keyword arguments of an update query.
        :return: A dictionary containing the resolved arguments.
        :rtype: dict
        :raises QueryablePropertyError: if a property does not support
            updating or resolution produces conflicting field values.
        """
        original_names = set(kwargs)
        for original_name in original_names:
            try:
                prop = get_queryable_property(self.model, original_name)
            except QueryablePropertyDoesNotExist:
                # Not a queryable property; leave the argument untouched.
                continue
            if not prop.get_update_kwargs:
                raise QueryablePropertyError('Queryable property "{}" does not implement queryset updating.'
                                             .format(prop))
            # Call the method recursively since queryable properties can build
            # upon each other.
            additional_kwargs = self._resolve_update_kwargs(
                **prop.get_update_kwargs(self.model, kwargs.pop(original_name)))
            # Make sure that there are no conflicting values after resolving
            # the update keyword arguments of the queryable properties.
            for additional_name, value in six.iteritems(additional_kwargs):
                if additional_name in kwargs and kwargs[additional_name] != value:
                    raise QueryablePropertyError(
                        'Updating queryable property "{prop}" would change field "{field}", but a conflicting value '
                        'was set for this field by another queryable property or explicitly in the update arguments.'
                        .format(prop=prop, field=additional_name)
                    )
                kwargs[additional_name] = value
        return kwargs

    def select_properties(self, *names):
        """
        Add the annotations of the queryable properties with the specified
        names to this query. The annotation values will be cached in the
        properties of resulting model instances, regardless of the regular
        caching behavior of the queried properties.

        :param names: Names of queryable properties.
        :return: A copy of this queryset with the added annotations.
        :rtype: QuerySet
        """
        queryset = chain_queryset(self)
        for name in names:
            property_ref = QueryablePropertyReference(get_queryable_property(self.model, name), self.model, QueryPath())
            # A full GROUP BY is required if the query is not limited to
            # certain fields. Since only certain types of queries had the
            # _fields attribute in old Django versions, fall back to checking
            # for existing selection, on which the GROUP BY would be based.
            full_group_by = not getattr(self, '_fields', self.query.select)
            with queryset.query._add_queryable_property_annotation(property_ref, full_group_by, select=True):
                pass
        return queryset

    def iterator(self, *args, **kwargs):
        """Iterate over the results while applying queryable properties processing."""
        # Recent Django versions use the associated iterable class for the
        # iterator() implementation, where the QueryablePropertiesModelIterable
        # will be already mixed in. In older Django versions, use a standalone
        # QueryablePropertiesModelIterable instead to perform the queryable
        # properties processing.
        iterable = super(QueryablePropertiesQuerySetMixin, self).iterator(*args, **kwargs)
        if '_iterable_class' not in self.__dict__:  # pragma: no cover
            return iter(QueryablePropertiesIterable(self, iterable=iterable))
        return iterable

    def update(self, **kwargs):
        """Update rows, resolving queryable property names into real field updates."""
        # Resolve any queryable properties into their actual update kwargs
        # before calling the base update method.
        kwargs = self._resolve_update_kwargs(**kwargs)
        return super(QueryablePropertiesQuerySetMixin, self).update(**kwargs)
class QueryablePropertiesQuerySet(QueryablePropertiesQuerySetMixin, QuerySet):
    """
    Concrete queryset class with queryable properties support: properties can
    be used in its filter conditions, annotations and update queries.
    """
if hasattr(Manager, 'from_queryset'):
    # Modern Django: build the manager class directly from the queryset class.
    QueryablePropertiesManager = Manager.from_queryset(QueryablePropertiesQuerySet)
else:  # pragma: no cover
    # Fallback for old Django versions without Manager.from_queryset.
    class QueryablePropertiesManager(Manager):

        def get_queryset(self):
            return QueryablePropertiesQuerySet(self.model, using=self._db)

        # Legacy alias used by very old Django versions.
        get_query_set = get_queryset

        def select_properties(self, *names):
            return self.get_queryset().select_properties(*names)
| 51.401384 | 120 | 0.671289 |
from __future__ import unicode_literals
import six
from django.db.models import Manager
from django.db.models.query import QuerySet
from .compat import (ANNOTATION_SELECT_CACHE_NAME, ANNOTATION_TO_AGGREGATE_ATTRIBUTES_MAP, chain_query, chain_queryset,
ModelIterable, ValuesQuerySet)
from .exceptions import QueryablePropertyDoesNotExist, QueryablePropertyError
from .query import QueryablePropertiesQueryMixin
from .utils import get_queryable_property
from .utils.internal import InjectableMixin, QueryPath, QueryablePropertyReference
class QueryablePropertiesIterable(InjectableMixin):
    """
    An iterable that yields the results of a queryset while correctly
    processing selected queryable property columns. Used as a mixin for
    Django's BaseIterable subclasses on recent Django versions and as a
    standalone iterable on older ones.
    """

    def __init__(self, queryset, *args, **kwargs):
        """
        Initialize the iterable for the given queryset.

        :param QuerySet queryset: The queryset to perform the database query for.
        :keyword iterable: Optional iterable yielding the raw results
                           (standalone usage on older Django versions).
        """
        self.queryset = queryset
        # Only call the base initializer when this class is used as a mixin.
        if self.__class__.__bases__ != (InjectableMixin,):
            super(QueryablePropertiesIterable, self).__init__(queryset, *args, **kwargs)
        self.iterable = kwargs.get('iterable') or super(QueryablePropertiesIterable, self).__iter__()
        self.yields_model_instances = ((ModelIterable is not None and isinstance(self, ModelIterable)) or
                                       (ValuesQuerySet is not None and not isinstance(self.queryset, ValuesQuerySet)))

    def __iter__(self):
        """Yield result objects with queryable property values moved into their caches."""
        original_query = self.queryset.query
        try:
            # Operate on a clone so alias renaming never leaks into the
            # queryset's persistent query object.
            self.queryset.query = chain_query(original_query)
            final_aliases = self._setup_queryable_properties()
            for obj in self.iterable:
                if self.yields_model_instances:
                    # Move each renamed annotation value into the cache of the
                    # matching queryable property, then drop the attribute.
                    for changed_name, property_ref in six.iteritems(final_aliases):
                        value = getattr(obj, changed_name)
                        delattr(obj, changed_name)
                        if property_ref:
                            property_ref.descriptor.set_cached_value(obj, value)
                yield obj
        finally:
            # Always restore the untouched query object.
            self.queryset.query = original_query

    def _setup_queryable_properties(self):
        """
        Rename queryable-property annotations to unique aliases so Django can
        populate the values without triggering the property setters.

        :return: Mapping of final alias -> property reference; a None value
                 means the fetched value is to be discarded (ordering-only
                 annotations).
        :rtype: dict
        """
        query = self.queryset.query
        final_aliases = {}
        select = dict(query.annotation_select)
        for property_ref in query._queryable_property_annotations:
            annotation_name = six.text_type(property_ref.full_path)
            # Older Django versions don't use the annotation select dict
            # when it comes to ordering, so queryable property annotations used
            # for ordering need special treatment.
            order_by_occurrences = []
            if ANNOTATION_TO_AGGREGATE_ATTRIBUTES_MAP:  # pragma: no cover
                order_by_occurrences = [index for index, field_name in enumerate(query.order_by)
                                        if field_name in (annotation_name, '-{}'.format(annotation_name))]
            if order_by_occurrences and annotation_name not in select and annotation_name in query.annotations:
                # Ordering-only annotation: select it, discard the value later.
                select[annotation_name] = query.annotations[annotation_name]
                final_aliases[annotation_name] = None
            if not self.yields_model_instances or annotation_name not in select:
                # The queryable property annotation does not require selection
                # or no renaming needs to occur since the queryset doesn't
                # yield model instances.
                continue
            # Appending '' to the QueryPath yields a non-clashing suffixed
            # alias -- presumably via the lookup separator; confirm against
            # QueryPath.__add__.
            changed_name = six.text_type(property_ref.full_path + '')
            final_aliases[changed_name] = final_aliases.pop(annotation_name, property_ref)
            select[changed_name] = select.pop(annotation_name)
            for index in order_by_occurrences:
                # Apply the changed name to the ORDER BY clause as well.
                query.order_by[index] = query.order_by[index].replace(annotation_name, changed_name)
        # The SQL compiler reads this cached select dict, so patch it with the
        # renamed entries.
        setattr(query, ANNOTATION_SELECT_CACHE_NAME, select)
        return final_aliases
class QueryablePropertiesQuerySetMixin(InjectableMixin):
    """
    A mixin for Django querysets that allows queryable properties to be used
    in filters, annotations and update queries.
    """

    def init_injected_attrs(self):
        """Inject queryable properties support into this queryset's query object."""
        class_name = 'QueryableProperties' + self.query.__class__.__name__
        self.query = QueryablePropertiesQueryMixin.inject_into_object(chain_query(self.query), class_name)

    @property
    def _iterable_class(self):
        """Return the stored iterable class with queryable properties support mixed in."""
        cls = self.__dict__['_iterable_class']
        return QueryablePropertiesIterable.mix_with_class(cls, 'QueryableProperties' + cls.__name__)

    @_iterable_class.setter
    def _iterable_class(self, value):
        # Store the plain base class; the getter performs the dynamic mixing.
        self.__dict__['_iterable_class'] = value

    def _clone(self, klass=None, *args, **kwargs):
        """Clone this queryset while keeping queryable properties functionality."""
        if klass:
            # Old Django may swap the queryset class while cloning; re-inject
            # the mixin so it stays on top of the MRO.
            klass = QueryablePropertiesQuerySetMixin.mix_with_class(klass, 'QueryableProperties' + klass.__name__)
            args = (klass,) + args
        clone = super(QueryablePropertiesQuerySetMixin, self)._clone(*args, **kwargs)
        # Reset the clone's value to the plain base class since the property
        # getter may return a dynamically created class.
        if '_iterable_class' in self.__dict__:
            clone._iterable_class = self.__dict__['_iterable_class']
        return clone

    def _resolve_update_kwargs(self, **kwargs):
        """
        Resolve queryable property names in update kwargs into the actual
        field assignments they represent.

        :param kwargs: Keyword arguments of an update query.
        :return: A dictionary containing the resolved arguments.
        :rtype: dict
        :raises QueryablePropertyError: if a property does not support
            updating or resolution produces conflicting field values.
        """
        original_names = set(kwargs)
        for original_name in original_names:
            try:
                prop = get_queryable_property(self.model, original_name)
            except QueryablePropertyDoesNotExist:
                # Not a queryable property; leave the argument untouched.
                continue
            if not prop.get_update_kwargs:
                raise QueryablePropertyError('Queryable property "{}" does not implement queryset updating.'
                                             .format(prop))
            # Recurse: queryable properties can build upon each other.
            additional_kwargs = self._resolve_update_kwargs(
                **prop.get_update_kwargs(self.model, kwargs.pop(original_name)))
            # Reject conflicting values for the same underlying field.
            for additional_name, value in six.iteritems(additional_kwargs):
                if additional_name in kwargs and kwargs[additional_name] != value:
                    raise QueryablePropertyError(
                        'Updating queryable property "{prop}" would change field "{field}", but a conflicting value '
                        'was set for this field by another queryable property or explicitly in the update arguments.'
                        .format(prop=prop, field=additional_name)
                    )
                kwargs[additional_name] = value
        return kwargs

    def select_properties(self, *names):
        """
        Add the annotations of the named queryable properties to this query so
        their values are fetched and cached on the resulting model instances.

        :param names: Names of queryable properties.
        :return: A copy of this queryset with the added annotations.
        :rtype: QuerySet
        """
        queryset = chain_queryset(self)
        for name in names:
            property_ref = QueryablePropertyReference(get_queryable_property(self.model, name), self.model, QueryPath())
            # A full GROUP BY is required if the query is not limited to
            # certain fields; fall back to the existing selection on old Django.
            full_group_by = not getattr(self, '_fields', self.query.select)
            with queryset.query._add_queryable_property_annotation(property_ref, full_group_by, select=True):
                pass
        return queryset

    def iterator(self, *args, **kwargs):
        """Iterate over the results while applying queryable properties processing."""
        iterable = super(QueryablePropertiesQuerySetMixin, self).iterator(*args, **kwargs)
        if '_iterable_class' not in self.__dict__:
            # Old Django: wrap the raw results in a standalone processing iterable.
            return iter(QueryablePropertiesIterable(self, iterable=iterable))
        return iterable

    def update(self, **kwargs):
        """Update rows, resolving queryable property names into real field updates."""
        kwargs = self._resolve_update_kwargs(**kwargs)
        return super(QueryablePropertiesQuerySetMixin, self).update(**kwargs)
class QueryablePropertiesQuerySet(QueryablePropertiesQuerySetMixin, QuerySet):
    """
    A queryset class that allows queryable properties to be used in its
    filter conditions, annotations and update queries.
    """
    pass
if hasattr(Manager, 'from_queryset'):
    # Modern Django: derive the manager directly from the queryset class.
    QueryablePropertiesManager = Manager.from_queryset(QueryablePropertiesQuerySet)
else:
    # Fallback for Django versions without Manager.from_queryset.
    class QueryablePropertiesManager(Manager):
        def get_queryset(self):
            return QueryablePropertiesQuerySet(self.model, using=self._db)
        # Legacy alias used by very old Django versions.
        get_query_set = get_queryset
        def select_properties(self, *names):
            return self.get_queryset().select_properties(*names)
| true | true |
f73002d98b59c3477dc664163095a60c163f8748 | 1,765 | py | Python | scripts/postprocess_score.py | sumanthd17/indicTrans | e78ab48d33ffaa51af818e28226b281aae495994 | [
"MIT"
] | null | null | null | scripts/postprocess_score.py | sumanthd17/indicTrans | e78ab48d33ffaa51af818e28226b281aae495994 | [
"MIT"
] | null | null | null | scripts/postprocess_score.py | sumanthd17/indicTrans | e78ab48d33ffaa51af818e28226b281aae495994 | [
"MIT"
] | null | null | null | import sys
def postprocess(
    infname, outfname, input_size
):
    """
    Parse fairseq interactive output and write one model score per line.

    Hypothesis lines ("H-<sid>\\t<score>\\t<text>") are matched back to their
    sentence ids; sentences without a hypothesis keep a default score of 0.0.

    :param infname: fairseq log file
    :param outfname: output file; line i holds the score of sentence i
    :param input_size: expected number of output sentences
    """
    # Pre-fill with defaults so sentences missing from the log still get a row.
    # (Also: removed the old commented-out hypothesis-extraction code and
    # stream the log instead of materializing it entirely in memory.)
    consolidated_testoutput = [(sid, 0.0, "") for sid in range(input_size)]
    with open(infname, "r", encoding="utf-8") as infile:
        for line in infile:
            if not line.startswith("H-"):
                continue
            fields = line.strip().split("\t")
            sid = int(fields[0].split("-")[1])
            consolidated_testoutput[sid] = (sid, float(fields[1]), fields[2])
    with open(outfname, "w", encoding="utf-8") as outfile:
        for sid, score, hyp in consolidated_testoutput:
            outfile.write("{}\n".format(score))
if __name__ == "__main__":
    # CLI usage: postprocess_score.py <fairseq_log> <output_file> <num_sentences>
    infname = sys.argv[1]
    outfname = sys.argv[2]
    input_size = int(sys.argv[3])
    postprocess(
        infname, outfname, input_size
    )
| 36.020408 | 125 | 0.626629 | import sys
def postprocess(
    infname, outfname, input_size
):
    """Write per-sentence fairseq scores from *infname* to *outfname*."""
    # One default entry for every expected sentence id.
    scored = [(idx, 0.0, "") for idx in range(input_size)]
    with open(infname, "r", encoding="utf-8") as infile:
        hyp_rows = [ln.strip().split("\t") for ln in infile if ln.startswith("H-")]
    for row in hyp_rows:
        sent_id = int(row[0].split("-")[1])
        scored[sent_id] = (sent_id, float(row[1]), row[2])
    with open(outfname, "w", encoding="utf-8") as outfile:
        for sent_id, score, hyp in scored:
            outfile.write("{}\n".format(score))
if __name__ == "__main__":
    # usage: postprocess_score.py <fairseq_log> <output_file> <num_sentences>
    log_path = sys.argv[1]
    scores_path = sys.argv[2]
    sentence_count = int(sys.argv[3])
    postprocess(log_path, scores_path, sentence_count)
| true | true |
f730037de960ab141d2243d99c48b409e7c12847 | 424 | py | Python | services/datalad/tests/test_validator.py | build3/openneuro | ae8f6edbab243703b38cefd729629c1741eb3839 | [
"MIT"
] | null | null | null | services/datalad/tests/test_validator.py | build3/openneuro | ae8f6edbab243703b38cefd729629c1741eb3839 | [
"MIT"
] | 1 | 2020-09-25T11:06:37.000Z | 2020-09-25T11:06:37.000Z | services/datalad/tests/test_validator.py | adswa/openneuro | 64e7fdbeb8b3c567c340b80f22a6134e4ee8070a | [
"MIT"
] | null | null | null | import json
from .dataset_fixtures import *
from datalad_service.tasks.validator import validate_dataset_sync
def test_validator(new_dataset):
results = validate_dataset_sync(new_dataset.path, 'HEAD')
# new_dataset doesn't pass validation, should return an error
assert 'issues' in results
assert 'errors' in results['issues']
assert results['issues']['errors'][0]['key'] == 'QUICK_VALIDATION_FAILED'
| 32.615385 | 77 | 0.757075 | import json
from .dataset_fixtures import *
from datalad_service.tasks.validator import validate_dataset_sync
def test_validator(new_dataset):
results = validate_dataset_sync(new_dataset.path, 'HEAD')
assert 'issues' in results
assert 'errors' in results['issues']
assert results['issues']['errors'][0]['key'] == 'QUICK_VALIDATION_FAILED'
| true | true |
f73003dbae406346ceffaad933d70e33d38fff14 | 190 | py | Python | cride/circles/apps.py | jpcano1/cride-platzi | 6548b5a4c42c2acc9c888f93d5479be9c8b7e6d7 | [
"MIT"
] | null | null | null | cride/circles/apps.py | jpcano1/cride-platzi | 6548b5a4c42c2acc9c888f93d5479be9c8b7e6d7 | [
"MIT"
] | null | null | null | cride/circles/apps.py | jpcano1/cride-platzi | 6548b5a4c42c2acc9c888f93d5479be9c8b7e6d7 | [
"MIT"
] | null | null | null | """ Circles app """
# Django
from django.apps import AppConfig
class CirclesAppConfig(AppConfig):
    """ Circles app config. """

    # Dotted Python path Django uses to locate the application.
    name = 'cride.circles'
    # Human-readable name (e.g. shown in the admin interface).
    verbose_name = 'Circles'
| 15.833333 | 34 | 0.663158 |
from django.apps import AppConfig
class CirclesAppConfig(AppConfig):
    """Django application configuration for the circles app."""

    # Dotted path to the application package.
    name = 'cride.circles'
    # Display name used in the admin.
    verbose_name = 'Circles'
| true | true |
f730044e72d0f566ba1c776e68f1ecc503b9cd45 | 7,888 | py | Python | modules/mdebugger/attachment_marker/motionsense.py | MD2Korg/CerebralCortex-DataAnalysis | 73f5ea2430bc7c23de422dccb7b65ef9f8917595 | [
"BSD-2-Clause"
] | 1 | 2018-04-24T18:11:24.000Z | 2018-04-24T18:11:24.000Z | modules/mdebugger/attachment_marker/motionsense.py | Boris69bg/CerebralCortex-DataAnalysis | 49565bdff348d69153bd5d3a37e73f1645f82b32 | [
"BSD-2-Clause"
] | 10 | 2018-03-13T19:04:09.000Z | 2018-05-12T01:40:03.000Z | modules/mdebugger/attachment_marker/motionsense.py | Boris69bg/CerebralCortex-DataAnalysis | 49565bdff348d69153bd5d3a37e73f1645f82b32 | [
"BSD-2-Clause"
] | 42 | 2017-12-07T17:08:14.000Z | 2019-06-02T08:25:12.000Z | # Copyright (c) 2017, MD2K Center of Excellence
# - Nasir Ali <nasir.ali08@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import uuid
from collections import OrderedDict
from cerebralcortex.cerebralcortex import CerebralCortex
from modules.mdebugger.post_processing import get_execution_context, get_annotations
from modules.mdebugger.post_processing import store
from modules.mdebugger.util import get_stream_days
from modules.mdebugger.util import merge_consective_windows
from core.signalprocessing.window import window
from cerebralcortex.core.data_manager.raw.stream_handler import DataSet
def attachment_marker(raw_stream_id: uuid, stream_name: str, owner_id: uuid, dd_stream_name, CC: CerebralCortex,
                      config: dict):
    """
    Label sensor data as sensor-on-body, sensor-off-body, or improper-attachment.
    All the labeled data (st, et, label) with its metadata are then stored in a datastore.

    :param raw_stream_id: id of the stream to diagnose (annotated as uuid, but
        the string concatenation below implies a str is passed -- TODO confirm)
    :param stream_name: name of the raw stream
    :param owner_id: id of the stream owner
    :param dd_stream_name: name of the data-diagnostic output stream
    :param CC: CerebralCortex instance used to read and store streams
    :param config: data-diagnostic configuration dictionary
    """
    # TODO: quality streams could be multiple so find the one computed with CC
    # using stream_id, data-diagnostic-stream-id, and owner id to generate a unique stream ID for battery-marker
    attachment_marker_stream_id = uuid.uuid3(uuid.NAMESPACE_DNS, str(raw_stream_id + dd_stream_name + owner_id+"ATTACHMENT MARKER"))
    stream_days = get_stream_days(raw_stream_id, attachment_marker_stream_id, CC)
    for day in stream_days:
        # load stream data to be diagnosed
        raw_stream = CC.get_stream(raw_stream_id, day=day, data_type=DataSet.COMPLETE)
        if len(raw_stream.data) > 0:
            # Label each fixed-size window, then merge consecutive windows
            # carrying the same label before storing.
            windowed_data = window(raw_stream.data, config['general']['window_size'], True)
            results = process_windows(windowed_data, config)
            merged_windows = merge_consective_windows(results)
            input_streams = [{"owner_id": owner_id, "id": str(raw_stream_id), "name": stream_name}]
            output_stream = {"id": attachment_marker_stream_id, "name": dd_stream_name,
                             "algo_type": config["algo_type"]["attachment_marker"]}
            metadata = get_metadata(dd_stream_name, input_streams, config)
            store(merged_windows, input_streams, output_stream, metadata, CC, config)
def process_windows(windowed_data: OrderedDict, config: dict) -> OrderedDict:
    """
    Assign an attachment label to each window of MotionSense samples.

    A window is labeled by the fraction of samples whose value is 0 (the
    divisor of 20 assumes ~20 samples per window -- TODO confirm the
    expected sampling rate):

    * off-body  -- fraction <= off-body threshold
    * improper  -- off-body threshold < fraction < improper threshold
    * on-body   -- fraction > on-body threshold

    :param windowed_data: mapping of window key -> list of samples, each
        sample exposing a ``sample`` attribute.
    :param config: data-diagnostic configuration with thresholds and labels.
    :return: mapping of window key -> label, in window order.
    """
    results = OrderedDict()
    threshold_improper_attachment = config['attachment_marker']['motionsense_improper_attachment']
    threshold_onbody = config['attachment_marker']['motionsense_onbody']
    threshold_offbody = config['attachment_marker']['motionsense_offbody']
    label_improper_attachment = config['labels']['motionsense_improper_attachment']
    label_onbody = config['labels']['motionsense_onbody']
    label_offbody = config['labels']['motionsense_offbody']
    if windowed_data:
        for key, data in windowed_data.items():
            # Fraction of zero-valued samples in this window (hoisted: the
            # original recomputed this division in every comparison).
            zero_count = sum(1 for value in data if value.sample == 0)
            zero_fraction = zero_count / 20
            if threshold_offbody < zero_fraction < threshold_improper_attachment:
                results[key] = label_improper_attachment
            elif zero_fraction > threshold_onbody:
                results[key] = label_onbody
            else:
                results[key] = label_offbody
    return results
def get_metadata(dd_stream_name: str, input_streams: dict, config: dict) -> dict:
    """
    Build the metadata (execution context, data descriptor and annotations)
    for an attachment-marker output stream.

    :param dd_stream_name: name of the data-diagnostic stream; must match one
        of the attachment-marker stream names in ``config["stream_names"]``.
    :param input_streams: descriptors of the streams the marker was computed from.
    :param config: data-diagnostic configuration dictionary.
    :return: dict with keys "ec" (execution context), "dd" (data descriptor)
        and "anno" (annotations).
    :raises ValueError: if dd_stream_name is not a known attachment-marker stream.
    """
    if dd_stream_name == config["stream_names"]["autosense_rip_attachment_marker"]:
        input_param = {"window_size": config["general"]["window_size"],
                       "onbody_threshold": config["attachment_marker"]["rip_on_body"],
                       "improper_attachment": config["attachment_marker"]["improper_attachment"]}
        data_descriptor = {"NAME": dd_stream_name, "DATA_TYPE": "int",
                           "DESCRIPTION": "Attachment labels: Improper attachment: " + str(
                               config["labels"]["rip_improper_attachment"]) + ", Offbody: " + str(
                               config["labels"]["rip_off_body"]) + ", Onbody: " + str(config["labels"]["rip_on_body"])}
    elif dd_stream_name == config["stream_names"]["autosense_ecg_attachment_marker"]:
        # NOTE(review): "ecg_vairance_threshold" key spelling ("vairance") is a
        # typo, but it is emitted into stored metadata -- confirm downstream
        # readers before renaming.
        input_param = {"window_size": config["general"]["window_size"],
                       "ecg_vairance_threshold": config["attachment_marker"]["ecg_on_body"],
                       "improper_attachment": config["attachment_marker"]["improper_attachment"]}
        data_descriptor = {"NAME": dd_stream_name, "DATA_TYPE": "int",
                           "DESCRIPTION": "Attachment labels: Improper attachment: " + str(
                               config["labels"]["ecg_improper_attachment"]) + ", Offbody: " + str(
                               config["labels"]["ecg_off_body"]) + ", Onbody: " + str(config["labels"]["ecg_on_body"])}
    elif dd_stream_name == config["stream_names"]["motionsense_hrv_right_attachment_marker"] or dd_stream_name == \
            config["stream_names"]["motionsense_hrv_left_attachment_marker"]:
        input_param = {"window_size": config["general"]["window_size"],
                       "motionsense_improper_attachment_threshold": config["attachment_marker"][
                           "motionsense_improper_attachment"],
                       "motionsense_onbody_threshold": config["attachment_marker"]["motionsense_onbody"],
                       "motionsense_offbody_threshold": config["attachment_marker"]["motionsense_offbody"]
                       }
        data_descriptor = {"NAME": dd_stream_name, "DATA_TYPE": "int",
                           "DESCRIPTION": "Attachment labels: Improper attachment: " + str(
                               config["labels"]["motionsense_improper_attachment"]) + ", Offbody: " + str(
                               config["labels"]["motionsense_offbody"]) + ", Onbody: " + str(
                               config["labels"]["motionsense_onbody"])}
    else:
        raise ValueError("Incorrect sensor type")
    # Assemble the execution context from the sensor-specific parameters.
    method = 'cerebralcortex.data_processor.data_diagnostic.attachment_marker'
    algo_description = config["description"]["attachment_marker"]
    ec = get_execution_context(dd_stream_name, input_param, input_streams, method,
                               algo_description, config)
    anno = get_annotations()
    return {"ec": ec, "dd": data_descriptor, "anno": anno}
| 54.4 | 132 | 0.676344 |
import uuid
from collections import OrderedDict
from cerebralcortex.cerebralcortex import CerebralCortex
from modules.mdebugger.post_processing import get_execution_context, get_annotations
from modules.mdebugger.post_processing import store
from modules.mdebugger.util import get_stream_days
from modules.mdebugger.util import merge_consective_windows
from core.signalprocessing.window import window
from cerebralcortex.core.data_manager.raw.stream_handler import DataSet
def attachment_marker(raw_stream_id: uuid, stream_name: str, owner_id: uuid, dd_stream_name, CC: CerebralCortex,
                      config: dict):
    """Label every unprocessed day of a raw sensor stream and persist the result.

    For each day still missing from the marker stream, the raw stream is
    windowed, each window is classified by ``process_windows``, consecutive
    windows with the same label are merged, and the merged windows are stored
    as a new stream together with their metadata.

    :param raw_stream_id: id of the raw input stream (concatenated with the
        other ids below, so presumably a string at runtime despite the
        ``uuid`` annotation — TODO confirm with callers).
    :param stream_name: name of the raw input stream.
    :param owner_id: id of the stream owner.
    :param dd_stream_name: name of the attachment-marker stream to create.
    :param CC: CerebralCortex connection used for reading and storing streams.
    :param config: module configuration dictionary.
    """
    # Derive a deterministic uuid for the output stream from its inputs.
    marker_stream_id = uuid.uuid3(
        uuid.NAMESPACE_DNS,
        str(raw_stream_id + dd_stream_name + owner_id + "ATTACHMENT MARKER"))
    for day in get_stream_days(raw_stream_id, marker_stream_id, CC):
        raw_stream = CC.get_stream(raw_stream_id, day=day, data_type=DataSet.COMPLETE)
        if len(raw_stream.data) == 0:
            continue
        windowed_data = window(raw_stream.data, config['general']['window_size'], True)
        merged_windows = merge_consective_windows(process_windows(windowed_data, config))
        input_streams = [{"owner_id": owner_id, "id": str(raw_stream_id), "name": stream_name}]
        output_stream = {"id": marker_stream_id, "name": dd_stream_name,
                         "algo_type": config["algo_type"]["attachment_marker"]}
        store(merged_windows, input_streams, output_stream,
              get_metadata(dd_stream_name, input_streams, config), CC, config)
def process_windows(windowed_data: OrderedDict, config: dict) -> OrderedDict:
    """Classify each window as on-body / off-body / improperly attached.

    A window is scored by the number of datapoints whose ``sample`` equals 0,
    divided by a fixed 20 (presumably the expected datapoints per one-minute
    window — TODO confirm the sampling rate). The fraction is compared against
    the motionsense thresholds from ``config`` and mapped to the configured
    label values.

    :param windowed_data: ordered mapping of window key -> list of datapoints.
    :param config: configuration providing 'attachment_marker' thresholds and
        'labels' values.
    :return: OrderedDict mapping each window key to its label.
    """
    labelled = OrderedDict()
    if not windowed_data:
        return labelled

    markers = config['attachment_marker']
    labels = config['labels']
    improper_cutoff = markers['motionsense_improper_attachment']
    onbody_cutoff = markers['motionsense_onbody']
    offbody_cutoff = markers['motionsense_offbody']

    for key, datapoints in windowed_data.items():
        zero_samples = sum(1 for dp in datapoints if dp.sample == 0)
        fraction = zero_samples / 20
        # Between the off-body and improper-attachment cutoffs: improper.
        if offbody_cutoff < fraction < improper_cutoff:
            labelled[key] = labels['motionsense_improper_attachment']
        elif fraction > onbody_cutoff:
            labelled[key] = labels['motionsense_onbody']
        else:
            labelled[key] = labels['motionsense_offbody']
    return labelled
def get_metadata(dd_stream_name: str, input_streams: dict, config: dict) -> dict:
    """Build the metadata bundle for one attachment-marker output stream.

    :param dd_stream_name: name of the marker stream being described; must
        equal one of the configured attachment-marker stream names.
    :param input_streams: input-stream descriptors passed through to the
        execution context.
    :param config: loaded module configuration dictionary.
    :return: dict with keys "ec" (execution context), "dd" (data descriptor)
        and "anno" (annotations).
    :raises ValueError: if ``dd_stream_name`` is not a known marker stream.
    """

    def describe(improper, offbody, onbody):
        # Human-readable legend mapping label values to their meaning.
        return ("Attachment labels: Improper attachment: " + str(improper) +
                ", Offbody: " + str(offbody) + ", Onbody: " + str(onbody))

    names = config["stream_names"]
    if dd_stream_name == names["autosense_rip_attachment_marker"]:
        input_param = {"window_size": config["general"]["window_size"],
                       "onbody_threshold": config["attachment_marker"]["rip_on_body"],
                       "improper_attachment": config["attachment_marker"]["improper_attachment"]}
        description = describe(config["labels"]["rip_improper_attachment"],
                               config["labels"]["rip_off_body"],
                               config["labels"]["rip_on_body"])
    elif dd_stream_name == names["autosense_ecg_attachment_marker"]:
        # Note: "ecg_vairance_threshold" is an existing (misspelled) runtime
        # key preserved for compatibility with consumers of this metadata.
        input_param = {"window_size": config["general"]["window_size"],
                       "ecg_vairance_threshold": config["attachment_marker"]["ecg_on_body"],
                       "improper_attachment": config["attachment_marker"]["improper_attachment"]}
        description = describe(config["labels"]["ecg_improper_attachment"],
                               config["labels"]["ecg_off_body"],
                               config["labels"]["ecg_on_body"])
    elif (dd_stream_name == names["motionsense_hrv_right_attachment_marker"] or
          dd_stream_name == names["motionsense_hrv_left_attachment_marker"]):
        input_param = {"window_size": config["general"]["window_size"],
                       "motionsense_improper_attachment_threshold":
                           config["attachment_marker"]["motionsense_improper_attachment"],
                       "motionsense_onbody_threshold": config["attachment_marker"]["motionsense_onbody"],
                       "motionsense_offbody_threshold": config["attachment_marker"]["motionsense_offbody"]}
        description = describe(config["labels"]["motionsense_improper_attachment"],
                               config["labels"]["motionsense_offbody"],
                               config["labels"]["motionsense_onbody"])
    else:
        raise ValueError("Incorrect sensor type")

    data_descriptor = {"NAME": dd_stream_name, "DATA_TYPE": "int",
                       "DESCRIPTION": description}
    method = 'cerebralcortex.data_processor.data_diagnostic.attachment_marker'
    ec = get_execution_context(dd_stream_name, input_param, input_streams, method,
                               config["description"]["attachment_marker"], config)
    return {"ec": ec, "dd": data_descriptor, "anno": get_annotations()}
| true | true |
f73004f4a82f8a65d0815fa2c8179029a64348c3 | 17,171 | py | Python | aiida/backends/tests/backup_script.py | joepvd/aiida_core | 6e9711046753332933f982971db1d7ac7e7ade58 | [
"BSD-2-Clause"
] | null | null | null | aiida/backends/tests/backup_script.py | joepvd/aiida_core | 6e9711046753332933f982971db1d7ac7e7ade58 | [
"BSD-2-Clause"
] | null | null | null | aiida/backends/tests/backup_script.py | joepvd/aiida_core | 6e9711046753332933f982971db1d7ac7e7ade58 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import datetime
import importlib
import shutil
import sys
import tempfile
from dateutil.parser import parse
from aiida.backends.utils import is_dbenv_loaded, load_dbenv, BACKEND_SQLA, BACKEND_DJANGO
from aiida.backends.settings import BACKEND
from aiida.backends.testbase import AiidaTestCase
from aiida.common import utils
from aiida.common.additions.backup_script import backup_setup
from aiida.orm.node import Node
import aiida.utils.json as json
# Ensure the AiiDA database environment is available at import time; the
# test setUp below repeats this guard for safety.
if not is_dbenv_loaded():
    load_dbenv()
class TestBackupScriptUnit(AiidaTestCase):
    """Unit tests for the backup-script configuration parsing.

    Each ``_json_test_input_*`` attribute is a backup-info document in the
    on-disk JSON format; the tests feed them through
    ``Backup._read_backup_info_from_dict`` and check the resulting state.
    Inputs 1 and 2 produce identical JSON (they differ only in how the
    source string is split).
    """

    _json_test_input_1 = '{"backup_length_threshold": 2, "periodicity": 2,' + \
        ' "oldest_object_backedup": "2014-07-18 13:54:53.688484+00:00", ' + \
        '"end_date_of_backup": null, "days_to_backup": null, "backup_dir": ' +\
        '"/scratch/aiida_user/backupScriptDest"}'

    _json_test_input_2 = '{"backup_length_threshold": 2, "periodicity": 2, ' +\
        '"oldest_object_backedup": "2014-07-18 13:54:53.688484+00:00", ' + \
        '"end_date_of_backup": null, "days_to_backup": null, "backup_dir": ' +\
        '"/scratch/aiida_user/backupScriptDest"}'

    _json_test_input_3 = '{"backup_length_threshold": 2, "periodicity": 2, ' +\
        '"oldest_object_backedup": "2014-07-18 13:54:53.688484+00:00", ' + \
        '"end_date_of_backup": null, "days_to_backup": 2, "backup_dir": ' + \
        '"/scratch/aiida_user/backupScriptDest"}'

    _json_test_input_4 = '{"backup_length_threshold": 2, "periodicity": 2, ' +\
        '"oldest_object_backedup": "2014-07-18 13:54:53.688484+00:00", ' + \
        '"end_date_of_backup": "2014-07-22 14:54:53.688484+00:00", ' + \
        '"days_to_backup": null, "backup_dir": ' + \
        '"/scratch/aiida_user/backupScriptDest"}'

    _json_test_input_5 = '{"backup_length_threshold": 2, "periodicity": 2, ' +\
        '"oldest_object_backedup": "2014-07-18 13:54:53.688484+00:00", ' + \
        '"end_date_of_backup": "2014-07-22 14:54:53.688484+00:00", ' + \
        '"days_to_backup": 2, "backup_dir": "/scratch/aiida_user/backup"}'

    # Input 6: naive timestamps (no timezone) and an un-normalized directory.
    _json_test_input_6 = '{"backup_length_threshold": 2, "periodicity": 2, ' +\
        '"oldest_object_backedup": "2014-07-18 13:54:53.688484", ' + \
        '"end_date_of_backup": "2014-07-22 14:54:53.688484", ' + \
        '"days_to_backup": null, ' \
        '"backup_dir": "/scratch/./aiida_user////backup//"}'

    def setUp(self):
        super(TestBackupScriptUnit, self).setUp()
        if not is_dbenv_loaded():
            load_dbenv()
        # Pick the Backup implementation matching the active backend.
        if BACKEND == BACKEND_SQLA:
            from aiida.common.additions.backup_script.backup_sqlalchemy import Backup
        elif BACKEND == BACKEND_DJANGO:
            from aiida.common.additions.backup_script.backup_django import Backup
        else:
            self.skipTest("Unknown backend")
        self._backup_setup_inst = Backup("", 2)

    def tearDown(self):
        super(TestBackupScriptUnit, self).tearDown()
        self._backup_setup_inst = None

    def test_loading_basic_params_from_file(self):
        """
        This method tests the correct loading of the basic _backup_setup_inst
        parameters from a JSON string.
        """
        backup_variables = json.loads(self._json_test_input_1)
        self._backup_setup_inst._ignore_backup_dir_existence_check = True
        self._backup_setup_inst._read_backup_info_from_dict(backup_variables)

        self.assertEqual(
            self._backup_setup_inst._oldest_object_bk,
            parse("2014-07-18 13:54:53.688484+00:00"),
            "Last _backup_setup_inst start date is not parsed correctly")

        # The destination directory of the _backup_setup_inst
        self.assertEqual(
            self._backup_setup_inst._backup_dir,
            "/scratch/aiida_user/backupScriptDest",
            "_backup_setup_inst destination directory not parsed correctly")

        self.assertEqual(
            self._backup_setup_inst._backup_length_threshold,
            datetime.timedelta(hours=2),
            "_backup_length_threshold not parsed correctly")

        self.assertEqual(
            self._backup_setup_inst._periodicity,
            2,
            "_periodicity not parsed correctly")

    def test_loading_backup_time_params_from_file_1(self):
        """
        This method tests that the _backup_setup_inst limits are correctly
        loaded from the JSON string and are correctly set.

        In the parsed JSON string, no _backup_setup_inst end limits are set
        """
        backup_variables = json.loads(self._json_test_input_2)
        self._backup_setup_inst._ignore_backup_dir_existence_check = True
        self._backup_setup_inst._read_backup_info_from_dict(backup_variables)

        self.assertEqual(
            self._backup_setup_inst._days_to_backup,
            None,
            "_days_to_backup should be None/null but it is not")
        self.assertEqual(
            self._backup_setup_inst._end_date_of_backup,
            None,
            "_end_date_of_backup should be None/null but it is not")
        self.assertEqual(
            self._backup_setup_inst._internal_end_date_of_backup,
            None,
            "_internal_end_date_of_backup should be None/null but it is not")

    def test_loading_backup_time_params_from_file_2(self):
        """
        This method tests that the _backup_setup_inst limits are correctly
        loaded from the JSON string and are correctly set.

        In the parsed JSON string, only the daysToBackup limit is set.
        """
        backup_variables = json.loads(self._json_test_input_3)
        self._backup_setup_inst._ignore_backup_dir_existence_check = True
        self._backup_setup_inst._read_backup_info_from_dict(backup_variables)

        self.assertEqual(
            self._backup_setup_inst._days_to_backup,
            2,
            "_days_to_backup should be 2 but it is not")
        self.assertEqual(
            self._backup_setup_inst._end_date_of_backup,
            None,
            "_end_date_of_backup should be None/null but it is not")
        # oldest_object_backedup (2014-07-18) + 2 days = 2014-07-20.
        self.assertEqual(
            self._backup_setup_inst._internal_end_date_of_backup,
            parse("2014-07-20 13:54:53.688484+00:00"),
            "_internal_end_date_of_backup is not the expected one")

    def test_loading_backup_time_params_from_file_3(self):
        """
        This method tests that the _backup_setup_inst limits are correctly
        loaded from the JSON string and are correctly set.

        In the parsed JSON string, only the endDateOfBackup limit is set.
        """
        backup_variables = json.loads(self._json_test_input_4)
        self._backup_setup_inst._ignore_backup_dir_existence_check = True
        self._backup_setup_inst._read_backup_info_from_dict(backup_variables)

        self.assertEqual(
            self._backup_setup_inst._days_to_backup,
            None,
            "_days_to_backup should be None/null but it is not")
        # NOTE(review): the failure message below says "None/null" but the
        # assertion compares against an explicit date — copy-paste slip.
        self.assertEqual(
            self._backup_setup_inst._end_date_of_backup,
            parse("2014-07-22 14:54:53.688484+00:00"),
            "_end_date_of_backup should be None/null but it is not")
        self.assertEqual(
            self._backup_setup_inst._internal_end_date_of_backup,
            parse("2014-07-22 14:54:53.688484+00:00"),
            "_internal_end_date_of_backup is not the expected one")

    def test_loading_backup_time_params_from_file_4(self):
        """
        This method tests that the _backup_setup_inst limits are correctly
        loaded from the JSON string and are correctly set.

        In the parsed JSON string, the endDateOfBackup & daysToBackuplimit
        are set which should lead to an exception.
        """
        from aiida.common.additions.backup_script.backup_base import BackupError
        backup_variables = json.loads(self._json_test_input_5)
        self._backup_setup_inst._ignore_backup_dir_existence_check = True

        # An exception should be raised because endDateOfBackup
        # & daysToBackuplimit have been defined in the same time.
        with self.assertRaises(BackupError):
            self._backup_setup_inst._read_backup_info_from_dict(backup_variables)

    def check_full_deserialization_serialization(self, input_string, backup_inst):
        """Helper: load ``input_string`` into ``backup_inst`` and assert that
        serializing the state back reproduces the original dictionary."""
        input_variables = json.loads(input_string)
        backup_inst._ignore_backup_dir_existence_check = True
        backup_inst._read_backup_info_from_dict(input_variables)
        target_variables = backup_inst._dictionarize_backup_info()
        self.assertEqual(input_variables, target_variables,
                         "The test string {} did not succeed".format(
                             input_string) +
                         " the serialization deserialization test.\n" +
                         "Input variables: {}\n".format(input_variables) +
                         "Output variables: {}\n".format(target_variables))

    def test_full_deserialization_serialization_1(self):
        """
        This method tests the correct deserialization / serialization of the
        variables that should be stored in a file.
        """
        input_string = self._json_test_input_1
        backup_inst = self._backup_setup_inst
        self.check_full_deserialization_serialization(input_string, backup_inst)

    def test_full_deserialization_serialization_2(self):
        """
        This method tests the correct deserialization / serialization of the
        variables that should be stored in a file.
        """
        input_string = self._json_test_input_2
        backup_inst = self._backup_setup_inst
        self.check_full_deserialization_serialization(input_string, backup_inst)

    def test_full_deserialization_serialization_3(self):
        """
        This method tests the correct deserialization / serialization of the
        variables that should be stored in a file.
        """
        input_string = self._json_test_input_3
        backup_inst = self._backup_setup_inst
        self.check_full_deserialization_serialization(input_string, backup_inst)

    def test_full_deserialization_serialization_4(self):
        """
        This method tests the correct deserialization / serialization of the
        variables that should be stored in a file.
        """
        input_string = self._json_test_input_4
        backup_inst = self._backup_setup_inst
        self.check_full_deserialization_serialization(input_string, backup_inst)

    def test_timezone_addition_and_dir_correction(self):
        """
        This method tests if the timezone is added correctly to timestamps
        that don't have a timezone. Moreover, it checks if the given directory
        paths are normalized as expected.
        """
        backup_variables = json.loads(self._json_test_input_6)
        self._backup_setup_inst._ignore_backup_dir_existence_check = True
        self._backup_setup_inst._read_backup_info_from_dict(backup_variables)

        self.assertIsNotNone(
            self._backup_setup_inst._oldest_object_bk.tzinfo,
            "Timezone info should not be none (timestamp: {})."
            .format(self._backup_setup_inst._oldest_object_bk))
        self.assertIsNotNone(
            self._backup_setup_inst._end_date_of_backup.tzinfo,
            "Timezone info should not be none (timestamp: {})."
            .format(self._backup_setup_inst._end_date_of_backup))
        self.assertIsNotNone(
            self._backup_setup_inst._internal_end_date_of_backup.tzinfo,
            "Timezone info should not be none (timestamp: {})."
            .format(self._backup_setup_inst._internal_end_date_of_backup))

        # The destination directory of the _backup_setup_inst
        self.assertEqual(
            self._backup_setup_inst._backup_dir,
            "/scratch/aiida_user/backup",
            "_backup_setup_inst destination directory is "
            "not normalized as expected.")
class TestBackupScriptIntegration(AiidaTestCase):
    """End-to-end test of the backup setup: generate the backup scripts via
    ``BackupSetup``, run them, and verify the file repository was copied
    verbatim to the backup destination."""

    _aiida_rel_path = ".aiida"
    _backup_rel_path = "backup"
    _repo_rel_path = "repository"

    _bs_instance = backup_setup.BackupSetup()

    def test_integration(self):
        """Create the backup scripts, execute them and compare the backed-up
        repository tree with the original one."""
        from aiida.utils.capturing import Capturing

        # Fill in the repository with data
        self.fill_repo()

        # Create a temp folder where the backup files will be placed and the
        # backup will be stored. Created *before* the try block: previously
        # it was assigned inside the try, so a failure in mkdtemp itself
        # would have raised NameError in the finally clause.
        temp_folder = tempfile.mkdtemp()
        try:
            # Capture the sysout of the following command
            with Capturing():
                # Create the backup scripts
                backup_full_path = self.create_backup_scripts(temp_folder)

            # Put the backup folder in the path
            sys.path.append(backup_full_path)

            # Import the backup script - this action will also run it
            # It is assumed that the backup script ends with .py
            importlib.import_module(self._bs_instance._script_filename[:-3])

            # Check the backup: the backed-up repository tree must be
            # identical to the original one.
            import os
            from aiida import settings
            from aiida.common.utils import are_dir_trees_equal
            source_dir = os.path.join(settings.REPOSITORY_PATH,
                                      self._repo_rel_path)
            dest_dir = os.path.join(backup_full_path,
                                    self._bs_instance._file_backup_folder_rel,
                                    self._repo_rel_path)
            res, msg = are_dir_trees_equal(source_dir, dest_dir)
            self.assertTrue(res, "The backed-up repository has differences to the original one. " + str(msg)
                            + ". If the test fails, report it in issue #2134.")
        finally:
            shutil.rmtree(temp_folder, ignore_errors=True)

    def fill_repo(self):
        """Store a mix of calculation and data nodes so the repository has
        content worth backing up."""
        from aiida.orm import JobCalculation, CalculationFactory, Data, DataFactory

        extra_name = self.__class__.__name__ + "/test_with_subclasses"
        calc_params = {
            'computer': self.computer,
            'resources': {'num_machines': 1,
                          'num_mpiprocs_per_machine': 1}
        }

        TemplateReplacerCalc = CalculationFactory('simpleplugins.templatereplacer')
        ParameterData = DataFactory('parameter')

        a1 = JobCalculation(**calc_params).store()
        # To query only these nodes later
        a1.set_extra(extra_name, True)
        a2 = TemplateReplacerCalc(**calc_params).store()
        # To query only these nodes later
        a2.set_extra(extra_name, True)
        a3 = Data().store()
        a3.set_extra(extra_name, True)
        a4 = ParameterData(dict={'a': 'b'}).store()
        a4.set_extra(extra_name, True)
        a5 = Node().store()
        a5.set_extra(extra_name, True)
        # I don't set the extras, just to be sure that the filtering works
        # The filtering is needed because other tests will put stuff int he DB
        a6 = JobCalculation(**calc_params)
        a6.store()
        a7 = Node()
        a7.store()

    def create_backup_scripts(self, tmp_folder):
        """Drive ``BackupSetup.run`` non-interactively by monkey-patching
        ``utils.input`` with canned answers; return the backup folder path."""
        backup_full_path = "{}/{}/{}/".format(tmp_folder, self._aiida_rel_path,
                                              self._backup_rel_path)
        # The predefined answers for the setup script
        ac = utils.ArrayCounter()
        answers = [backup_full_path,  # the backup folder path
                   "",   # should the folder be created?
                   "",   # destination folder of the backup
                   "",   # should the folder be created?
                   "n",  # print config explanation?
                   "",   # configure the backup conf file now?
                   "",   # start date of backup?
                   "",   # is it correct?
                   "",   # days to backup?
                   "",   # is it correct?
                   "",   # end date of backup
                   "",   # is it correct?
                   "1",  # periodicity
                   "",   # is it correct?
                   "0",  # threshold?
                   ""]   # is it correct?
        utils.input = lambda _: answers[ac.array_counter()]
        # Run the setup script
        self._bs_instance.run()
        return backup_full_path
| 42.502475 | 108 | 0.632695 |
up_inst._internal_end_date_of_backup,
parse("2014-07-20 13:54:53.688484+00:00"),
"_internal_end_date_of_backup is not the expected one")
def test_loading_backup_time_params_from_file_3(self):
    """Only ``end_date_of_backup`` is set in the input: ``days_to_backup``
    must stay None and the internal end date must equal the explicit one."""
    backup_variables = json.loads(self._json_test_input_4)
    self._backup_setup_inst._ignore_backup_dir_existence_check = True
    self._backup_setup_inst._read_backup_info_from_dict(backup_variables)

    self.assertEqual(
        self._backup_setup_inst._days_to_backup,
        None,
        "_days_to_backup should be None/null but it is not")
    # Fixed failure message: the previous text wrongly claimed the value
    # should be None/null, while the assertion checks an explicit date.
    self.assertEqual(
        self._backup_setup_inst._end_date_of_backup,
        parse("2014-07-22 14:54:53.688484+00:00"),
        "_end_date_of_backup is not the expected one")
    self.assertEqual(
        self._backup_setup_inst._internal_end_date_of_backup,
        parse("2014-07-22 14:54:53.688484+00:00"),
        "_internal_end_date_of_backup is not the expected one")
def test_loading_backup_time_params_from_file_4(self):
    """Setting both ``end_date_of_backup`` and ``days_to_backup`` in the
    same document is contradictory and must raise ``BackupError``."""
    from aiida.common.additions.backup_script.backup_base import BackupError
    backup_variables = json.loads(self._json_test_input_5)
    self._backup_setup_inst._ignore_backup_dir_existence_check = True
    # Input 5 defines both end limits simultaneously.
    with self.assertRaises(BackupError):
        self._backup_setup_inst._read_backup_info_from_dict(backup_variables)
def check_full_deserialization_serialization(self, input_string, backup_inst):
    """Helper: load ``input_string`` into ``backup_inst`` and assert that
    serializing the state back reproduces the original dictionary."""
    input_variables = json.loads(input_string)
    backup_inst._ignore_backup_dir_existence_check = True
    backup_inst._read_backup_info_from_dict(input_variables)
    target_variables = backup_inst._dictionarize_backup_info()
    self.assertEqual(input_variables, target_variables,
                     "The test string {} did not succeed".format(
                         input_string) +
                     " the serialization deserialization test.\n" +
                     "Input variables: {}\n".format(input_variables) +
                     "Output variables: {}\n".format(target_variables))
def test_full_deserialization_serialization_1(self):
    """Round-trip (deserialize then serialize) check for input 1."""
    input_string = self._json_test_input_1
    backup_inst = self._backup_setup_inst
    self.check_full_deserialization_serialization(input_string, backup_inst)
def test_full_deserialization_serialization_2(self):
    """Round-trip (deserialize then serialize) check for input 2."""
    input_string = self._json_test_input_2
    backup_inst = self._backup_setup_inst
    self.check_full_deserialization_serialization(input_string, backup_inst)
def test_full_deserialization_serialization_3(self):
    """Round-trip (deserialize then serialize) check for input 3."""
    input_string = self._json_test_input_3
    backup_inst = self._backup_setup_inst
    self.check_full_deserialization_serialization(input_string, backup_inst)
def test_full_deserialization_serialization_4(self):
    """Round-trip (deserialize then serialize) check for input 4."""
    input_string = self._json_test_input_4
    backup_inst = self._backup_setup_inst
    self.check_full_deserialization_serialization(input_string, backup_inst)
def test_timezone_addition_and_dir_correction(self):
    """Naive timestamps must gain a timezone and the backup directory path
    must be normalized when the backup info is loaded."""
    backup_variables = json.loads(self._json_test_input_6)
    self._backup_setup_inst._ignore_backup_dir_existence_check = True
    self._backup_setup_inst._read_backup_info_from_dict(backup_variables)
    self.assertIsNotNone(
        self._backup_setup_inst._oldest_object_bk.tzinfo,
        "Timezone info should not be none (timestamp: {})."
        .format(self._backup_setup_inst._oldest_object_bk))
    self.assertIsNotNone(
        self._backup_setup_inst._end_date_of_backup.tzinfo,
        "Timezone info should not be none (timestamp: {})."
        .format(self._backup_setup_inst._end_date_of_backup))
    self.assertIsNotNone(
        self._backup_setup_inst._internal_end_date_of_backup.tzinfo,
        "Timezone info should not be none (timestamp: {})."
        .format(self._backup_setup_inst._internal_end_date_of_backup))
    # The destination directory must come out normalized
    # (input 6 uses "/scratch/./aiida_user////backup//").
    self.assertEqual(
        self._backup_setup_inst._backup_dir,
        "/scratch/aiida_user/backup",
        "_backup_setup_inst destination directory is "
        "not normalized as expected.")
class TestBackupScriptIntegration(AiidaTestCase):
    """End-to-end test of the backup setup: generate the backup scripts via
    ``BackupSetup``, run them, and verify the file repository was copied
    verbatim to the backup destination."""

    _aiida_rel_path = ".aiida"
    _backup_rel_path = "backup"
    _repo_rel_path = "repository"

    _bs_instance = backup_setup.BackupSetup()

    def test_integration(self):
        """Create the backup scripts, execute them and compare the backed-up
        repository tree with the original one."""
        from aiida.utils.capturing import Capturing

        self.fill_repo()

        # mkdtemp is called *before* the try block: previously it was
        # assigned inside the try, so a failure in mkdtemp itself would
        # have raised NameError in the finally clause.
        temp_folder = tempfile.mkdtemp()
        try:
            with Capturing():
                backup_full_path = self.create_backup_scripts(temp_folder)
            sys.path.append(backup_full_path)
            # Importing the generated backup script also runs it; it is
            # assumed that the script filename ends with ".py".
            importlib.import_module(self._bs_instance._script_filename[:-3])

            import os
            from aiida import settings
            from aiida.common.utils import are_dir_trees_equal
            source_dir = os.path.join(settings.REPOSITORY_PATH,
                                      self._repo_rel_path)
            dest_dir = os.path.join(backup_full_path,
                                    self._bs_instance._file_backup_folder_rel,
                                    self._repo_rel_path)
            res, msg = are_dir_trees_equal(source_dir, dest_dir)
            self.assertTrue(res, "The backed-up repository has differences to the original one. " + str(msg)
                            + ". If the test fails, report it in issue #2134.")
        finally:
            shutil.rmtree(temp_folder, ignore_errors=True)

    def fill_repo(self):
        """Store a mix of calculation and data nodes so the repository has
        content worth backing up."""
        from aiida.orm import JobCalculation, CalculationFactory, Data, DataFactory

        extra_name = self.__class__.__name__ + "/test_with_subclasses"
        calc_params = {
            'computer': self.computer,
            'resources': {'num_machines': 1,
                          'num_mpiprocs_per_machine': 1}
        }

        TemplateReplacerCalc = CalculationFactory('simpleplugins.templatereplacer')
        ParameterData = DataFactory('parameter')

        a1 = JobCalculation(**calc_params).store()
        a1.set_extra(extra_name, True)
        a2 = TemplateReplacerCalc(**calc_params).store()
        a2.set_extra(extra_name, True)
        a3 = Data().store()
        a3.set_extra(extra_name, True)
        a4 = ParameterData(dict={'a': 'b'}).store()
        a4.set_extra(extra_name, True)
        a5 = Node().store()
        a5.set_extra(extra_name, True)
        # Nodes a6/a7 deliberately get no extra:
        # The filtering is needed because other tests will put stuff int he DB
        a6 = JobCalculation(**calc_params)
        a6.store()
        a7 = Node()
        a7.store()

    def create_backup_scripts(self, tmp_folder):
        """Drive ``BackupSetup.run`` non-interactively by monkey-patching
        ``utils.input`` with canned answers; return the backup folder path."""
        backup_full_path = "{}/{}/{}/".format(tmp_folder, self._aiida_rel_path,
                                              self._backup_rel_path)
        # The predefined answers for the setup script
        ac = utils.ArrayCounter()
        answers = [backup_full_path,  # the backup folder path
                   "",   # should the folder be created?
                   "",   # destination folder of the backup
                   "",   # should the folder be created?
                   "n",  # print config explanation?
                   "",   # configure the backup conf file now?
                   "",   # start date of backup?
                   "",   # is it correct?
                   "",   # days to backup?
                   "",   # is it correct?
                   "",   # end date of backup
                   "",   # is it correct?
                   "1",  # periodicity
                   "",   # is it correct?
                   "0",  # threshold?
                   ""]   # is it correct?
        utils.input = lambda _: answers[ac.array_counter()]
        # Run the setup script
        self._bs_instance.run()
        return backup_full_path
| true | true |
f73005b31846f4a78636353fe91ff4e0db98bd66 | 221 | pyde | Python | sketch_17/sketch_17.pyde | Minindosyan/2019-fall-polytech-cs | f022362e4bf6d69d623d3212df9b038f7abf5790 | [
"MIT"
] | null | null | null | sketch_17/sketch_17.pyde | Minindosyan/2019-fall-polytech-cs | f022362e4bf6d69d623d3212df9b038f7abf5790 | [
"MIT"
] | null | null | null | sketch_17/sketch_17.pyde | Minindosyan/2019-fall-polytech-cs | f022362e4bf6d69d623d3212df9b038f7abf5790 | [
"MIT"
def setup():
    # Processing (Python mode) entry point: runs once at sketch start.
    size(500,500)
    smooth()
    background(235)
    strokeWeight(30)
    # draw() will run only once.
    noLoop()
def draw():
    # Draw seven thick diagonal strokes between y=200 and y=300.
    for col in range(1, 8):
        stroke(20)
        top_x = col * 50
        bottom_x = 150 + (col - 1) * 50
        line(top_x, 200, bottom_x, 300)
def setup():
    # Processing (Python mode) entry point: runs once at sketch start.
    size(500,500)
    smooth()
    background(235)
    strokeWeight(30)
    # draw() will run only once.
    noLoop()
def draw():
    # Draw seven thick diagonal strokes between y=200 and y=300.
    for i in range(1,8):
        stroke(20)
        line(i*50,200,150+(i-1)*50,300)
| true | true |
f73005c3d0ecb0b6aa0d1b17a014e3ce2b1283cd | 2,751 | py | Python | task_manager/tasks/migrations/0001_initial.py | Ritesh-Aggarwal/Task-Manager-Django | b8f8df10b0b0a9cc9cd27346a0b5d4d5892d2f24 | [
"MIT"
] | null | null | null | task_manager/tasks/migrations/0001_initial.py | Ritesh-Aggarwal/Task-Manager-Django | b8f8df10b0b0a9cc9cd27346a0b5d4d5892d2f24 | [
"MIT"
] | null | null | null | task_manager/tasks/migrations/0001_initial.py | Ritesh-Aggarwal/Task-Manager-Django | b8f8df10b0b0a9cc9cd27346a0b5d4d5892d2f24 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.12 on 2022-03-04 13:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import tasks.models
class Migration(migrations.Migration):
    """Initial schema for the tasks app: Task, ReportSchedule and History.

    Auto-generated by Django. The operations must keep matching what was
    applied to existing databases, so only comments should be edited here.
    """

    initial = True

    dependencies = [
        # Task/ReportSchedule/History all reference the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('description', models.TextField()),
                ('completed', models.BooleanField(default=False)),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('deleted', models.BooleanField(default=False)),
                ('priority', models.IntegerField(default=0)),
                ('status', models.CharField(choices=[('PENDING', 'PENDING'), ('IN_PROGRESS', 'IN_PROGRESS'), ('COMPLETED', 'COMPLETED'), ('CANCELLED', 'CANCELLED')], default='PENDING', max_length=100)),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='ReportSchedule',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Defaults are provided by callables in tasks.models.
                ('report_at', models.TimeField(default=tasks.models.default_start_time)),
                ('last_run_at', models.DateTimeField(default=tasks.models.default_last_runtime)),
                ('email', models.EmailField(max_length=254)),
                ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='History',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('old_status', models.CharField(choices=[('PENDING', 'PENDING'), ('IN_PROGRESS', 'IN_PROGRESS'), ('COMPLETED', 'COMPLETED'), ('CANCELLED', 'CANCELLED')], default='PENDING', max_length=100)),
                ('new_status', models.CharField(choices=[('PENDING', 'PENDING'), ('IN_PROGRESS', 'IN_PROGRESS'), ('COMPLETED', 'COMPLETED'), ('CANCELLED', 'CANCELLED')], default='PENDING', max_length=100)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('task', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tasks.task')),
            ],
        ),
    ]
| 51.90566 | 206 | 0.616503 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import tasks.models
class Migration(migrations.Migration):
    """Initial schema for the tasks app: Task, ReportSchedule and History.

    Auto-generated by Django. The operations must keep matching what was
    applied to existing databases, so only comments should be edited here.
    """

    initial = True

    dependencies = [
        # Task/ReportSchedule/History all reference the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('description', models.TextField()),
                ('completed', models.BooleanField(default=False)),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('deleted', models.BooleanField(default=False)),
                ('priority', models.IntegerField(default=0)),
                ('status', models.CharField(choices=[('PENDING', 'PENDING'), ('IN_PROGRESS', 'IN_PROGRESS'), ('COMPLETED', 'COMPLETED'), ('CANCELLED', 'CANCELLED')], default='PENDING', max_length=100)),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='ReportSchedule',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Defaults are provided by callables in tasks.models.
                ('report_at', models.TimeField(default=tasks.models.default_start_time)),
                ('last_run_at', models.DateTimeField(default=tasks.models.default_last_runtime)),
                ('email', models.EmailField(max_length=254)),
                ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='History',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('old_status', models.CharField(choices=[('PENDING', 'PENDING'), ('IN_PROGRESS', 'IN_PROGRESS'), ('COMPLETED', 'COMPLETED'), ('CANCELLED', 'CANCELLED')], default='PENDING', max_length=100)),
                ('new_status', models.CharField(choices=[('PENDING', 'PENDING'), ('IN_PROGRESS', 'IN_PROGRESS'), ('COMPLETED', 'COMPLETED'), ('CANCELLED', 'CANCELLED')], default='PENDING', max_length=100)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('task', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tasks.task')),
            ],
        ),
    ]
| true | true |
f73006eb526b889195551e800d11eb06f680327c | 5,986 | py | Python | adb/windows/platform-tools/systrace/catapult/telemetry/telemetry/internal/platform/linux_based_platform_backend.py | llaske/sugarizer-deployment-tool-desktop | 34df1a56b68b15b6771671f87ab66586d60c514a | [
"Apache-2.0"
] | 1 | 2019-01-17T19:03:17.000Z | 2019-01-17T19:03:17.000Z | adb/MACOS/platform-tools/systrace/catapult/telemetry/telemetry/internal/platform/linux_based_platform_backend.py | llaske/sugarizer-deployment-tool-desktop | 34df1a56b68b15b6771671f87ab66586d60c514a | [
"Apache-2.0"
] | 2 | 2017-09-08T20:26:05.000Z | 2017-09-08T20:29:07.000Z | adb/windows/platform-tools/systrace/catapult/telemetry/telemetry/internal/platform/linux_based_platform_backend.py | llaske/sugarizer-deployment-tool-desktop | 34df1a56b68b15b6771671f87ab66586d60c514a | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
try:
import resource # pylint: disable=import-error
except ImportError:
resource = None # Not available on all platforms
import re
from telemetry.core import exceptions
from telemetry import decorators
from telemetry.internal.platform import platform_backend
class LinuxBasedPlatformBackend(platform_backend.PlatformBackend):
  """Abstract platform containing functionality shared by all Linux based OSes.

  This includes Android and ChromeOS.

  Subclasses must implement RunCommand, GetFileContents, GetPsOutput, and
  ParseCStateSample."""

  def GetSystemCommitCharge(self):
    """Returns the system commit charge in kB, or None if unavailable.

    Computed from /proc/meminfo as MemTotal - MemFree - Buffers - Cached.
    """
    meminfo_contents = self.GetFileContents('/proc/meminfo')
    meminfo = self._GetProcFileDict(meminfo_contents)
    if not meminfo:
      return None
    return (self._ConvertToKb(meminfo['MemTotal'])
            - self._ConvertToKb(meminfo['MemFree'])
            - self._ConvertToKb(meminfo['Buffers'])
            - self._ConvertToKb(meminfo['Cached']))

  @decorators.Cache
  def GetSystemTotalPhysicalMemory(self):
    """Returns the total physical memory in bytes (cached after first call)."""
    meminfo_contents = self.GetFileContents('/proc/meminfo')
    meminfo = self._GetProcFileDict(meminfo_contents)
    if not meminfo:
      return None
    return self._ConvertToBytes(meminfo['MemTotal'])

  def GetCpuStats(self, pid):
    """Returns {'CpuProcessTime': seconds} for the given pid.

    Sums user and system time (fields 14/15 of /proc/<pid>/stat, measured
    in jiffies) and converts to seconds via GetClockTicks().
    """
    results = {}
    stats = self._GetProcFileForPid(pid, 'stat')
    if not stats:
      return results
    stats = stats.split()
    utime = float(stats[13])
    stime = float(stats[14])
    cpu_process_jiffies = utime + stime
    clock_ticks = self.GetClockTicks()
    results.update({'CpuProcessTime': cpu_process_jiffies / clock_ticks})
    return results

  def GetCpuTimestamp(self):
    """Returns {'TotalTime': seconds} derived from the kernel jiffies counter."""
    total_jiffies = self._GetProcJiffies()
    clock_ticks = self.GetClockTicks()
    return {'TotalTime': total_jiffies / clock_ticks}

  @decorators.Deprecated(
      2017, 11, 4,
      'Clients should use tracing and memory-infra in new Telemetry '
      'benchmarks. See for context: https://crbug.com/632021')
  def GetMemoryStats(self, pid):
    """Returns VM and working-set statistics (in bytes) for the given pid.

    Returns {} for zombie processes or when the /proc files are empty.
    NOTE(review): relies on the module-level `resource` import, which is
    None on platforms where that module is unavailable -- confirm callers
    only reach this on Linux-based hosts.
    """
    status_contents = self._GetProcFileForPid(pid, 'status')
    stats = self._GetProcFileForPid(pid, 'stat').split()
    status = self._GetProcFileDict(status_contents)
    if not status or not stats or 'Z' in status['State']:
      return {}
    vm = int(stats[22])
    vm_peak = (self._ConvertToBytes(status['VmPeak'])
               if 'VmPeak' in status else vm)
    wss = int(stats[23]) * resource.getpagesize()
    wss_peak = (self._ConvertToBytes(status['VmHWM'])
                if 'VmHWM' in status else wss)
    private_dirty_bytes = 0
    for line in self._GetProcFileForPid(pid, 'smaps').splitlines():
      if line.startswith('Private_Dirty:'):
        private_dirty_bytes += self._ConvertToBytes(line.split(':')[1].strip())
    return {'VM': vm,
            'VMPeak': vm_peak,
            'PrivateDirty': private_dirty_bytes,
            'WorkingSetSize': wss,
            'WorkingSetSizePeak': wss_peak}

  @decorators.Cache
  def GetClockTicks(self):
    """Returns the number of clock ticks per second.

    The proper way is to call os.sysconf('SC_CLK_TCK') but that is not easy
    to do on Android/CrOS. In practice, nearly all Linux machines have a
    USER_HZ of 100, so just return that.
    """
    return 100

  def GetFileContents(self, filename):
    # Subclass responsibility.
    raise NotImplementedError()

  def GetPsOutput(self, columns, pid=None):
    # Subclass responsibility.
    raise NotImplementedError()

  def RunCommand(self, cmd):
    """Runs the specified command.

    Args:
        cmd: A list of program arguments or the path string of the program.

    Returns:
        A string whose content is the output of the command.
    """
    raise NotImplementedError()

  @staticmethod
  def ParseCStateSample(sample):
    """Parse a single c-state residency sample.

    Args:
        sample: A sample of c-state residency times to be parsed. Organized as
            a dictionary mapping CPU name to a string containing all c-state
            names, the times in each state, the latency of each state, and the
            time at which the sample was taken all separated by newlines.
            Ex: {'cpu0': 'C0\nC1\n5000\n2000\n20\n30\n1406673171'}

    Returns:
        Dictionary associating a c-state with a time.
    """
    raise NotImplementedError()

  def _IsPidAlive(self, pid):
    """Returns True if ps still reports a process with this pid."""
    assert pid, 'pid is required'
    return bool(self.GetPsOutput(['pid'], pid) == str(pid))

  def _GetProcFileForPid(self, pid, filename):
    """Reads /proc/<pid>/<filename>, raising ProcessGoneException when the
    read failed because the process has exited."""
    try:
      return self.GetFileContents('/proc/%s/%s' % (pid, filename))
    except IOError:
      if not self._IsPidAlive(pid):
        raise exceptions.ProcessGoneException()
      raise

  def _ConvertToKb(self, value):
    """Converts a /proc value such as '12345 kB' to the integer 12345."""
    return int(value.replace('kB', ''))

  def _ConvertToBytes(self, value):
    return self._ConvertToKb(value) * 1024

  def _GetProcFileDict(self, contents):
    """Parses 'Key: value' lines (e.g. /proc/meminfo) into a dict."""
    retval = {}
    for line in contents.splitlines():
      # Split on the first ':' only; a value that itself contains a colon
      # previously raised "too many values to unpack" here.
      key, value = line.split(':', 1)
      retval[key.strip()] = value.strip()
    return retval

  def _GetProcJiffies(self):
    """Parse '/proc/timer_list' output and returns the first jiffies attribute.

    Multi-CPU machines will have multiple 'jiffies:' lines, all of which will
    be essentially the same.  Return the first one."""
    jiffies_timer_lines = self.RunCommand(
        ['grep', 'jiffies', '/proc/timer_list'])
    if not jiffies_timer_lines:
      raise Exception('Unable to find jiffies from /proc/timer_list')
    jiffies_timer_list = jiffies_timer_lines.splitlines()
    # Each line should look something like 'jiffies: 4315883489'.
    for line in jiffies_timer_list:
      match = re.match(r'\s*jiffies\s*:\s*(\d+)', line)
      if match:
        value = match.group(1)
        return float(value)
    raise Exception('Unable to parse jiffies attribute: %s' %
                    repr(jiffies_timer_lines))
| 34.011364 | 86 | 0.683762 |
try:
import resource
except ImportError:
resource = None
import re
from telemetry.core import exceptions
from telemetry import decorators
from telemetry.internal.platform import platform_backend
class LinuxBasedPlatformBackend(platform_backend.PlatformBackend):
  """Abstract platform with functionality shared by Linux-based OSes
  (including Android and Chrome OS).
  Subclasses must implement RunCommand, GetFileContents, GetPsOutput, and
  ParseCStateSample."""
  def GetSystemCommitCharge(self):
    """Returns the system commit charge in kB, or None if /proc/meminfo
    is empty; computed as MemTotal - MemFree - Buffers - Cached."""
    meminfo_contents = self.GetFileContents('/proc/meminfo')
    meminfo = self._GetProcFileDict(meminfo_contents)
    if not meminfo:
      return None
    return (self._ConvertToKb(meminfo['MemTotal'])
            - self._ConvertToKb(meminfo['MemFree'])
            - self._ConvertToKb(meminfo['Buffers'])
            - self._ConvertToKb(meminfo['Cached']))
  @decorators.Cache
  def GetSystemTotalPhysicalMemory(self):
    """Returns total physical memory in bytes (cached after first read)."""
    meminfo_contents = self.GetFileContents('/proc/meminfo')
    meminfo = self._GetProcFileDict(meminfo_contents)
    if not meminfo:
      return None
    return self._ConvertToBytes(meminfo['MemTotal'])
  def GetCpuStats(self, pid):
    """Returns {'CpuProcessTime': seconds} for pid from /proc/<pid>/stat.
    utime and stime (fields stats[13]/stats[14], in jiffies) are summed and
    converted to seconds via GetClockTicks()."""
    results = {}
    stats = self._GetProcFileForPid(pid, 'stat')
    if not stats:
      return results
    stats = stats.split()
    utime = float(stats[13])
    stime = float(stats[14])
    cpu_process_jiffies = utime + stime
    clock_ticks = self.GetClockTicks()
    results.update({'CpuProcessTime': cpu_process_jiffies / clock_ticks})
    return results
  def GetCpuTimestamp(self):
    """Returns {'TotalTime': seconds} based on the kernel jiffies counter."""
    total_jiffies = self._GetProcJiffies()
    clock_ticks = self.GetClockTicks()
    return {'TotalTime': total_jiffies / clock_ticks}
  @decorators.Deprecated(
      2017, 11, 4,
      'Clients should use tracing and memory-infra in new Telemetry '
      'benchmarks. See for context: https://crbug.com/632021')
  def GetMemoryStats(self, pid):
    """Returns VM/working-set statistics (in bytes) for pid; {} for zombies.
    NOTE(review): relies on the module-level `resource` import, which may be
    None where that module failed to import -- confirm callers guard this."""
    status_contents = self._GetProcFileForPid(pid, 'status')
    stats = self._GetProcFileForPid(pid, 'stat').split()
    status = self._GetProcFileDict(status_contents)
    if not status or not stats or 'Z' in status['State']:
      return {}
    vm = int(stats[22])
    vm_peak = (self._ConvertToBytes(status['VmPeak'])
               if 'VmPeak' in status else vm)
    wss = int(stats[23]) * resource.getpagesize()
    wss_peak = (self._ConvertToBytes(status['VmHWM'])
                if 'VmHWM' in status else wss)
    private_dirty_bytes = 0
    for line in self._GetProcFileForPid(pid, 'smaps').splitlines():
      if line.startswith('Private_Dirty:'):
        private_dirty_bytes += self._ConvertToBytes(line.split(':')[1].strip())
    return {'VM': vm,
            'VMPeak': vm_peak,
            'PrivateDirty': private_dirty_bytes,
            'WorkingSetSize': wss,
            'WorkingSetSizePeak': wss_peak}
  @decorators.Cache
  def GetClockTicks(self):
    """Returns clock ticks (USER_HZ) per second; assumed to be 100."""
    return 100
  def GetFileContents(self, filename):
    # Subclass responsibility.
    raise NotImplementedError()
  def GetPsOutput(self, columns, pid=None):
    # Subclass responsibility.
    raise NotImplementedError()
  def RunCommand(self, cmd):
    """Runs cmd (a list of program arguments or a program path string) and
    returns its output as a string."""
    raise NotImplementedError()
  @staticmethod
  def ParseCStateSample(sample):
    """Parses a single c-state residency sample; see subclass docs."""
    raise NotImplementedError()
  def _IsPidAlive(self, pid):
    # ps echoing the pid back means the process still exists.
    assert pid, 'pid is required'
    return bool(self.GetPsOutput(['pid'], pid) == str(pid))
  def _GetProcFileForPid(self, pid, filename):
    """Reads /proc/<pid>/<filename>; raises ProcessGoneException when the
    read failed because the process exited."""
    try:
      return self.GetFileContents('/proc/%s/%s' % (pid, filename))
    except IOError:
      if not self._IsPidAlive(pid):
        raise exceptions.ProcessGoneException()
      raise
  def _ConvertToKb(self, value):
    # e.g. '12345 kB' -> 12345.
    return int(value.replace('kB', ''))
  def _ConvertToBytes(self, value):
    return self._ConvertToKb(value) * 1024
  def _GetProcFileDict(self, contents):
    """Parses 'Key: value' lines (e.g. /proc/meminfo) into a dict."""
    # NOTE(review): splits on every ':', so a line whose value itself
    # contains a colon would raise on unpacking; fine for the meminfo and
    # status keys read here.
    retval = {}
    for line in contents.splitlines():
      key, value = line.split(':')
      retval[key.strip()] = value.strip()
    return retval
  def _GetProcJiffies(self):
    """Returns the first 'jiffies:' value found in /proc/timer_list."""
    jiffies_timer_lines = self.RunCommand(
        ['grep', 'jiffies', '/proc/timer_list'])
    if not jiffies_timer_lines:
      raise Exception('Unable to find jiffies from /proc/timer_list')
    jiffies_timer_list = jiffies_timer_lines.splitlines()
    # Lines look like 'jiffies: 4315883489'.
    for line in jiffies_timer_list:
      match = re.match(r'\s*jiffies\s*:\s*(\d+)', line)
      if match:
        value = match.group(1)
        return float(value)
    raise Exception('Unable to parse jiffies attribute: %s' %
                    repr(jiffies_timer_lines))
| true | true |
f73008d4776542c97030787b5e4f290a43754608 | 267 | py | Python | devel/test_wv.py | binnietom/py21cmmc_wv-1 | 2d5405700c1d99bd5f22c762999aea89d1ca1c23 | [
"MIT"
] | null | null | null | devel/test_wv.py | binnietom/py21cmmc_wv-1 | 2d5405700c1d99bd5f22c762999aea89d1ca1c23 | [
"MIT"
] | null | null | null | devel/test_wv.py | binnietom/py21cmmc_wv-1 | 2d5405700c1d99bd5f22c762999aea89d1ca1c23 | [
"MIT"
] | 1 | 2022-03-04T16:21:16.000Z | 2022-03-04T16:21:16.000Z | from py21cmmc_wv import morlet
import numpy as np
bw = 50.0
numin = 130.0
N = 736
nu = np.arange(N) * bw/N + numin
mid = (nu[0] + nu[-1])/2
spectrum = np.exp(-(nu-mid)**2/ (2*4.0**2))
trnsc, fc, _ = morlet.morlet_transform_c(spectrum, nu)
trnsc = np.abs(trnsc)**2
| 19.071429 | 54 | 0.636704 | from py21cmmc_wv import morlet
import numpy as np
bw = 50.0
numin = 130.0
N = 736
nu = np.arange(N) * bw/N + numin
mid = (nu[0] + nu[-1])/2
spectrum = np.exp(-(nu-mid)**2/ (2*4.0**2))
trnsc, fc, _ = morlet.morlet_transform_c(spectrum, nu)
trnsc = np.abs(trnsc)**2
| true | true |
f7300951a7ef4c3c8387293955d5b336a64a4701 | 151,815 | py | Python | zerver/tests/test_events.py | Spian91/zulip | 2893b98ef8ba44f91966a2455a49ed8bd86e0b7b | [
"Apache-2.0"
] | null | null | null | zerver/tests/test_events.py | Spian91/zulip | 2893b98ef8ba44f91966a2455a49ed8bd86e0b7b | [
"Apache-2.0"
] | null | null | null | zerver/tests/test_events.py | Spian91/zulip | 2893b98ef8ba44f91966a2455a49ed8bd86e0b7b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html for
# high-level documentation on how this system works.
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
import copy
import os
import shutil
import sys
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.utils.timezone import now as timezone_now
from io import StringIO
from zerver.models import (
get_client, get_stream_recipient, get_stream, get_realm, get_system_bot,
Message, RealmDomain, Recipient, UserMessage, UserPresence, UserProfile,
Realm, Subscription, Stream, flush_per_request_caches, UserGroup, Service,
Attachment, PreregistrationUser, get_user_by_delivery_email, MultiuseInvite,
RealmAuditLog
)
from zerver.lib.actions import (
try_update_realm_custom_profile_field,
bulk_add_subscriptions,
bulk_remove_subscriptions,
check_add_realm_emoji,
check_send_message,
check_send_typing_notification,
do_add_alert_words,
do_add_default_stream,
do_add_reaction,
do_add_reaction_legacy,
do_add_realm_domain,
do_add_realm_filter,
do_add_streams_to_default_stream_group,
do_add_submessage,
do_change_avatar_fields,
do_change_bot_owner,
do_change_default_all_public_streams,
do_change_default_events_register_stream,
do_change_default_sending_stream,
do_change_default_stream_group_description,
do_change_default_stream_group_name,
do_change_full_name,
do_change_icon_source,
do_change_logo_source,
do_change_is_admin,
do_change_is_guest,
do_change_notification_settings,
do_change_plan_type,
do_change_realm_domain,
do_change_stream_description,
do_change_stream_invite_only,
do_change_stream_announcement_only,
do_change_subscription_property,
do_change_user_delivery_email,
do_create_user,
do_create_default_stream_group,
do_create_multiuse_invite_link,
do_deactivate_stream,
do_deactivate_user,
do_delete_messages,
do_invite_users,
do_mark_hotspot_as_read,
do_mute_topic,
do_reactivate_user,
do_regenerate_api_key,
do_remove_alert_words,
do_remove_default_stream,
do_remove_default_stream_group,
do_remove_reaction,
do_remove_reaction_legacy,
do_remove_realm_domain,
do_remove_realm_emoji,
do_remove_realm_filter,
do_remove_streams_from_default_stream_group,
do_rename_stream,
do_revoke_multi_use_invite,
do_revoke_user_invite,
do_set_realm_authentication_methods,
do_set_realm_message_editing,
do_set_realm_property,
do_set_user_display_setting,
do_set_realm_notifications_stream,
do_set_realm_signup_notifications_stream,
do_unmute_topic,
do_update_embedded_data,
do_update_message,
do_update_message_flags,
do_update_outgoing_webhook_service,
do_update_pointer,
do_update_user_presence,
do_update_user_status,
get_typing_user_profiles,
log_event,
lookup_default_stream_groups,
notify_realm_custom_profile_fields,
check_add_user_group,
do_update_user_group_name,
do_update_user_group_description,
bulk_add_members_to_user_group,
remove_members_from_user_group,
check_delete_user_group,
do_update_user_custom_profile_data_if_changed,
)
from zerver.lib.events import (
apply_events,
fetch_initial_state_data,
get_raw_user_data,
post_process_state,
)
from zerver.lib.message import (
aggregate_unread_data,
get_raw_unread_data,
render_markdown,
UnreadMessagesResult,
)
from zerver.lib.test_helpers import POSTRequestMock, get_subscription, \
get_test_image_file, stub_event_queue_user_events, queries_captured, \
create_dummy_file, stdout_suppressed
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_runner import slow
from zerver.lib.topic import (
ORIG_TOPIC,
TOPIC_NAME,
TOPIC_LINKS,
)
from zerver.lib.topic_mutes import (
add_topic_mute,
)
from zerver.lib.validator import (
check_bool, check_dict, check_dict_only, check_float, check_int, check_list, check_string,
equals, check_none_or, Validator, check_url
)
from zerver.lib.users import get_api_key
from zerver.views.events_register import _default_all_public_streams, _default_narrow
from zerver.tornado.event_queue import (
allocate_client_descriptor,
clear_client_event_queues_for_testing,
get_client_info_for_message_event,
process_message_event,
)
from zerver.tornado.views import get_events
import mock
import time
import ujson
class LogEventsTest(ZulipTestCase):
    """Exercise the log_event() helper's handling of EVENT_LOG_DIR."""

    def test_with_missing_event_log_dir_setting(self) -> None:
        # With no log directory configured, log_event() should be a
        # silent no-op rather than crashing.
        with self.settings(EVENT_LOG_DIR=None):
            log_event(dict())

    def test_log_event_mkdir(self) -> None:
        # log_event() is expected to create the configured directory
        # on demand if it does not already exist.
        log_dir = os.path.join(settings.TEST_WORKER_DIR, "test-log-dir")
        try:
            shutil.rmtree(log_dir)
        except OSError:  # nocoverage
            # The directory was not there to begin with; that is fine.
            pass
        self.assertFalse(os.path.exists(log_dir))
        with self.settings(EVENT_LOG_DIR=log_dir):
            event = {}  # type: Dict[str, int]
            log_event(event)
        self.assertTrue(os.path.exists(log_dir))
class EventsEndpointTest(ZulipTestCase):
    """Coverage-oriented tests for the /json/register and /notify_tornado
    HTTP endpoints."""
    def test_events_register_endpoint(self) -> None:
        # This test is intended to get minimal coverage on the
        # events_register code paths
        email = self.example_email("hamlet")
        with mock.patch('zerver.views.events_register.do_events_register', return_value={}):
            result = self.api_post(email, '/json/register')
        self.assert_json_success(result)
        # If Tornado cannot hand us a queue, the endpoint must fail cleanly.
        with mock.patch('zerver.lib.events.request_event_queue', return_value=None):
            result = self.api_post(email, '/json/register')
        self.assert_json_error(result, "Could not allocate event queue")
        return_event_queue = '15:11'
        return_user_events = []  # type: List[Dict[str, Any]]
        # Test that call is made to deal with a returning soft deactivated user.
        with mock.patch('zerver.lib.events.reactivate_user_if_soft_deactivated') as fa:
            with stub_event_queue_user_events(return_event_queue, return_user_events):
                result = self.api_post(email, '/json/register', dict(event_types=ujson.dumps(['pointer'])))
                self.assertEqual(fa.call_count, 1)
        # With an empty event queue, last_event_id comes back as -1.
        with stub_event_queue_user_events(return_event_queue, return_user_events):
            result = self.api_post(email, '/json/register', dict(event_types=ujson.dumps(['pointer'])))
        self.assert_json_success(result)
        result_dict = result.json()
        self.assertEqual(result_dict['last_event_id'], -1)
        self.assertEqual(result_dict['queue_id'], '15:11')
        # A queued pointer event should be applied to the returned state.
        return_event_queue = '15:12'
        return_user_events = [
            {
                'id': 6,
                'type': 'pointer',
                'pointer': 15,
            }
        ]
        with stub_event_queue_user_events(return_event_queue, return_user_events):
            result = self.api_post(email, '/json/register', dict(event_types=ujson.dumps(['pointer'])))
        self.assert_json_success(result)
        result_dict = result.json()
        self.assertEqual(result_dict['last_event_id'], 6)
        self.assertEqual(result_dict['pointer'], 15)
        self.assertEqual(result_dict['queue_id'], '15:12')
        # Now test with `fetch_event_types` not matching the event
        return_event_queue = '15:13'
        with stub_event_queue_user_events(return_event_queue, return_user_events):
            result = self.api_post(email, '/json/register',
                                   dict(event_types=ujson.dumps(['pointer']),
                                        fetch_event_types=ujson.dumps(['message'])))
        self.assert_json_success(result)
        result_dict = result.json()
        self.assertEqual(result_dict['last_event_id'], 6)
        # Check that the message event types data is in there
        self.assertIn('max_message_id', result_dict)
        # Check that the pointer event types data is not in there
        self.assertNotIn('pointer', result_dict)
        self.assertEqual(result_dict['queue_id'], '15:13')
        # Now test with `fetch_event_types` matching the event
        with stub_event_queue_user_events(return_event_queue, return_user_events):
            result = self.api_post(email, '/json/register',
                                   dict(fetch_event_types=ujson.dumps(['pointer']),
                                        event_types=ujson.dumps(['message'])))
        self.assert_json_success(result)
        result_dict = result.json()
        self.assertEqual(result_dict['last_event_id'], 6)
        # Check that we didn't fetch the messages data
        self.assertNotIn('max_message_id', result_dict)
        # Check that the pointer data is in there, and is correctly
        # updated (preserving our atomicity guarantee), though of
        # course any future pointer events won't be distributed
        self.assertIn('pointer', result_dict)
        self.assertEqual(result_dict['pointer'], 15)
        self.assertEqual(result_dict['queue_id'], '15:13')
    def test_tornado_endpoint(self) -> None:
        # This test is mostly intended to get minimal coverage on
        # the /notify_tornado endpoint, so we can have 100% URL coverage,
        # but it does exercise a little bit of the codepath.
        post_data = dict(
            data=ujson.dumps(
                dict(
                    event=dict(
                        type='other'
                    ),
                    users=[self.example_user('hamlet').id],
                ),
            ),
        )
        req = POSTRequestMock(post_data, user_profile=None)
        req.META['REMOTE_ADDR'] = '127.0.0.1'
        result = self.client_post_request('/notify_tornado', req)
        # Without the shared secret, this internal endpoint must refuse.
        self.assert_json_error(result, 'Access denied', status_code=403)
        post_data['secret'] = settings.SHARED_SECRET
        req = POSTRequestMock(post_data, user_profile=None)
        req.META['REMOTE_ADDR'] = '127.0.0.1'
        result = self.client_post_request('/notify_tornado', req)
        self.assert_json_success(result)
class GetEventsTest(ZulipTestCase):
    """End-to-end tests of the Tornado /events polling view (get_events)."""
    def tornado_call(self, view_func: Callable[[HttpRequest, UserProfile], HttpResponse],
                     user_profile: UserProfile,
                     post_data: Dict[str, Any]) -> HttpResponse:
        # Invoke a Tornado view directly with a mocked POST request,
        # bypassing the normal URL routing stack.
        request = POSTRequestMock(post_data, user_profile)
        return view_func(request, user_profile)
    def test_get_events(self) -> None:
        # Register event queues for a sender and a recipient, send private
        # messages, and verify what each queue delivers.
        user_profile = self.example_user('hamlet')
        email = user_profile.email
        recipient_user_profile = self.example_user('othello')
        recipient_email = recipient_user_profile.email
        self.login(email)
        # Register an event queue for the sender ...
        result = self.tornado_call(get_events, user_profile,
                                   {"apply_markdown": ujson.dumps(True),
                                    "client_gravatar": ujson.dumps(True),
                                    "event_types": ujson.dumps(["message"]),
                                    "user_client": "website",
                                    "dont_block": ujson.dumps(True),
                                    })
        self.assert_json_success(result)
        queue_id = ujson.loads(result.content)["queue_id"]
        # ... and one for the recipient.
        recipient_result = self.tornado_call(get_events, recipient_user_profile,
                                             {"apply_markdown": ujson.dumps(True),
                                              "client_gravatar": ujson.dumps(True),
                                              "event_types": ujson.dumps(["message"]),
                                              "user_client": "website",
                                              "dont_block": ujson.dumps(True),
                                              })
        self.assert_json_success(recipient_result)
        recipient_queue_id = ujson.loads(recipient_result.content)["queue_id"]
        # A freshly registered queue starts out empty.
        result = self.tornado_call(get_events, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": -1,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 0)
        local_id = '10.01'
        check_send_message(
            sender=user_profile,
            client=get_client('whatever'),
            message_type_name='private',
            message_to=[recipient_email],
            topic_name=None,
            message_content='hello',
            local_id=local_id,
            sender_queue_id=queue_id,
        )
        # The sender's queue gets the message echoed back with the
        # client-supplied local id (used for deduplication).
        result = self.tornado_call(get_events, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": -1,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 1)
        self.assertEqual(events[0]["type"], "message")
        self.assertEqual(events[0]["message"]["sender_email"], email)
        self.assertEqual(events[0]["local_message_id"], local_id)
        self.assertEqual(events[0]["message"]["display_recipient"][0]["is_mirror_dummy"], False)
        self.assertEqual(events[0]["message"]["display_recipient"][1]["is_mirror_dummy"], False)
        last_event_id = events[0]["id"]
        local_id = '10.02'
        check_send_message(
            sender=user_profile,
            client=get_client('whatever'),
            message_type_name='private',
            message_to=[recipient_email],
            topic_name=None,
            message_content='hello',
            local_id=local_id,
            sender_queue_id=queue_id,
        )
        # Polling with last_event_id skips already-delivered events.
        result = self.tornado_call(get_events, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": last_event_id,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 1)
        self.assertEqual(events[0]["type"], "message")
        self.assertEqual(events[0]["message"]["sender_email"], email)
        self.assertEqual(events[0]["local_message_id"], local_id)
        # Test that the received message in the receiver's event queue
        # exists and does not contain a local id
        recipient_result = self.tornado_call(get_events, recipient_user_profile,
                                             {"queue_id": recipient_queue_id,
                                              "user_client": "website",
                                              "last_event_id": -1,
                                              "dont_block": ujson.dumps(True),
                                              })
        recipient_events = ujson.loads(recipient_result.content)["events"]
        self.assert_json_success(recipient_result)
        self.assertEqual(len(recipient_events), 2)
        self.assertEqual(recipient_events[0]["type"], "message")
        self.assertEqual(recipient_events[0]["message"]["sender_email"], email)
        self.assertTrue("local_message_id" not in recipient_events[0])
        self.assertEqual(recipient_events[1]["type"], "message")
        self.assertEqual(recipient_events[1]["message"]["sender_email"], email)
        self.assertTrue("local_message_id" not in recipient_events[1])
    def test_get_events_narrow(self) -> None:
        # A queue registered with a narrow receives only matching events;
        # also exercises the apply_markdown/client_gravatar rendering flags.
        user_profile = self.example_user('hamlet')
        email = user_profile.email
        self.login(email)
        def get_message(apply_markdown: bool, client_gravatar: bool) -> Dict[str, Any]:
            # Register a queue narrowed to stream "denmark" with the given
            # rendering flags, send one PM (filtered out by the narrow) and
            # one stream message, and return the single delivered message.
            result = self.tornado_call(
                get_events,
                user_profile,
                dict(
                    apply_markdown=ujson.dumps(apply_markdown),
                    client_gravatar=ujson.dumps(client_gravatar),
                    event_types=ujson.dumps(["message"]),
                    narrow=ujson.dumps([["stream", "denmark"]]),
                    user_client="website",
                    dont_block=ujson.dumps(True),
                )
            )
            self.assert_json_success(result)
            queue_id = ujson.loads(result.content)["queue_id"]
            result = self.tornado_call(get_events, user_profile,
                                       {"queue_id": queue_id,
                                        "user_client": "website",
                                        "last_event_id": -1,
                                        "dont_block": ujson.dumps(True),
                                        })
            events = ujson.loads(result.content)["events"]
            self.assert_json_success(result)
            self.assert_length(events, 0)
            self.send_personal_message(email, self.example_email("othello"), "hello")
            self.send_stream_message(email, "Denmark", "**hello**")
            result = self.tornado_call(get_events, user_profile,
                                       {"queue_id": queue_id,
                                        "user_client": "website",
                                        "last_event_id": -1,
                                        "dont_block": ujson.dumps(True),
                                        })
            events = ujson.loads(result.content)["events"]
            self.assert_json_success(result)
            self.assert_length(events, 1)
            self.assertEqual(events[0]["type"], "message")
            return events[0]['message']
        # All four combinations of (apply_markdown, client_gravatar):
        message = get_message(apply_markdown=False, client_gravatar=False)
        self.assertEqual(message["display_recipient"], "Denmark")
        self.assertEqual(message["content"], "**hello**")
        self.assertIn('gravatar.com', message["avatar_url"])
        message = get_message(apply_markdown=True, client_gravatar=False)
        self.assertEqual(message["display_recipient"], "Denmark")
        self.assertEqual(message["content"], "<p><strong>hello</strong></p>")
        self.assertIn('gravatar.com', message["avatar_url"])
        message = get_message(apply_markdown=False, client_gravatar=True)
        self.assertEqual(message["display_recipient"], "Denmark")
        self.assertEqual(message["content"], "**hello**")
        self.assertEqual(message["avatar_url"], None)
        message = get_message(apply_markdown=True, client_gravatar=True)
        self.assertEqual(message["display_recipient"], "Denmark")
        self.assertEqual(message["content"], "<p><strong>hello</strong></p>")
        self.assertEqual(message["avatar_url"], None)
class EventsRegisterTest(ZulipTestCase):
    def setUp(self) -> None:
        # All register/apply_events tests in this class run from
        # Hamlet's perspective.
        super().setUp()
        self.user_profile = self.example_user('hamlet')
    def create_bot(self, email: str, **extras: Any) -> Optional[UserProfile]:
        """Create a test bot owned by self.user_profile (see create_test_bot)."""
        return self.create_test_bot(email, self.user_profile, **extras)
def realm_bot_schema(self, field_name: str, check: Validator) -> Validator:
return self.check_events_dict([
('type', equals('realm_bot')),
('op', equals('update')),
('bot', check_dict_only([
('email', check_string),
('user_id', check_int),
(field_name, check),
])),
])
    def do_test(self, action: Callable[[], object], event_types: Optional[List[str]]=None,
                include_subscribers: bool=True, state_change_expected: bool=True,
                notification_settings_null: bool=False,
                client_gravatar: bool=False, num_events: int=1) -> List[Dict[str, Any]]:
        '''
        Run `action`, capture the events it generates, and verify that
        applying those events to a pre-action state snapshot yields the
        same state as fetching fresh state after the action.

        Make sure we have a clean slate of client descriptors for these tests.
        If we don't do this, then certain failures will only manifest when you
        run multiple tests within a single test function.

        See also https://zulip.readthedocs.io/en/latest/subsystems/events-system.html#testing
        for details on the design of this test system.
        '''
        clear_client_event_queues_for_testing()
        # Register an event queue so the action's events are captured.
        client = allocate_client_descriptor(
            dict(user_profile_id = self.user_profile.id,
                 user_profile_email = self.user_profile.email,
                 realm_id = self.user_profile.realm_id,
                 event_types = event_types,
                 client_type_name = "website",
                 apply_markdown = True,
                 client_gravatar = client_gravatar,
                 all_public_streams = False,
                 queue_timeout = 600,
                 last_connection_time = time.time(),
                 narrow = [])
        )
        # hybrid_state = initial fetch state + re-applying events triggered by our action
        # normal_state = do action then fetch at the end (the "normal" code path)
        hybrid_state = fetch_initial_state_data(
            self.user_profile, event_types, "",
            client_gravatar=True,
            include_subscribers=include_subscribers
        )
        action()
        events = client.event_queue.contents()
        self.assertEqual(len(events), num_events)
        # Snapshot the pre-event state so we can check whether the events
        # actually change anything (or, if not expected to, that they don't).
        initial_state = copy.deepcopy(hybrid_state)
        post_process_state(self.user_profile, initial_state, notification_settings_null)
        before = ujson.dumps(initial_state)
        apply_events(hybrid_state, events, self.user_profile,
                     client_gravatar=True, include_subscribers=include_subscribers)
        post_process_state(self.user_profile, hybrid_state, notification_settings_null)
        after = ujson.dumps(hybrid_state)
        if state_change_expected:
            if before == after:  # nocoverage
                print(ujson.dumps(initial_state, indent=2))
                print(events)
                raise AssertionError('Test does not exercise enough code -- events do not change state.')
        else:
            try:
                self.match_states(initial_state, copy.deepcopy(hybrid_state), events)
            except AssertionError:  # nocoverage
                raise AssertionError('Test is invalid--state actually does change here.')
        # Fetch fresh state post-action; it must match the event-derived state.
        normal_state = fetch_initial_state_data(
            self.user_profile, event_types, "",
            client_gravatar=True,
            include_subscribers=include_subscribers,
        )
        post_process_state(self.user_profile, normal_state, notification_settings_null)
        self.match_states(hybrid_state, normal_state, events)
        return events
def assert_on_error(self, error: Optional[str]) -> None:
if error:
raise AssertionError(error)
    def match_states(self, state1: Dict[str, Any], state2: Dict[str, Any],
                     events: List[Dict[str, Any]]) -> None:
        """Assert that two state payloads are equivalent, printing a
        developer-friendly per-key diff on mismatch."""
        def normalize(state: Dict[str, Any]) -> None:
            # Sort subscriber lists and key subscription/bot lists by
            # name/email so ordering differences don't cause false diffs.
            for u in state['never_subscribed']:
                if 'subscribers' in u:
                    u['subscribers'].sort()
            for u in state['subscriptions']:
                if 'subscribers' in u:
                    u['subscribers'].sort()
            state['subscriptions'] = {u['name']: u for u in state['subscriptions']}
            state['unsubscribed'] = {u['name']: u for u in state['unsubscribed']}
            if 'realm_bots' in state:
                state['realm_bots'] = {u['email']: u for u in state['realm_bots']}
        normalize(state1)
        normalize(state2)
        # If this assertion fails, we have unusual problems.
        self.assertEqual(state1.keys(), state2.keys())
        # The far more likely scenario is that some section of
        # our enormous payload does not get updated properly.  We
        # want the diff here to be developer-friendly, hence
        # the somewhat tedious code to provide useful output.
        if state1 != state2:  # nocoverage
            print('\n---States DO NOT MATCH---')
            print('\nEVENTS:\n')
            # Printing out the events is a big help to
            # developers.
            import json
            for event in events:
                print(json.dumps(event, indent=4))
            print('\nMISMATCHES:\n')
            for k in state1:
                if state1[k] != state2[k]:
                    print('\nkey = ' + k)
                    try:
                        self.assertEqual({k: state1[k]}, {k: state2[k]})
                    except AssertionError as e:
                        print(e)
            print('''
            NOTE:
            This is an advanced test that verifies how
            we apply events after fetching data. If you
            do not know how to debug it, you can ask for
            help on chat.
            ''')
            sys.stdout.flush()
            raise AssertionError('Mismatching states')
def check_events_dict(self, required_keys: List[Tuple[str, Validator]]) -> Validator:
required_keys.append(('id', check_int))
# Raise AssertionError if `required_keys` contains duplicate items.
keys = [key[0] for key in required_keys]
self.assertEqual(len(keys), len(set(keys)), 'Duplicate items found in required_keys.')
return check_dict_only(required_keys)
def test_mentioned_send_message_events(self) -> None:
user = self.example_user('hamlet')
for i in range(3):
content = 'mentioning... @**' + user.full_name + '** hello ' + str(i)
self.do_test(
lambda: self.send_stream_message(self.example_email('cordelia'),
"Verona",
content)
)
def test_wildcard_mentioned_send_message_events(self) -> None:
for i in range(3):
content = 'mentioning... @**all** hello ' + str(i)
self.do_test(
lambda: self.send_stream_message(self.example_email('cordelia'),
"Verona",
content)
)
def test_pm_send_message_events(self) -> None:
self.do_test(
lambda: self.send_personal_message(self.example_email('cordelia'),
self.example_email('hamlet'),
'hola')
)
def test_huddle_send_message_events(self) -> None:
huddle = [
self.example_email('hamlet'),
self.example_email('othello'),
]
self.do_test(
lambda: self.send_huddle_message(self.example_email('cordelia'),
huddle,
'hola')
)
    def test_stream_send_message_events(self) -> None:
        """Check the `message` event schema for a stream message — with and
        without client_gravatar — then check the `update_message` events
        produced by message editing and by do_update_embedded_data."""
        def get_checker(check_gravatar: Validator) -> Validator:
            # avatar_url is a string when the server computes gravatars
            # (client_gravatar=False) and None when the client does.
            schema_checker = self.check_events_dict([
                ('type', equals('message')),
                ('flags', check_list(None)),
                ('message', self.check_events_dict([
                    ('avatar_url', check_gravatar),
                    ('client', check_string),
                    ('content', check_string),
                    ('content_type', equals('text/html')),
                    ('display_recipient', check_string),
                    ('is_me_message', check_bool),
                    ('reactions', check_list(None)),
                    ('recipient_id', check_int),
                    ('sender_realm_str', check_string),
                    ('sender_email', check_string),
                    ('sender_full_name', check_string),
                    ('sender_id', check_int),
                    ('sender_short_name', check_string),
                    ('stream_id', check_int),
                    (TOPIC_NAME, check_string),
                    (TOPIC_LINKS, check_list(None)),
                    ('submessages', check_list(None)),
                    ('timestamp', check_int),
                    ('type', check_string),
                ])),
            ])
            return schema_checker
        events = self.do_test(
            lambda: self.send_stream_message(self.example_email("hamlet"), "Verona", "hello"),
            client_gravatar=False,
        )
        schema_checker = get_checker(check_gravatar=check_string)
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(
            lambda: self.send_stream_message(self.example_email("hamlet"), "Verona", "hello"),
            client_gravatar=True,
        )
        schema_checker = get_checker(check_gravatar=equals(None))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Verify message editing
        schema_checker = self.check_events_dict([
            ('type', equals('update_message')),
            ('flags', check_list(None)),
            ('content', check_string),
            ('edit_timestamp', check_int),
            ('message_id', check_int),
            ('message_ids', check_list(check_int)),
            ('prior_mention_user_ids', check_list(check_int)),
            ('mention_user_ids', check_list(check_int)),
            ('presence_idle_user_ids', check_list(check_int)),
            ('stream_push_user_ids', check_list(check_int)),
            ('stream_email_user_ids', check_list(check_int)),
            ('push_notify_user_ids', check_list(check_int)),
            ('orig_content', check_string),
            ('orig_rendered_content', check_string),
            (ORIG_TOPIC, check_string),
            ('prev_rendered_content_version', check_int),
            ('propagate_mode', check_string),
            ('rendered_content', check_string),
            ('sender', check_string),
            ('stream_id', check_int),
            ('stream_name', check_string),
            (TOPIC_NAME, check_string),
            (TOPIC_LINKS, check_list(None)),
            ('user_id', check_int),
            ('is_me_message', check_bool),
        ])
        # Edit the most recently sent message (topic and content both change).
        message = Message.objects.order_by('-id')[0]
        topic = 'new_topic'
        propagate_mode = 'change_all'
        content = 'new content'
        rendered_content = render_markdown(message, content)
        prior_mention_user_ids = set() # type: Set[int]
        mentioned_user_ids = set() # type: Set[int]
        events = self.do_test(
            lambda: do_update_message(self.user_profile, message, topic,
                                      propagate_mode, content, rendered_content,
                                      prior_mention_user_ids,
                                      mentioned_user_ids),
            state_change_expected=True,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Verify do_update_embedded_data
        schema_checker = self.check_events_dict([
            ('type', equals('update_message')),
            ('flags', check_list(None)),
            ('content', check_string),
            ('message_id', check_int),
            ('message_ids', check_list(check_int)),
            ('rendered_content', check_string),
            ('sender', check_string),
        ])
        events = self.do_test(
            lambda: do_update_embedded_data(self.user_profile, message,
                                            u"embed_content", "<p>embed_content</p>"),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_update_message_flags(self) -> None:
        """Check the `update_message_flags` event schema for both the
        'add' and 'remove' operations on the 'starred' flag."""
        # Test message flag update events
        schema_checker = self.check_events_dict([
            ('all', check_bool),
            ('type', equals('update_message_flags')),
            ('flag', check_string),
            ('messages', check_list(check_int)),
            ('operation', equals("add")),
        ])
        message = self.send_personal_message(
            self.example_email("cordelia"),
            self.example_email("hamlet"),
            "hello",
        )
        user_profile = self.example_user('hamlet')
        events = self.do_test(
            lambda: do_update_message_flags(user_profile, get_client("website"), 'add', 'starred', [message]),
            state_change_expected=True,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        schema_checker = self.check_events_dict([
            ('all', check_bool),
            ('type', equals('update_message_flags')),
            ('flag', check_string),
            ('messages', check_list(check_int)),
            ('operation', equals("remove")),
        ])
        events = self.do_test(
            lambda: do_update_message_flags(user_profile, get_client("website"), 'remove', 'starred', [message]),
            state_change_expected=True,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_update_read_flag_removes_unread_msg_ids(self) -> None:
        """Marking a message read should update the unread-message state,
        both for a plain message and one that mentions the user."""
        user_profile = self.example_user('hamlet')
        mention = '@**' + user_profile.full_name + '**'
        for content in ['hello', mention]:
            message = self.send_stream_message(
                self.example_email('cordelia'),
                "Verona",
                content
            )
            self.do_test(
                lambda: do_update_message_flags(user_profile, get_client("website"), 'add', 'read', [message]),
                state_change_expected=True,
            )
def test_send_message_to_existing_recipient(self) -> None:
self.send_stream_message(
self.example_email('cordelia'),
"Verona",
"hello 1"
)
self.do_test(
lambda: self.send_stream_message("cordelia@zulip.com", "Verona", "hello 2"),
state_change_expected=True,
)
    def test_add_reaction_legacy(self) -> None:
        """Check the `reaction` add event schema for the legacy
        (emoji-name-only) reaction code path."""
        schema_checker = self.check_events_dict([
            ('type', equals('reaction')),
            ('op', equals('add')),
            ('message_id', check_int),
            ('emoji_name', check_string),
            ('emoji_code', check_string),
            ('reaction_type', check_string),
            ('user', check_dict_only([
                ('email', check_string),
                ('full_name', check_string),
                ('user_id', check_int)
            ])),
        ])
        message_id = self.send_stream_message(self.example_email("hamlet"), "Verona", "hello")
        message = Message.objects.get(id=message_id)
        # Reactions do not change the fetched state, only notify.
        events = self.do_test(
            lambda: do_add_reaction_legacy(
                self.user_profile, message, "tada"),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_remove_reaction_legacy(self) -> None:
        """Check the `reaction` remove event schema for the legacy
        (emoji-name-only) reaction code path."""
        schema_checker = self.check_events_dict([
            ('type', equals('reaction')),
            ('op', equals('remove')),
            ('message_id', check_int),
            ('emoji_name', check_string),
            ('emoji_code', check_string),
            ('reaction_type', check_string),
            ('user', check_dict_only([
                ('email', check_string),
                ('full_name', check_string),
                ('user_id', check_int)
            ])),
        ])
        message_id = self.send_stream_message(self.example_email("hamlet"), "Verona", "hello")
        message = Message.objects.get(id=message_id)
        # Add first so there is something to remove.
        do_add_reaction_legacy(self.user_profile, message, "tada")
        events = self.do_test(
            lambda: do_remove_reaction_legacy(
                self.user_profile, message, "tada"),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_add_reaction(self) -> None:
        """Check the `reaction` add event schema for the modern reaction
        API (explicit emoji code and reaction type)."""
        schema_checker = self.check_events_dict([
            ('type', equals('reaction')),
            ('op', equals('add')),
            ('message_id', check_int),
            ('emoji_name', check_string),
            ('emoji_code', check_string),
            ('reaction_type', check_string),
            ('user', check_dict_only([
                ('email', check_string),
                ('full_name', check_string),
                ('user_id', check_int)
            ])),
        ])
        message_id = self.send_stream_message(self.example_email("hamlet"), "Verona", "hello")
        message = Message.objects.get(id=message_id)
        events = self.do_test(
            lambda: do_add_reaction(
                self.user_profile, message, "tada", "1f389", "unicode_emoji"),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_add_submessage(self) -> None:
        """Check the `submessage` event schema emitted by do_add_submessage."""
        schema_checker = self.check_events_dict([
            ('type', equals('submessage')),
            ('message_id', check_int),
            ('submessage_id', check_int),
            ('sender_id', check_int),
            ('msg_type', check_string),
            ('content', check_string),
        ])
        cordelia = self.example_user('cordelia')
        stream_name = 'Verona'
        message_id = self.send_stream_message(
            sender_email=cordelia.email,
            stream_name=stream_name,
        )
        events = self.do_test(
            lambda: do_add_submessage(
                realm=cordelia.realm,
                sender_id=cordelia.id,
                message_id=message_id,
                msg_type='whatever',
                # Submessage content is a JSON-encoded string.
                content='"stuff"',
            ),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_remove_reaction(self) -> None:
        """Check the `reaction` remove event schema for the modern reaction
        API (explicit emoji code and reaction type)."""
        schema_checker = self.check_events_dict([
            ('type', equals('reaction')),
            ('op', equals('remove')),
            ('message_id', check_int),
            ('emoji_name', check_string),
            ('emoji_code', check_string),
            ('reaction_type', check_string),
            ('user', check_dict_only([
                ('email', check_string),
                ('full_name', check_string),
                ('user_id', check_int)
            ])),
        ])
        message_id = self.send_stream_message(self.example_email("hamlet"), "Verona", "hello")
        message = Message.objects.get(id=message_id)
        # Add first so there is something to remove.
        do_add_reaction(self.user_profile, message, "tada", "1f389", "unicode_emoji")
        events = self.do_test(
            lambda: do_remove_reaction(
                self.user_profile, message, "1f389", "unicode_emoji"),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_invite_user_event(self) -> None:
        """Inviting a user should emit an `invites_changed` event."""
        schema_checker = self.check_events_dict([
            ('type', equals('invites_changed')),
        ])
        # Invitations require an administrator (iago).
        self.user_profile = self.example_user('iago')
        streams = []
        for stream_name in ["Denmark", "Scotland"]:
            streams.append(get_stream(stream_name, self.user_profile.realm))
        events = self.do_test(
            lambda: do_invite_users(self.user_profile, ["foo@zulip.com"], streams, False),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_create_multiuse_invite_event(self) -> None:
        """Creating a multiuse invite link should emit `invites_changed`."""
        schema_checker = self.check_events_dict([
            ('type', equals('invites_changed')),
        ])
        # Invitations require an administrator (iago).
        self.user_profile = self.example_user('iago')
        streams = []
        for stream_name in ["Denmark", "Verona"]:
            streams.append(get_stream(stream_name, self.user_profile.realm))
        events = self.do_test(
            lambda: do_create_multiuse_invite_link(self.user_profile, PreregistrationUser.INVITE_AS['MEMBER'], streams),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_revoke_user_invite_event(self) -> None:
        """Revoking a pending user invitation should emit `invites_changed`."""
        schema_checker = self.check_events_dict([
            ('type', equals('invites_changed')),
        ])
        # Invitations require an administrator (iago).
        self.user_profile = self.example_user('iago')
        streams = []
        for stream_name in ["Denmark", "Verona"]:
            streams.append(get_stream(stream_name, self.user_profile.realm))
        do_invite_users(self.user_profile, ["foo@zulip.com"], streams, False)
        prereg_users = PreregistrationUser.objects.filter(referred_by__realm=self.user_profile.realm)
        events = self.do_test(
            lambda: do_revoke_user_invite(prereg_users[0]),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_revoke_multiuse_invite_event(self) -> None:
        """Revoking a multiuse invite link should emit `invites_changed`."""
        schema_checker = self.check_events_dict([
            ('type', equals('invites_changed')),
        ])
        # Invitations require an administrator (iago).
        self.user_profile = self.example_user('iago')
        streams = []
        for stream_name in ["Denmark", "Verona"]:
            streams.append(get_stream(stream_name, self.user_profile.realm))
        do_create_multiuse_invite_link(self.user_profile, PreregistrationUser.INVITE_AS['MEMBER'], streams)
        multiuse_object = MultiuseInvite.objects.get()
        events = self.do_test(
            lambda: do_revoke_multi_use_invite(multiuse_object),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_invitation_accept_invite_event(self) -> None:
        """Accepting an invitation (user registration) should emit
        `invites_changed` as the last of the signup events."""
        schema_checker = self.check_events_dict([
            ('type', equals('invites_changed')),
        ])
        # Invitations require an administrator (iago).
        self.user_profile = self.example_user('iago')
        streams = []
        for stream_name in ["Denmark", "Scotland"]:
            streams.append(get_stream(stream_name, self.user_profile.realm))
        do_invite_users(self.user_profile, ["foo@zulip.com"], streams, False)
        prereg_users = PreregistrationUser.objects.get(email="foo@zulip.com")
        events = self.do_test(
            lambda: do_create_user('foo@zulip.com', 'password', self.user_profile.realm,
                                   'full name', 'short name', prereg_user=prereg_users),
            state_change_expected=True,
            num_events=5,
        )
        # The invites_changed event is the fifth (last) event.
        error = schema_checker('events[4]', events[4])
        self.assert_on_error(error)
    def test_typing_events(self) -> None:
        """Check the `typing` start event schema."""
        schema_checker = self.check_events_dict([
            ('type', equals('typing')),
            ('op', equals('start')),
            ('sender', check_dict_only([
                ('email', check_string),
                ('user_id', check_int)])),
            ('recipients', check_list(check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
            ]))),
        ])
        # Typing notifications never change the fetched state.
        events = self.do_test(
            lambda: check_send_typing_notification(
                self.user_profile, [self.example_email("cordelia")], "start"),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
def test_get_typing_user_profiles(self) -> None:
"""
Make sure we properly assert failures for recipient types that should not
get typing... notifications.
"""
sender_profile = self.example_user('cordelia')
stream = get_stream('Rome', sender_profile.realm)
# Test stream
with self.assertRaisesRegex(ValueError, 'not supported for streams'):
recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
get_typing_user_profiles(recipient, sender_profile.id)
# Test some other recipient type
with self.assertRaisesRegex(ValueError, 'Bad recipient type'):
recipient = Recipient(type=999) # invalid type
get_typing_user_profiles(recipient, sender_profile.id)
    def test_custom_profile_fields_events(self) -> None:
        """Check the `custom_profile_fields` event schema, before and after
        editing a field's hint."""
        schema_checker = self.check_events_dict([
            ('type', equals('custom_profile_fields')),
            ('op', equals('add')),
            ('fields', check_list(check_dict_only([
                ('id', check_int),
                ('type', check_int),
                ('name', check_string),
                ('hint', check_string),
                ('field_data', check_string),
                ('order', check_int),
            ]))),
        ])
        events = self.do_test(
            lambda: notify_realm_custom_profile_fields(
                self.user_profile.realm, 'add'),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Update an existing field's hint and re-check the event schema.
        realm = self.user_profile.realm
        field = realm.customprofilefield_set.get(realm=realm, name='Biography')
        name = field.name
        hint = 'Biography of the user'
        try_update_realm_custom_profile_field(realm, field, name, hint=hint)
        events = self.do_test(
            lambda: notify_realm_custom_profile_fields(
                self.user_profile.realm, 'add'),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_custom_profile_field_data_events(self) -> None:
        """Check the `realm_user` update event emitted when a user's custom
        profile field value changes; text fields additionally carry a
        rendered_value."""
        schema_checker_basic = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('user_id', check_int),
                ('custom_profile_field', check_dict([
                    ('id', check_int),
                    ('value', check_none_or(check_string)),
                ])),
            ])),
        ])
        schema_checker_with_rendered_value = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('user_id', check_int),
                ('custom_profile_field', check_dict([
                    ('id', check_int),
                    ('value', check_none_or(check_string)),
                    ('rendered_value', check_none_or(check_string)),
                ])),
            ])),
        ])
        field_id = self.user_profile.realm.customprofilefield_set.get(
            realm=self.user_profile.realm, name='Biography').id
        field = {
            "id": field_id,
            "value": "New value",
        }
        events = self.do_test(lambda: do_update_user_custom_profile_data_if_changed(self.user_profile, [field]))
        error = schema_checker_with_rendered_value('events[0]', events[0])
        self.assert_on_error(error)
        # Test we pass correct stringify value in custom-user-field data event
        field_id = self.user_profile.realm.customprofilefield_set.get(
            realm=self.user_profile.realm, name='Mentor').id
        field = {
            "id": field_id,
            "value": [self.example_user("ZOE").id],
        }
        events = self.do_test(lambda: do_update_user_custom_profile_data_if_changed(self.user_profile, [field]))
        error = schema_checker_basic('events[0]', events[0])
        self.assert_on_error(error)
    def test_presence_events(self) -> None:
        """Check the `presence` event schema for an active website client."""
        schema_checker = self.check_events_dict([
            ('type', equals('presence')),
            ('email', check_string),
            ('server_timestamp', check_float),
            ('presence', check_dict_only([
                ('website', check_dict_only([
                    ('status', equals('active')),
                    ('timestamp', check_int),
                    ('client', check_string),
                    ('pushable', check_bool),
                ])),
            ])),
        ])
        events = self.do_test(lambda: do_update_user_presence(
            self.user_profile, get_client("website"), timezone_now(), UserPresence.ACTIVE))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_presence_events_multiple_clients(self) -> None:
        """With multiple clients registered, the presence event only carries
        the client whose presence actually changed (here, ZulipAndroid)."""
        schema_checker_android = self.check_events_dict([
            ('type', equals('presence')),
            ('email', check_string),
            ('server_timestamp', check_float),
            ('presence', check_dict_only([
                ('ZulipAndroid/1.0', check_dict_only([
                    ('status', equals('idle')),
                    ('timestamp', check_int),
                    ('client', check_string),
                    ('pushable', check_bool),
                ])),
            ])),
        ])
        # Register the Android client, then refresh website presence before
        # sending the Android idle update we actually check.
        self.api_post(self.user_profile.email, "/api/v1/users/me/presence", {'status': 'idle'},
                      HTTP_USER_AGENT="ZulipAndroid/1.0")
        self.do_test(lambda: do_update_user_presence(
            self.user_profile, get_client("website"), timezone_now(), UserPresence.ACTIVE))
        events = self.do_test(lambda: do_update_user_presence(
            self.user_profile, get_client("ZulipAndroid/1.0"), timezone_now(), UserPresence.IDLE))
        error = schema_checker_android('events[0]', events[0])
        self.assert_on_error(error)
def test_pointer_events(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('pointer')),
('pointer', check_int)
])
events = self.do_test(lambda: do_update_pointer(self.user_profile, get_client("website"), 1500))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_register_events(self) -> None:
        """Registering a new user should emit one `realm_user` add event,
        and the new user's visible email matches the delivery email."""
        realm_user_add_checker = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('add')),
            ('person', check_dict_only([
                ('user_id', check_int),
                ('email', check_string),
                ('avatar_url', check_none_or(check_string)),
                ('full_name', check_string),
                ('is_admin', check_bool),
                ('is_bot', check_bool),
                ('is_guest', check_bool),
                ('profile_data', check_dict_only([])),
                ('timezone', check_string),
                ('date_joined', check_string),
            ])),
        ])
        events = self.do_test(lambda: self.register("test1@zulip.com", "test1"))
        self.assert_length(events, 1)
        error = realm_user_add_checker('events[0]', events[0])
        self.assert_on_error(error)
        new_user_profile = get_user_by_delivery_email("test1@zulip.com", self.user_profile.realm)
        self.assertEqual(new_user_profile.email, "test1@zulip.com")
    def test_register_events_email_address_visibility(self) -> None:
        """With EMAIL_ADDRESS_VISIBILITY_ADMINS, a newly registered user gets
        a dummy userNN@... email instead of their delivery email."""
        realm_user_add_checker = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('add')),
            ('person', check_dict_only([
                ('user_id', check_int),
                ('email', check_string),
                ('avatar_url', check_none_or(check_string)),
                ('full_name', check_string),
                ('is_admin', check_bool),
                ('is_bot', check_bool),
                ('is_guest', check_bool),
                ('profile_data', check_dict_only([])),
                ('timezone', check_string),
                ('date_joined', check_string),
            ])),
        ])
        do_set_realm_property(self.user_profile.realm, "email_address_visibility",
                              Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
        events = self.do_test(lambda: self.register("test1@zulip.com", "test1"))
        self.assert_length(events, 1)
        error = realm_user_add_checker('events[0]', events[0])
        self.assert_on_error(error)
        new_user_profile = get_user_by_delivery_email("test1@zulip.com", self.user_profile.realm)
        self.assertEqual(new_user_profile.email, "user%s@zulip.testserver" % (new_user_profile.id,))
def test_alert_words_events(self) -> None:
alert_words_checker = self.check_events_dict([
('type', equals('alert_words')),
('alert_words', check_list(check_string)),
])
events = self.do_test(lambda: do_add_alert_words(self.user_profile, ["alert_word"]))
error = alert_words_checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(lambda: do_remove_alert_words(self.user_profile, ["alert_word"]))
error = alert_words_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_away_events(self) -> None:
        """Check the `user_status` event schema when a user sets and then
        clears an away status with status text."""
        checker = self.check_events_dict([
            ('type', equals('user_status')),
            ('user_id', check_int),
            ('away', check_bool),
            ('status_text', check_string),
        ])
        client = get_client("website")
        events = self.do_test(lambda: do_update_user_status(user_profile=self.user_profile,
                                                            away=True,
                                                            status_text='out to lunch',
                                                            client_id=client.id))
        error = checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(lambda: do_update_user_status(user_profile=self.user_profile,
                                                            away=False,
                                                            status_text='',
                                                            client_id=client.id))
        error = checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_user_group_events(self) -> None:
        """Check `user_group` event schemas across the full lifecycle:
        create, rename, re-describe, add members, remove members, delete."""
        user_group_add_checker = self.check_events_dict([
            ('type', equals('user_group')),
            ('op', equals('add')),
            ('group', check_dict_only([
                ('id', check_int),
                ('name', check_string),
                ('members', check_list(check_int)),
                ('description', check_string),
            ])),
        ])
        othello = self.example_user('othello')
        events = self.do_test(lambda: check_add_user_group(self.user_profile.realm,
                                                           'backend', [othello],
                                                           'Backend team'))
        error = user_group_add_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Test name update
        user_group_update_checker = self.check_events_dict([
            ('type', equals('user_group')),
            ('op', equals('update')),
            ('group_id', check_int),
            ('data', check_dict_only([
                ('name', check_string),
            ])),
        ])
        backend = UserGroup.objects.get(name='backend')
        events = self.do_test(lambda: do_update_user_group_name(backend, 'backendteam'))
        error = user_group_update_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Test description update
        user_group_update_checker = self.check_events_dict([
            ('type', equals('user_group')),
            ('op', equals('update')),
            ('group_id', check_int),
            ('data', check_dict_only([
                ('description', check_string),
            ])),
        ])
        description = "Backend team to deal with backend code."
        events = self.do_test(lambda: do_update_user_group_description(backend, description))
        error = user_group_update_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Test add members
        user_group_add_member_checker = self.check_events_dict([
            ('type', equals('user_group')),
            ('op', equals('add_members')),
            ('group_id', check_int),
            ('user_ids', check_list(check_int)),
        ])
        hamlet = self.example_user('hamlet')
        events = self.do_test(lambda: bulk_add_members_to_user_group(backend, [hamlet]))
        error = user_group_add_member_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Test remove members
        user_group_remove_member_checker = self.check_events_dict([
            ('type', equals('user_group')),
            ('op', equals('remove_members')),
            ('group_id', check_int),
            ('user_ids', check_list(check_int)),
        ])
        hamlet = self.example_user('hamlet')
        events = self.do_test(lambda: remove_members_from_user_group(backend, [hamlet]))
        error = user_group_remove_member_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Test delete event
        user_group_remove_checker = self.check_events_dict([
            ('type', equals('user_group')),
            ('op', equals('remove')),
            ('group_id', check_int),
        ])
        events = self.do_test(lambda: check_delete_user_group(backend.id, othello))
        error = user_group_remove_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_default_stream_groups_events(self) -> None:
        """Check the `default_stream_groups` event schema across create,
        add/remove streams, rename/re-describe, and group removal."""
        default_stream_groups_checker = self.check_events_dict([
            ('type', equals('default_stream_groups')),
            ('default_stream_groups', check_list(check_dict_only([
                ('name', check_string),
                ('id', check_int),
                ('description', check_string),
                ('streams', check_list(check_dict_only([
                    ('description', check_string),
                    ('rendered_description', check_string),
                    ('invite_only', check_bool),
                    ('is_web_public', check_bool),
                    ('is_announcement_only', check_bool),
                    ('name', check_string),
                    ('stream_id', check_int),
                    ('first_message_id', check_none_or(check_int)),
                    ('history_public_to_subscribers', check_bool)]))),
            ]))),
        ])
        streams = []
        for stream_name in ["Scotland", "Verona", "Denmark"]:
            streams.append(get_stream(stream_name, self.user_profile.realm))
        events = self.do_test(lambda: do_create_default_stream_group(
            self.user_profile.realm, "group1", "This is group1", streams))
        error = default_stream_groups_checker('events[0]', events[0])
        self.assert_on_error(error)
        group = lookup_default_stream_groups(["group1"], self.user_profile.realm)[0]
        venice_stream = get_stream("Venice", self.user_profile.realm)
        events = self.do_test(lambda: do_add_streams_to_default_stream_group(self.user_profile.realm,
                                                                             group, [venice_stream]))
        error = default_stream_groups_checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(lambda: do_remove_streams_from_default_stream_group(self.user_profile.realm,
                                                                                  group, [venice_stream]))
        error = default_stream_groups_checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(lambda: do_change_default_stream_group_description(self.user_profile.realm,
                                                                                 group, "New description"))
        error = default_stream_groups_checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(lambda: do_change_default_stream_group_name(self.user_profile.realm,
                                                                          group, "New Group Name"))
        error = default_stream_groups_checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(lambda: do_remove_default_stream_group(self.user_profile.realm, group))
        error = default_stream_groups_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_default_stream_group_events_guest(self) -> None:
        """Guests should receive no default-stream-group events at all."""
        streams = []
        for stream_name in ["Scotland", "Verona", "Denmark"]:
            streams.append(get_stream(stream_name, self.user_profile.realm))
        do_create_default_stream_group(self.user_profile.realm, "group1",
                                       "This is group1", streams)
        group = lookup_default_stream_groups(["group1"], self.user_profile.realm)[0]
        # Downgrade the user to a guest before the change we observe.
        do_change_is_guest(self.user_profile, True)
        venice_stream = get_stream("Venice", self.user_profile.realm)
        self.do_test(lambda: do_add_streams_to_default_stream_group(self.user_profile.realm,
                                                                    group, [venice_stream]),
                     state_change_expected = False, num_events=0)
def test_default_streams_events(self) -> None:
default_streams_checker = self.check_events_dict([
('type', equals('default_streams')),
('default_streams', check_list(check_dict_only([
('description', check_string),
('invite_only', check_bool),
('name', check_string),
('stream_id', check_int),
]))),
])
stream = get_stream("Scotland", self.user_profile.realm)
events = self.do_test(lambda: do_add_default_stream(stream))
error = default_streams_checker('events[0]', events[0])
events = self.do_test(lambda: do_remove_default_stream(stream))
error = default_streams_checker('events[0]', events[0])
self.assert_on_error(error)
def test_default_streams_events_guest(self) -> None:
do_change_is_guest(self.user_profile, True)
stream = get_stream("Scotland", self.user_profile.realm)
self.do_test(lambda: do_add_default_stream(stream),
state_change_expected = False, num_events=0)
self.do_test(lambda: do_remove_default_stream(stream),
state_change_expected = False, num_events=0)
    def test_muted_topics_events(self) -> None:
        """Check the `muted_topics` event schema for both muting and
        unmuting a topic; each entry is a [stream_name, topic] pair."""
        muted_topics_checker = self.check_events_dict([
            ('type', equals('muted_topics')),
            ('muted_topics', check_list(check_list(check_string, 2))),
        ])
        stream = get_stream('Denmark', self.user_profile.realm)
        recipient = get_stream_recipient(stream.id)
        events = self.do_test(lambda: do_mute_topic(
            self.user_profile, stream, recipient, "topic"))
        error = muted_topics_checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(lambda: do_unmute_topic(
            self.user_profile, stream, "topic"))
        error = muted_topics_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_change_avatar_fields(self) -> None:
        """Check the `realm_user` update event for avatar changes; switching
        back to gravatar may send None avatar URLs, so the second checker
        allows None."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
                ('avatar_url', check_string),
                ('avatar_url_medium', check_string),
                ('avatar_source', check_string),
            ])),
        ])
        events = self.do_test(
            lambda: do_change_avatar_fields(self.user_profile, UserProfile.AVATAR_FROM_USER),
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        schema_checker = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
                ('avatar_url', check_none_or(check_string)),
                ('avatar_url_medium', check_none_or(check_string)),
                ('avatar_source', check_string),
            ])),
        ])
        events = self.do_test(
            lambda: do_change_avatar_fields(self.user_profile, UserProfile.AVATAR_FROM_GRAVATAR),
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_change_full_name(self) -> None:
        """Check the `realm_user` update event emitted on a name change."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('email', check_string),
                ('full_name', check_string),
                ('user_id', check_int),
            ])),
        ])
        events = self.do_test(lambda: do_change_full_name(self.user_profile, 'Sir Hamlet', self.user_profile))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_change_user_delivery_email_email_address_visibilty_admins(self) -> None:
        """With admin-only email visibility, changing the delivery email
        emits a single `realm_user` update carrying delivery_email."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('delivery_email', check_string),
                ('user_id', check_int),
            ])),
        ])
        do_set_realm_property(self.user_profile.realm, "email_address_visibility",
                              Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
        # Important: We need to refresh from the database here so that
        # we don't have a stale UserProfile object with an old value
        # for email being passed into this next function.
        self.user_profile.refresh_from_db()
        action = lambda: do_change_user_delivery_email(self.user_profile, 'newhamlet@zulip.com')
        events = self.do_test(action, num_events=1)
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def do_set_realm_property_test(self, name: str) -> None:
        """Exercise the `realm` update event for a single realm property.

        Picks test values and a validator from the property's declared type,
        sets the first value outside do_test, then checks the event emitted
        for each subsequent value.  Raises AssertionError for a property
        type or name with no test data defined.
        """
        bool_tests = [True, False, True] # type: List[bool]
        test_values = dict(
            default_language=[u'es', u'de', u'en'],
            description=[u'Realm description', u'New description'],
            digest_weekday=[0, 1, 2],
            message_retention_days=[10, 20],
            name=[u'Zulip', u'New Name'],
            waiting_period_threshold=[10, 20],
            create_stream_policy=[3, 2, 1],
            invite_to_stream_policy=[3, 2, 1],
            email_address_visibility=[Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS],
            bot_creation_policy=[Realm.BOT_CREATION_EVERYONE],
            video_chat_provider=[
                Realm.VIDEO_CHAT_PROVIDERS['jitsi_meet']['id'],
                Realm.VIDEO_CHAT_PROVIDERS['google_hangouts']['id']
            ],
            google_hangouts_domain=[u"zulip.com", u"zulip.org"],
            zoom_api_secret=[u"abc", u"xyz"],
            zoom_api_key=[u"abc", u"xyz"],
            zoom_user_id=[u"example@example.com", u"example@example.org"]
        ) # type: Dict[str, Any]
        vals = test_values.get(name)
        property_type = Realm.property_types[name]
        # Choose the event-value validator from the declared property type.
        if property_type is bool:
            validator = check_bool
            vals = bool_tests
        elif property_type is str:
            validator = check_string
        elif property_type is int:
            validator = check_int
        elif property_type == (int, type(None)):
            validator = check_int
        else:
            raise AssertionError("Unexpected property type %s" % (property_type,))
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals(name)),
            ('value', validator),
        ])
        if vals is None:
            raise AssertionError('No test created for %s' % (name,))
        do_set_realm_property(self.user_profile.realm, name, vals[0])
        for val in vals[1:]:
            state_change_expected = True
            # NOTE(review): zoom_api_secret appears to be excluded from the
            # fetched state (secrets are not exposed), hence no state change.
            if name == "zoom_api_secret":
                state_change_expected = False
            events = self.do_test(
                lambda: do_set_realm_property(self.user_profile.realm, name, val),
                state_change_expected=state_change_expected)
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
@slow("Actually runs several full-stack fetching tests")
def test_change_realm_property(self) -> None:
for prop in Realm.property_types:
with self.settings(SEND_DIGEST_EMAILS=True):
self.do_set_realm_property_test(prop)
    @slow("Runs a large matrix of tests")
    def test_change_realm_authentication_methods(self) -> None:
        """Verify the realm/update_dict event sent when the realm's set of
        enabled authentication methods changes."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update_dict')),
            ('property', equals('default')),
            ('data', check_dict_only([
                ('authentication_methods', check_dict([]))
            ])),
        ])
        def fake_backends() -> Any:
            # Enable every backend server-side so that each combination in
            # the auth_method_dict matrix below is a legal realm setting.
            backends = (
                'zproject.backends.DevAuthBackend',
                'zproject.backends.EmailAuthBackend',
                'zproject.backends.GitHubAuthBackend',
                'zproject.backends.GoogleAuthBackend',
                'zproject.backends.ZulipLDAPAuthBackend',
            )
            return self.settings(AUTHENTICATION_BACKENDS=backends)
        # Test transitions; any new backends should be tested with T/T/T/F/T
        for (auth_method_dict) in \
                ({'Google': True, 'Email': True, 'GitHub': True, 'LDAP': False, 'Dev': False},
                 {'Google': True, 'Email': True, 'GitHub': False, 'LDAP': False, 'Dev': False},
                 {'Google': True, 'Email': False, 'GitHub': False, 'LDAP': False, 'Dev': False},
                 {'Google': True, 'Email': False, 'GitHub': True, 'LDAP': False, 'Dev': False},
                 {'Google': False, 'Email': False, 'GitHub': False, 'LDAP': False, 'Dev': True},
                 {'Google': False, 'Email': False, 'GitHub': True, 'LDAP': False, 'Dev': True},
                 {'Google': False, 'Email': True, 'GitHub': True, 'LDAP': True, 'Dev': False}):
            with fake_backends():
                events = self.do_test(
                    lambda: do_set_realm_authentication_methods(
                        self.user_profile.realm,
                        auth_method_dict))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_pin_stream(self) -> None:
        """Verify the subscription/update event sent when a user pins or
        unpins a stream."""
        schema_checker = self.check_events_dict([
            ('type', equals('subscription')),
            ('op', equals('update')),
            ('property', equals('pin_to_top')),
            ('stream_id', check_int),
            ('value', check_bool),
            ('name', check_string),
            ('email', check_string),
        ])
        stream = get_stream("Denmark", self.user_profile.realm)
        sub = get_subscription(stream.name, self.user_profile)
        # Reset to a known state so each toggle below is a real change.
        do_change_subscription_property(self.user_profile, sub, stream, "pin_to_top", False)
        for pinned in (True, False):
            events = self.do_test(lambda: do_change_subscription_property(self.user_profile, sub, stream, "pin_to_top", pinned))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_stream_notification_settings(self) -> None:
        """Verify subscription/update events for per-stream notification
        settings, both with and without notification_settings_null."""
        for setting_name in ['email_notifications']:
            schema_checker = self.check_events_dict([
                ('type', equals('subscription')),
                ('op', equals('update')),
                ('property', equals(setting_name)),
                ('stream_id', check_int),
                ('value', check_bool),
                ('name', check_string),
                ('email', check_string),
            ])
            stream = get_stream("Denmark", self.user_profile.realm)
            sub = get_subscription(stream.name, self.user_profile)

            # First test with notification_settings_null enabled
            for value in (True, False):
                events = self.do_test(lambda: do_change_subscription_property(self.user_profile, sub, stream,
                                                                              setting_name, value),
                                      notification_settings_null=True)
                error = schema_checker('events[0]', events[0])
                self.assert_on_error(error)

            # Then repeat the same toggles with the default setting.
            for value in (True, False):
                events = self.do_test(lambda: do_change_subscription_property(self.user_profile, sub, stream,
                                                                              setting_name, value))
                error = schema_checker('events[0]', events[0])
                self.assert_on_error(error)
    @slow("Runs a matrix of 6 queries to the /home view")
    def test_change_realm_message_edit_settings(self) -> None:
        """Verify the realm/update_dict event for message-editing settings."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update_dict')),
            ('property', equals('default')),
            ('data', check_dict_only([
                ('allow_message_editing', check_bool),
                ('message_content_edit_limit_seconds', check_int),
                ('allow_community_topic_editing', check_bool),
            ])),
        ])
        # Test every transition among the four possibilities {T,F} x {0, non-0}
        for (allow_message_editing, message_content_edit_limit_seconds) in \
                ((True, 0), (False, 0), (False, 1234),
                 (True, 600), (False, 0), (True, 1234)):
            events = self.do_test(
                lambda: do_set_realm_message_editing(self.user_profile.realm,
                                                     allow_message_editing,
                                                     message_content_edit_limit_seconds,
                                                     False))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_realm_notifications_stream(self) -> None:
        """Verify the realm/update event when the notifications stream is
        set and then unset (-1 means no notifications stream)."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals('notifications_stream_id')),
            ('value', check_int),
        ])
        stream = get_stream("Rome", self.user_profile.realm)
        for notifications_stream, notifications_stream_id in ((stream, stream.id), (None, -1)):
            events = self.do_test(
                lambda: do_set_realm_notifications_stream(self.user_profile.realm,
                                                          notifications_stream,
                                                          notifications_stream_id))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_realm_signup_notifications_stream(self) -> None:
        """Verify the realm/update event when the signup-notifications
        stream is set and then unset (-1 means none)."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals('signup_notifications_stream_id')),
            ('value', check_int),
        ])
        stream = get_stream("Rome", self.user_profile.realm)
        for signup_notifications_stream, signup_notifications_stream_id in ((stream, stream.id), (None, -1)):
            events = self.do_test(
                lambda: do_set_realm_signup_notifications_stream(self.user_profile.realm,
                                                                 signup_notifications_stream,
                                                                 signup_notifications_stream_id))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_is_admin(self) -> None:
        """Verify the realm_user/update event when toggling a user's
        administrator status."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('email', check_string),
                ('is_admin', check_bool),
                ('user_id', check_int),
            ])),
        ])
        # Reset to a known state so each toggle below is a real change.
        do_change_is_admin(self.user_profile, False)
        for is_admin in [True, False]:
            events = self.do_test(lambda: do_change_is_admin(self.user_profile, is_admin))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
def do_set_user_display_settings_test(self, setting_name: str) -> None:
"""Test updating each setting in UserProfile.property_types dict."""
test_changes = dict(
emojiset = [u'twitter'],
default_language = [u'es', u'de', u'en'],
timezone = [u'US/Mountain', u'US/Samoa', u'Pacific/Galapogos', u''],
demote_inactive_streams = [2, 3, 1],
) # type: Dict[str, Any]
property_type = UserProfile.property_types[setting_name]
if property_type is bool:
validator = check_bool
elif property_type is str:
validator = check_string
elif property_type is int:
validator = check_int
else:
raise AssertionError("Unexpected property type %s" % (property_type,))
num_events = 1
if setting_name == "timezone":
num_events = 2
values = test_changes.get(setting_name)
if property_type is bool:
if getattr(self.user_profile, setting_name) is False:
values = [True, False, True]
else:
values = [False, True, False]
if values is None:
raise AssertionError('No test created for %s' % (setting_name,))
for value in values:
events = self.do_test(lambda: do_set_user_display_setting(
self.user_profile, setting_name, value), num_events=num_events)
schema_checker = self.check_events_dict([
('type', equals('update_display_settings')),
('setting_name', equals(setting_name)),
('user', check_string),
('setting', validator),
])
language_schema_checker = self.check_events_dict([
('type', equals('update_display_settings')),
('language_name', check_string),
('setting_name', equals(setting_name)),
('user', check_string),
('setting', validator),
])
if setting_name == "default_language":
error = language_schema_checker('events[0]', events[0])
else:
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
timezone_schema_checker = self.check_events_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict_only([
('email', check_string),
('user_id', check_int),
('timezone', check_string),
])),
])
if setting_name == "timezone":
error = timezone_schema_checker('events[1]', events[1])
@slow("Actually runs several full-stack fetching tests")
def test_set_user_display_settings(self) -> None:
for prop in UserProfile.property_types:
self.do_set_user_display_settings_test(prop)
    @slow("Actually runs several full-stack fetching tests")
    def test_change_notification_settings(self) -> None:
        """Verify update_global_notifications events for each boolean
        notification setting, including the notification_settings_null
        case where no state change is expected."""
        for notification_setting, v in self.user_profile.notification_setting_types.items():
            if notification_setting in ["notification_sound", "desktop_icon_count_display"]:
                # These settings are tested in their own tests.
                continue

            schema_checker = self.check_events_dict([
                ('type', equals('update_global_notifications')),
                ('notification_name', equals(notification_setting)),
                ('user', check_string),
                ('setting', check_bool),
            ])
            # Reset to a known state so each toggle below is a real change.
            do_change_notification_settings(self.user_profile, notification_setting, False)
            for setting_value in [True, False]:
                events = self.do_test(lambda: do_change_notification_settings(
                    self.user_profile, notification_setting, setting_value, log=False))
                error = schema_checker('events[0]', events[0])
                self.assert_on_error(error)

            # Also test with notification_settings_null=True
            # (re-applies the final loop value, so no state change occurs).
            events = self.do_test(
                lambda: do_change_notification_settings(
                    self.user_profile, notification_setting, setting_value, log=False),
                notification_settings_null=True,
                state_change_expected=False)
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
def test_change_notification_sound(self) -> None:
notification_setting = "notification_sound"
schema_checker = self.check_events_dict([
('type', equals('update_global_notifications')),
('notification_name', equals(notification_setting)),
('user', check_string),
('setting', equals("ding")),
])
events = self.do_test(lambda: do_change_notification_settings(
self.user_profile, notification_setting, 'ding', log=False))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_desktop_icon_count_display(self) -> None:
notification_setting = "desktop_icon_count_display"
schema_checker = self.check_events_dict([
('type', equals('update_global_notifications')),
('notification_name', equals(notification_setting)),
('user', check_string),
('setting', equals(2)),
])
events = self.do_test(lambda: do_change_notification_settings(
self.user_profile, notification_setting, 2, log=False))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
schema_checker = self.check_events_dict([
('type', equals('update_global_notifications')),
('notification_name', equals(notification_setting)),
('user', check_string),
('setting', equals(1)),
])
events = self.do_test(lambda: do_change_notification_settings(
self.user_profile, notification_setting, 1, log=False))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_realm_update_plan_type(self) -> None:
        """Verify the realm/update event for plan_type changes and that the
        derived fields in the initial state update accordingly."""
        realm = self.user_profile.realm

        # Baseline: self-hosted realms get the wide-logo feature.
        state_data = fetch_initial_state_data(self.user_profile, None, "", False)
        self.assertEqual(state_data['realm_plan_type'], Realm.SELF_HOSTED)
        self.assertEqual(state_data['plan_includes_wide_organization_logo'], True)

        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals('plan_type')),
            ('value', equals(Realm.LIMITED)),
            ('extra_data', check_dict_only([
                ('upload_quota', check_int)
            ])),
        ])
        events = self.do_test(lambda: do_change_plan_type(realm, Realm.LIMITED))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

        # After downgrading to LIMITED, the wide-logo feature is disabled.
        state_data = fetch_initial_state_data(self.user_profile, None, "", False)
        self.assertEqual(state_data['realm_plan_type'], Realm.LIMITED)
        self.assertEqual(state_data['plan_includes_wide_organization_logo'], False)
    def test_realm_emoji_events(self) -> None:
        """Verify the realm_emoji/update event for adding and then removing
        a custom emoji (both operations emit the same event shape)."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm_emoji')),
            ('op', equals('update')),
            ('realm_emoji', check_dict([])),
        ])
        author = self.example_user('iago')
        with get_test_image_file('img.png') as img_file:
            events = self.do_test(lambda: check_add_realm_emoji(self.user_profile.realm,
                                                                "my_emoji",
                                                                author,
                                                                img_file))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

        events = self.do_test(lambda: do_remove_realm_emoji(self.user_profile.realm, "my_emoji"))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
def test_realm_filter_events(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('realm_filters')),
('realm_filters', check_list(None)), # TODO: validate tuples in the list
])
events = self.do_test(lambda: do_add_realm_filter(self.user_profile.realm, "#(?P<id>[123])",
"https://realm.com/my_realm_filter/%(id)s"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
self.do_test(lambda: do_remove_realm_filter(self.user_profile.realm, "#(?P<id>[123])"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_realm_domain_events(self) -> None:
        """Verify the realm_domains add/change/remove event sequence for an
        allowed email domain."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm_domains')),
            ('op', equals('add')),
            ('realm_domain', check_dict_only([
                ('domain', check_string),
                ('allow_subdomains', check_bool),
            ])),
        ])
        events = self.do_test(lambda: do_add_realm_domain(
            self.user_profile.realm, 'zulip.org', False))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

        # Toggling allow_subdomains emits an op=change event with the
        # exact new values.
        schema_checker = self.check_events_dict([
            ('type', equals('realm_domains')),
            ('op', equals('change')),
            ('realm_domain', check_dict_only([
                ('domain', equals('zulip.org')),
                ('allow_subdomains', equals(True)),
            ])),
        ])
        test_domain = RealmDomain.objects.get(realm=self.user_profile.realm,
                                              domain='zulip.org')
        events = self.do_test(lambda: do_change_realm_domain(test_domain, True))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

        # Removing the domain emits op=remove with just the domain name.
        schema_checker = self.check_events_dict([
            ('type', equals('realm_domains')),
            ('op', equals('remove')),
            ('domain', equals('zulip.org')),
        ])
        events = self.do_test(lambda: do_remove_realm_domain(test_domain))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
def test_create_bot(self) -> None:
def get_bot_created_checker(bot_type: str) -> Validator:
if bot_type == "GENERIC_BOT":
check_services = check_list(sub_validator=None, length=0)
elif bot_type == "OUTGOING_WEBHOOK_BOT":
check_services = check_list(check_dict_only([
('base_url', check_url),
('interface', check_int),
('token', check_string),
]), length=1)
elif bot_type == "EMBEDDED_BOT":
check_services = check_list(check_dict_only([
('service_name', check_string),
('config_data', check_dict(value_validator=check_string)),
]), length=1)
return self.check_events_dict([
('type', equals('realm_bot')),
('op', equals('add')),
('bot', check_dict_only([
('email', check_string),
('user_id', check_int),
('bot_type', check_int),
('full_name', check_string),
('is_active', check_bool),
('api_key', check_string),
('default_sending_stream', check_none_or(check_string)),
('default_events_register_stream', check_none_or(check_string)),
('default_all_public_streams', check_bool),
('avatar_url', check_string),
('owner', check_string),
('services', check_services),
])),
])
action = lambda: self.create_bot('test')
events = self.do_test(action, num_events=3)
error = get_bot_created_checker(bot_type="GENERIC_BOT")('events[1]', events[1])
self.assert_on_error(error)
action = lambda: self.create_bot('test_outgoing_webhook',
full_name='Outgoing Webhook Bot',
payload_url=ujson.dumps('https://foo.bar.com'),
interface_type=Service.GENERIC,
bot_type=UserProfile.OUTGOING_WEBHOOK_BOT)
events = self.do_test(action, num_events=3)
# The third event is the second call of notify_created_bot, which contains additional
# data for services (in contrast to the first call).
error = get_bot_created_checker(bot_type="OUTGOING_WEBHOOK_BOT")('events[2]', events[2])
self.assert_on_error(error)
action = lambda: self.create_bot('test_embedded',
full_name='Embedded Bot',
service_name='helloworld',
config_data=ujson.dumps({'foo': 'bar'}),
bot_type=UserProfile.EMBEDDED_BOT)
events = self.do_test(action, num_events=3)
error = get_bot_created_checker(bot_type="EMBEDDED_BOT")('events[2]', events[2])
self.assert_on_error(error)
def test_change_bot_full_name(self) -> None:
bot = self.create_bot('test')
action = lambda: do_change_full_name(bot, 'New Bot Name', self.user_profile)
events = self.do_test(action, num_events=2)
error = self.realm_bot_schema('full_name', check_string)('events[1]', events[1])
self.assert_on_error(error)
def test_regenerate_bot_api_key(self) -> None:
bot = self.create_bot('test')
action = lambda: do_regenerate_api_key(bot, self.user_profile)
events = self.do_test(action)
error = self.realm_bot_schema('api_key', check_string)('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_avatar_source(self) -> None:
bot = self.create_bot('test')
action = lambda: do_change_avatar_fields(bot, bot.AVATAR_FROM_USER)
events = self.do_test(action, num_events=2)
error = self.realm_bot_schema('avatar_url', check_string)('events[0]', events[0])
self.assertEqual(events[1]['type'], 'realm_user')
self.assert_on_error(error)
def test_change_realm_icon_source(self) -> None:
action = lambda: do_change_icon_source(self.user_profile.realm, Realm.ICON_UPLOADED)
events = self.do_test(action, state_change_expected=True)
schema_checker = self.check_events_dict([
('type', equals('realm')),
('op', equals('update_dict')),
('property', equals('icon')),
('data', check_dict_only([
('icon_url', check_string),
('icon_source', check_string),
])),
])
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_realm_day_mode_logo_source(self) -> None:
action = lambda: do_change_logo_source(self.user_profile.realm, Realm.LOGO_UPLOADED, False)
events = self.do_test(action, state_change_expected=True)
schema_checker = self.check_events_dict([
('type', equals('realm')),
('op', equals('update_dict')),
('property', equals('logo')),
('data', check_dict_only([
('logo_url', check_string),
('logo_source', check_string),
])),
])
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_realm_night_mode_logo_source(self) -> None:
action = lambda: do_change_logo_source(self.user_profile.realm, Realm.LOGO_UPLOADED, True)
events = self.do_test(action, state_change_expected=True)
schema_checker = self.check_events_dict([
('type', equals('realm')),
('op', equals('update_dict')),
('property', equals('night_logo')),
('data', check_dict_only([
('night_logo_url', check_string),
('night_logo_source', check_string),
])),
])
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_default_all_public_streams(self) -> None:
bot = self.create_bot('test')
action = lambda: do_change_default_all_public_streams(bot, True)
events = self.do_test(action)
error = self.realm_bot_schema('default_all_public_streams', check_bool)('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_default_sending_stream(self) -> None:
bot = self.create_bot('test')
stream = get_stream("Rome", bot.realm)
action = lambda: do_change_default_sending_stream(bot, stream)
events = self.do_test(action)
error = self.realm_bot_schema('default_sending_stream', check_string)('events[0]', events[0])
self.assert_on_error(error)
action = lambda: do_change_default_sending_stream(bot, None)
events = self.do_test(action)
error = self.realm_bot_schema('default_sending_stream', equals(None))('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_default_events_register_stream(self) -> None:
bot = self.create_bot('test')
stream = get_stream("Rome", bot.realm)
action = lambda: do_change_default_events_register_stream(bot, stream)
events = self.do_test(action)
error = self.realm_bot_schema('default_events_register_stream', check_string)('events[0]', events[0])
self.assert_on_error(error)
action = lambda: do_change_default_events_register_stream(bot, None)
events = self.do_test(action)
error = self.realm_bot_schema('default_events_register_stream', equals(None))('events[0]', events[0])
self.assert_on_error(error)
    def test_change_bot_owner(self) -> None:
        """Verify the events emitted when a bot's owner changes, across
        three perspectives: the current user keeps access (update event),
        loses access (delete event), or gains access (add event)."""
        change_bot_owner_checker_user = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('user_id', check_int),
                ('bot_owner_id', check_int),
            ])),
        ])
        change_bot_owner_checker_bot = self.check_events_dict([
            ('type', equals('realm_bot')),
            ('op', equals('update')),
            ('bot', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
                ('owner_id', check_int),
            ])),
        ])
        # Case 1: an admin (iago) transfers ownership but, being an admin,
        # still sees the bot -> realm_bot/update.
        self.user_profile = self.example_user('iago')
        owner = self.example_user('hamlet')
        bot = self.create_bot('test')
        action = lambda: do_change_bot_owner(bot, owner, self.user_profile)
        events = self.do_test(action, num_events=2)
        error = change_bot_owner_checker_bot('events[0]', events[0])
        self.assert_on_error(error)
        error = change_bot_owner_checker_user('events[1]', events[1])
        self.assert_on_error(error)

        # Case 2: a non-admin previous owner (aaron) loses access to the
        # bot -> realm_bot/delete.
        change_bot_owner_checker_bot = self.check_events_dict([
            ('type', equals('realm_bot')),
            ('op', equals('delete')),
            ('bot', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
            ])),
        ])
        self.user_profile = self.example_user('aaron')
        owner = self.example_user('hamlet')
        bot = self.create_bot('test1', full_name='Test1 Testerson')
        action = lambda: do_change_bot_owner(bot, owner, self.user_profile)
        events = self.do_test(action, num_events=2)
        error = change_bot_owner_checker_bot('events[0]', events[0])
        self.assert_on_error(error)
        error = change_bot_owner_checker_user('events[1]', events[1])
        self.assert_on_error(error)

        # Case 3: the new owner (hamlet) gains access to the bot
        # -> realm_bot/add with the full bot payload.
        check_services = check_list(sub_validator=None, length=0)
        change_bot_owner_checker_bot = self.check_events_dict([
            ('type', equals('realm_bot')),
            ('op', equals('add')),
            ('bot', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
                ('bot_type', check_int),
                ('full_name', check_string),
                ('is_active', check_bool),
                ('api_key', check_string),
                ('default_sending_stream', check_none_or(check_string)),
                ('default_events_register_stream', check_none_or(check_string)),
                ('default_all_public_streams', check_bool),
                ('avatar_url', check_string),
                ('owner', check_string),
                ('services', check_services),
            ])),
        ])
        previous_owner = self.example_user('aaron')
        self.user_profile = self.example_user('hamlet')
        bot = self.create_test_bot('test2', previous_owner, full_name='Test2 Testerson')
        action = lambda: do_change_bot_owner(bot, self.user_profile, previous_owner)
        events = self.do_test(action, num_events=2)
        error = change_bot_owner_checker_bot('events[0]', events[0])
        self.assert_on_error(error)
        error = change_bot_owner_checker_user('events[1]', events[1])
        self.assert_on_error(error)
def test_do_update_outgoing_webhook_service(self):
# type: () -> None
update_outgoing_webhook_service_checker = self.check_events_dict([
('type', equals('realm_bot')),
('op', equals('update')),
('bot', check_dict_only([
('email', check_string),
('user_id', check_int),
('services', check_list(check_dict_only([
('base_url', check_url),
('interface', check_int),
('token', check_string),
]))),
])),
])
self.user_profile = self.example_user('iago')
bot = self.create_test_bot('test', self.user_profile,
full_name='Test Bot',
bot_type=UserProfile.OUTGOING_WEBHOOK_BOT,
payload_url=ujson.dumps('http://hostname.domain2.com'),
interface_type=Service.GENERIC,
)
action = lambda: do_update_outgoing_webhook_service(bot, 2, 'http://hostname.domain2.com')
events = self.do_test(action)
error = update_outgoing_webhook_service_checker('events[0]', events[0])
self.assert_on_error(error)
def test_do_deactivate_user(self) -> None:
bot_deactivate_checker = self.check_events_dict([
('type', equals('realm_bot')),
('op', equals('remove')),
('bot', check_dict_only([
('email', check_string),
('full_name', check_string),
('user_id', check_int),
])),
])
bot = self.create_bot('test')
action = lambda: do_deactivate_user(bot)
events = self.do_test(action, num_events=2)
error = bot_deactivate_checker('events[1]', events[1])
self.assert_on_error(error)
    def test_do_reactivate_user(self) -> None:
        """Reactivating a deactivated bot emits a realm_bot/add event with
        the full bot payload (the second of two events)."""
        bot_reactivate_checker = self.check_events_dict([
            ('type', equals('realm_bot')),
            ('op', equals('add')),
            ('bot', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
                ('bot_type', check_int),
                ('full_name', check_string),
                ('is_active', check_bool),
                ('api_key', check_string),
                ('default_sending_stream', check_none_or(check_string)),
                ('default_events_register_stream', check_none_or(check_string)),
                ('default_all_public_streams', check_bool),
                ('avatar_url', check_string),
                ('owner', check_none_or(check_string)),
                ('services', check_list(check_dict_only([
                    ('base_url', check_url),
                    ('interface', check_int),
                ]))),
            ])),
        ])
        bot = self.create_bot('test')
        # Deactivate first so the reactivation below is a real change.
        do_deactivate_user(bot)
        action = lambda: do_reactivate_user(bot)
        events = self.do_test(action, num_events=2)
        error = bot_reactivate_checker('events[1]', events[1])
        self.assert_on_error(error)
def test_do_mark_hotspot_as_read(self) -> None:
self.user_profile.tutorial_status = UserProfile.TUTORIAL_WAITING
self.user_profile.save(update_fields=['tutorial_status'])
schema_checker = self.check_events_dict([
('type', equals('hotspots')),
('hotspots', check_list(check_dict_only([
('name', check_string),
('title', check_string),
('description', check_string),
('delay', check_float),
]))),
])
events = self.do_test(lambda: do_mark_hotspot_as_read(self.user_profile, 'intro_reply'))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_rename_stream(self) -> None:
        """Verify the three events a stream rename produces: the refreshed
        email_address, the name change itself, and the Notification Bot
        message announcing the rename."""
        stream = self.make_stream('old_name')
        new_name = u'stream with a brand new name'
        self.subscribe(self.user_profile, stream.name)
        notification = '<p><span class="user-mention silent" data-user-id="{user_id}">King Hamlet</span> renamed stream <strong>old_name</strong> to <strong>stream with a brand new name</strong>.</p>'
        notification = notification.format(user_id=self.user_profile.id)
        action = lambda: do_rename_stream(stream, new_name, self.user_profile)
        events = self.do_test(action, num_events=3)

        # Event 0: the stream's email address changes with its name.
        schema_checker = self.check_events_dict([
            ('type', equals('stream')),
            ('op', equals('update')),
            ('property', equals('email_address')),
            ('value', check_string),
            ('stream_id', check_int),
            ('name', equals('old_name')),
        ])
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

        # Event 1: the rename itself (old name in 'name', new in 'value').
        schema_checker = self.check_events_dict([
            ('type', equals('stream')),
            ('op', equals('update')),
            ('property', equals('name')),
            ('value', equals(new_name)),
            ('name', equals('old_name')),
            ('stream_id', check_int),
        ])
        error = schema_checker('events[1]', events[1])
        self.assert_on_error(error)

        # Event 2: Notification Bot posts the rename announcement to the
        # stream's "stream events" topic.
        schema_checker = check_dict([
            ('flags', check_list(check_string)),
            ('type', equals('message')),
            ('message', check_dict([
                ('timestamp', check_int),
                ('content', equals(notification)),
                ('content_type', equals('text/html')),
                ('sender_email', equals('notification-bot@zulip.com')),
                ('sender_id', check_int),
                ('sender_short_name', equals('notification-bot')),
                ('display_recipient', equals(new_name)),
                ('id', check_int),
                ('stream_id', check_int),
                ('sender_realm_str', check_string),
                ('sender_full_name', equals('Notification Bot')),
                ('is_me_message', equals(False)),
                ('type', equals('stream')),
                ('submessages', check_list(check_string)),
                (TOPIC_LINKS, check_list(check_url)),
                ('avatar_url', check_url),
                ('reactions', check_list(None)),
                ('client', equals('Internal')),
                (TOPIC_NAME, equals('stream events')),
                ('recipient_id', check_int)
            ])),
            ('id', check_int)
        ])
        error = schema_checker('events[2]', events[2])
        self.assert_on_error(error)
def test_deactivate_stream_neversubscribed(self) -> None:
stream = self.make_stream('old_name')
action = lambda: do_deactivate_stream(stream)
events = self.do_test(action)
schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('delete')),
('streams', check_list(check_dict([]))),
])
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_subscribe_other_user_never_subscribed(self) -> None:
action = lambda: self.subscribe(self.example_user("othello"), u"test_stream")
events = self.do_test(action, num_events=2)
peer_add_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('peer_add')),
('user_id', check_int),
('subscriptions', check_list(check_string)),
])
error = peer_add_schema_checker('events[1]', events[1])
self.assert_on_error(error)
@slow("Actually several tests combined together")
def test_subscribe_events(self) -> None:
self.do_test_subscribe_events(include_subscribers=True)
@slow("Actually several tests combined together")
def test_subscribe_events_no_include_subscribers(self) -> None:
self.do_test_subscribe_events(include_subscribers=False)
def do_test_subscribe_events(self, include_subscribers: bool) -> None:
subscription_fields = [
('color', check_string),
('description', check_string),
('rendered_description', check_string),
('email_address', check_string),
('invite_only', check_bool),
('is_web_public', check_bool),
('is_announcement_only', check_bool),
('is_muted', check_bool),
('in_home_view', check_bool),
('name', check_string),
('audible_notifications', check_none_or(check_bool)),
('email_notifications', check_none_or(check_bool)),
('desktop_notifications', check_none_or(check_bool)),
('push_notifications', check_none_or(check_bool)),
('stream_id', check_int),
('first_message_id', check_none_or(check_int)),
('history_public_to_subscribers', check_bool),
('pin_to_top', check_bool),
('stream_weekly_traffic', check_none_or(check_int)),
('is_old_stream', check_bool),
]
if include_subscribers:
subscription_fields.append(('subscribers', check_list(check_int)))
subscription_schema_checker = check_list(
check_dict_only(subscription_fields),
)
stream_create_schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('create')),
('streams', check_list(check_dict_only([
('name', check_string),
('stream_id', check_int),
('invite_only', check_bool),
('description', check_string),
('rendered_description', check_string),
]))),
])
add_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('add')),
('subscriptions', subscription_schema_checker),
])
remove_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('remove')),
('subscriptions', check_list(
check_dict_only([
('name', equals('test_stream')),
('stream_id', check_int),
]),
)),
])
peer_add_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('peer_add')),
('user_id', check_int),
('subscriptions', check_list(check_string)),
])
peer_remove_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('peer_remove')),
('user_id', check_int),
('subscriptions', check_list(check_string)),
])
stream_update_schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('description')),
('value', check_string),
('rendered_description', check_string),
('stream_id', check_int),
('name', check_string),
])
stream_update_invite_only_schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('invite_only')),
('stream_id', check_int),
('name', check_string),
('value', check_bool),
('history_public_to_subscribers', check_bool),
])
stream_update_is_announcement_only_schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('is_announcement_only')),
('stream_id', check_int),
('name', check_string),
('value', check_bool),
])
# Subscribe to a totally new stream, so it's just Hamlet on it
action = lambda: self.subscribe(self.example_user("hamlet"), "test_stream") # type: Callable[[], object]
events = self.do_test(action, event_types=["subscription", "realm_user"],
include_subscribers=include_subscribers)
error = add_schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Add another user to that totally new stream
action = lambda: self.subscribe(self.example_user("othello"), "test_stream")
events = self.do_test(action,
include_subscribers=include_subscribers,
state_change_expected=include_subscribers,
)
error = peer_add_schema_checker('events[0]', events[0])
self.assert_on_error(error)
stream = get_stream("test_stream", self.user_profile.realm)
# Now remove the first user, to test the normal unsubscribe flow
action = lambda: bulk_remove_subscriptions(
[self.example_user('othello')],
[stream],
get_client("website"))
events = self.do_test(action,
include_subscribers=include_subscribers,
state_change_expected=include_subscribers,
)
error = peer_remove_schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Now remove the second user, to test the 'vacate' event flow
action = lambda: bulk_remove_subscriptions(
[self.example_user('hamlet')],
[stream],
get_client("website"))
events = self.do_test(action,
include_subscribers=include_subscribers,
num_events=3)
error = remove_schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Now resubscribe a user, to make sure that works on a vacated stream
action = lambda: self.subscribe(self.example_user("hamlet"), "test_stream")
events = self.do_test(action,
include_subscribers=include_subscribers,
num_events=2)
error = add_schema_checker('events[1]', events[1])
self.assert_on_error(error)
action = lambda: do_change_stream_description(stream, u'new description')
events = self.do_test(action,
include_subscribers=include_subscribers)
error = stream_update_schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Update stream privacy
action = lambda: do_change_stream_invite_only(stream, True, history_public_to_subscribers=True)
events = self.do_test(action,
include_subscribers=include_subscribers)
error = stream_update_invite_only_schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Update stream is_announcement_only property
action = lambda: do_change_stream_announcement_only(stream, True)
events = self.do_test(action,
include_subscribers=include_subscribers)
error = stream_update_is_announcement_only_schema_checker('events[0]', events[0])
self.assert_on_error(error)
# Subscribe to a totally new invite-only stream, so it's just Hamlet on it
stream = self.make_stream("private", self.user_profile.realm, invite_only=True)
user_profile = self.example_user('hamlet')
action = lambda: bulk_add_subscriptions([stream], [user_profile])
events = self.do_test(action, include_subscribers=include_subscribers,
num_events=2)
error = stream_create_schema_checker('events[0]', events[0])
error = add_schema_checker('events[1]', events[1])
self.assert_on_error(error)
def test_do_delete_message_stream(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('delete_message')),
('message_id', check_int),
('sender', check_string),
('sender_id', check_int),
('message_type', equals("stream")),
('stream_id', check_int),
('topic', check_string),
])
msg_id = self.send_stream_message("hamlet@zulip.com", "Verona")
message = Message.objects.get(id=msg_id)
events = self.do_test(
lambda: do_delete_messages(self.user_profile, [message]),
state_change_expected=True,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_do_delete_message_personal(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('delete_message')),
('message_id', check_int),
('sender', check_string),
('sender_id', check_int),
('message_type', equals("private")),
('recipient_id', check_int),
])
msg_id = self.send_personal_message(
self.example_email("cordelia"),
self.user_profile.email,
"hello",
)
message = Message.objects.get(id=msg_id)
events = self.do_test(
lambda: do_delete_messages(self.user_profile, [message]),
state_change_expected=True,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_do_delete_message_no_max_id(self) -> None:
user_profile = self.example_user('aaron')
# Delete all historical messages for this user
user_profile = self.example_user('hamlet')
UserMessage.objects.filter(user_profile=user_profile).delete()
msg_id = self.send_stream_message("hamlet@zulip.com", "Verona")
message = Message.objects.get(id=msg_id)
self.do_test(
lambda: do_delete_messages(self.user_profile, [message]),
state_change_expected=True,
)
result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
self.assertEqual(result['max_message_id'], -1)
    def test_add_attachment(self) -> None:
        """Walk an attachment through its lifecycle and verify each
        `attachment` event: 'add' on upload, 'update' when a message
        references it, and 'remove' when it is deleted."""
        schema_checker = self.check_events_dict([
            ('type', equals('attachment')),
            ('op', equals('add')),
            ('attachment', check_dict_only([
                ('id', check_int),
                ('name', check_string),
                ('size', check_int),
                ('path_id', check_string),
                ('create_time', check_float),
                ('messages', check_list(check_dict_only([
                    ('id', check_int),
                    ('name', check_float),
                ]))),
            ])),
            ('upload_space_used', equals(6)),
        ])
        self.login(self.example_email("hamlet"))
        fp = StringIO("zulip!")
        fp.name = "zulip.txt"
        # Mutable container so the closure below can hand the upload URI
        # back to the enclosing test body.
        data = {'uri': None}
        def do_upload() -> None:
            # Upload the file via the API and record the resulting URI.
            result = self.client_post("/json/user_uploads", {'file': fp})
            self.assert_json_success(result)
            self.assertIn("uri", result.json())
            uri = result.json()["uri"]
            base = '/user_uploads/'
            self.assertEqual(base, uri[:len(base)])
            data['uri'] = uri
        events = self.do_test(
            lambda: do_upload(),
            num_events=1, state_change_expected=False)
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Verify that the DB has the attachment marked as unclaimed
        entry = Attachment.objects.get(file_name='zulip.txt')
        self.assertEqual(entry.is_claimed(), False)
        # Now we send an actual message using this attachment.
        schema_checker = self.check_events_dict([
            ('type', equals('attachment')),
            ('op', equals('update')),
            ('attachment', check_dict_only([
                ('id', check_int),
                ('name', check_string),
                ('size', check_int),
                ('path_id', check_string),
                ('create_time', check_float),
                ('messages', check_list(check_dict_only([
                    ('id', check_int),
                    ('name', check_float),
                ]))),
            ])),
            ('upload_space_used', equals(6)),
        ])
        self.subscribe(self.example_user("hamlet"), "Denmark")
        body = "First message ...[zulip.txt](http://localhost:9991" + data['uri'] + ")"
        events = self.do_test(
            lambda: self.send_stream_message(self.example_email("hamlet"), "Denmark", body, "test"),
            num_events=2)
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Now remove the attachment
        schema_checker = self.check_events_dict([
            ('type', equals('attachment')),
            ('op', equals('remove')),
            ('attachment', check_dict_only([
                ('id', check_int),
            ])),
            ('upload_space_used', equals(0)),
        ])
        events = self.do_test(
            lambda: self.client_delete("/json/attachments/%s" % (entry.id,)),
            num_events=1, state_change_expected=False)
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_notify_realm_export(self) -> None:
        """Starting and then deleting a realm export should each emit a
        `realm_export` event; deletion is distinguished by a non-None
        deleted_timestamp."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm_export')),
            ('exports', check_list(check_dict_only([
                ('id', check_int),
                ('export_time', check_float),
                ('acting_user_id', check_int),
                ('export_url', check_string),
                ('deleted_timestamp', equals(None)),
            ]))),
        ])
        # Exports require admin rights; promote the test user first.
        do_change_is_admin(self.user_profile, True)
        self.login(self.user_profile.email)
        # Stub out the actual (expensive) export with a dummy tarball.
        with mock.patch('zerver.lib.export.do_export_realm',
                        return_value=create_dummy_file('test-export.tar.gz')):
            with stdout_suppressed():
                events = self.do_test(
                    lambda: self.client_post('/json/export/realm'),
                    state_change_expected=True, num_events=2)
        # The first event is a message from notification-bot.
        error = schema_checker('events[1]', events[1])
        self.assert_on_error(error)
        # Now we check the deletion of the export.
        deletion_schema_checker = self.check_events_dict([
            ('type', equals('realm_export')),
            ('exports', check_list(check_dict_only([
                ('id', check_int),
                ('export_time', check_float),
                ('acting_user_id', check_int),
                ('export_url', check_string),
                ('deleted_timestamp', check_float),
            ]))),
        ])
        audit_log_entry = RealmAuditLog.objects.filter(
            event_type=RealmAuditLog.REALM_EXPORTED).first()
        events = self.do_test(
            lambda: self.client_delete('/json/export/realm/{id}'.format(id=audit_log_entry.id)),
            state_change_expected=False, num_events=1)
        error = deletion_schema_checker('events[0]', events[0])
        self.assert_on_error(error)
class FetchInitialStateDataTest(ZulipTestCase):
    """Direct tests of fetch_initial_state_data(): bot visibility,
    max_message_id edge cases, and delivery_email exposure rules."""
    # Non-admin users don't have access to all bots
    def test_realm_bots_non_admin(self) -> None:
        user_profile = self.example_user('cordelia')
        self.assertFalse(user_profile.is_realm_admin)
        result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
        self.assert_length(result['realm_bots'], 0)
        # additionally the API key for a random bot is not present in the data
        api_key = get_api_key(self.notification_bot())
        self.assertNotIn(api_key, str(result))
    # Admin users have access to all bots in the realm_bots field
    def test_realm_bots_e(self) -> None:
        user_profile = self.example_user('hamlet')
        do_change_is_admin(user_profile, True)
        self.assertTrue(user_profile.is_realm_admin)
        result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
        self.assertTrue(len(result['realm_bots']) > 2)
    def test_max_message_id_with_no_history(self) -> None:
        """A user with no messages at all should see max_message_id == -1."""
        user_profile = self.example_user('aaron')
        # Delete all historical messages for this user
        UserMessage.objects.filter(user_profile=user_profile).delete()
        result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
        self.assertEqual(result['max_message_id'], -1)
    def test_delivery_email_presence_for_non_admins(self) -> None:
        """Non-admins never see delivery_email, under either visibility setting."""
        user_profile = self.example_user('aaron')
        self.assertFalse(user_profile.is_realm_admin)
        do_set_realm_property(user_profile.realm, "email_address_visibility",
                              Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE)
        result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
        for key, value in result['raw_users'].items():
            self.assertNotIn('delivery_email', value)
        do_set_realm_property(user_profile.realm, "email_address_visibility",
                              Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
        result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
        for key, value in result['raw_users'].items():
            self.assertNotIn('delivery_email', value)
    def test_delivery_email_presence_for_admins(self) -> None:
        """Admins see delivery_email only under the ADMINS visibility setting."""
        user_profile = self.example_user('iago')
        self.assertTrue(user_profile.is_realm_admin)
        do_set_realm_property(user_profile.realm, "email_address_visibility",
                              Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE)
        result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
        for key, value in result['raw_users'].items():
            self.assertNotIn('delivery_email', value)
        do_set_realm_property(user_profile.realm, "email_address_visibility",
                              Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
        result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
        for key, value in result['raw_users'].items():
            self.assertIn('delivery_email', value)
class GetUnreadMsgsTest(ZulipTestCase):
    """Tests for get_raw_unread_data()/aggregate_unread_data(), covering
    stream, huddle, and one-on-one unreads plus stream/topic muting and
    mention flags."""
    def mute_stream(self, user_profile: UserProfile, stream: Stream) -> None:
        # Flip the is_muted flag directly on the user's subscription row.
        recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
        subscription = Subscription.objects.get(
            user_profile=user_profile,
            recipient=recipient
        )
        subscription.is_muted = True
        subscription.save()
    def mute_topic(self, user_profile: UserProfile, stream_name: str,
                   topic_name: str) -> None:
        # Record a topic mute via the topic_mutes helper.
        realm = user_profile.realm
        stream = get_stream(stream_name, realm)
        recipient = get_stream_recipient(stream.id)
        add_topic_mute(
            user_profile=user_profile,
            stream_id=stream.id,
            recipient_id=recipient.id,
            topic_name=topic_name,
        )
    def test_raw_unread_stream(self) -> None:
        """stream_dict should contain all unread stream messages, while
        unmuted_stream_msgs excludes muted streams and muted topics."""
        cordelia = self.example_user('cordelia')
        hamlet = self.example_user('hamlet')
        realm = hamlet.realm
        for stream_name in ['social', 'devel', 'test here']:
            self.subscribe(hamlet, stream_name)
            self.subscribe(cordelia, stream_name)
        all_message_ids = set()  # type: Set[int]
        message_ids = dict()
        tups = [
            ('social', 'lunch'),
            ('test here', 'bla'),
            ('devel', 'python'),
            ('devel', 'ruby'),
        ]
        # Send three messages to each (stream, topic) pair above.
        for stream_name, topic_name in tups:
            message_ids[topic_name] = [
                self.send_stream_message(
                    sender_email=cordelia.email,
                    stream_name=stream_name,
                    topic_name=topic_name,
                ) for i in range(3)
            ]
            all_message_ids |= set(message_ids[topic_name])
        self.assertEqual(len(all_message_ids), 12)  # sanity check on test setup
        self.mute_stream(
            user_profile=hamlet,
            stream=get_stream('test here', realm),
        )
        self.mute_topic(
            user_profile=hamlet,
            stream_name='devel',
            topic_name='ruby',
        )
        raw_unread_data = get_raw_unread_data(
            user_profile=hamlet,
        )
        stream_dict = raw_unread_data['stream_dict']
        # Muted messages still appear in stream_dict ...
        self.assertEqual(
            set(stream_dict.keys()),
            all_message_ids,
        )
        # ... but only unmuted ones count in unmuted_stream_msgs.
        self.assertEqual(
            raw_unread_data['unmuted_stream_msgs'],
            set(message_ids['python']) | set(message_ids['lunch']),
        )
        self.assertEqual(
            stream_dict[message_ids['lunch'][0]],
            dict(
                sender_id=cordelia.id,
                stream_id=get_stream('social', realm).id,
                topic='lunch',
            )
        )
    def test_raw_unread_huddle(self) -> None:
        """huddle_dict keys on message id and records the sorted,
        comma-joined user-id string for the huddle."""
        cordelia = self.example_user('cordelia')
        othello = self.example_user('othello')
        hamlet = self.example_user('hamlet')
        prospero = self.example_user('prospero')
        huddle1_message_ids = [
            self.send_huddle_message(
                cordelia.email,
                [hamlet.email, othello.email]
            )
            for i in range(3)
        ]
        huddle2_message_ids = [
            self.send_huddle_message(
                cordelia.email,
                [hamlet.email, prospero.email]
            )
            for i in range(3)
        ]
        raw_unread_data = get_raw_unread_data(
            user_profile=hamlet,
        )
        huddle_dict = raw_unread_data['huddle_dict']
        self.assertEqual(
            set(huddle_dict.keys()),
            set(huddle1_message_ids) | set(huddle2_message_ids)
        )
        huddle_string = ','.join(
            str(uid)
            for uid in sorted([cordelia.id, hamlet.id, othello.id])
        )
        self.assertEqual(
            huddle_dict[huddle1_message_ids[0]],
            dict(user_ids_string=huddle_string),
        )
    def test_raw_unread_personal(self) -> None:
        """pm_dict keys on message id and records the sender's id."""
        cordelia = self.example_user('cordelia')
        othello = self.example_user('othello')
        hamlet = self.example_user('hamlet')
        cordelia_pm_message_ids = [
            self.send_personal_message(cordelia.email, hamlet.email)
            for i in range(3)
        ]
        othello_pm_message_ids = [
            self.send_personal_message(othello.email, hamlet.email)
            for i in range(3)
        ]
        raw_unread_data = get_raw_unread_data(
            user_profile=hamlet,
        )
        pm_dict = raw_unread_data['pm_dict']
        self.assertEqual(
            set(pm_dict.keys()),
            set(cordelia_pm_message_ids) | set(othello_pm_message_ids)
        )
        self.assertEqual(
            pm_dict[cordelia_pm_message_ids[0]],
            dict(sender_id=cordelia.id),
        )
    def test_unread_msgs(self) -> None:
        """End-to-end check of the aggregated unread payload (counts, pms,
        streams, huddles, mentions) and how mention-type flags interact
        with muted streams and muted topics."""
        cordelia = self.example_user('cordelia')
        sender_id = cordelia.id
        sender_email = cordelia.email
        user_profile = self.example_user('hamlet')
        othello = self.example_user('othello')
        # our tests rely on order
        assert(sender_email < user_profile.email)
        assert(user_profile.email < othello.email)
        pm1_message_id = self.send_personal_message(sender_email, user_profile.email, "hello1")
        pm2_message_id = self.send_personal_message(sender_email, user_profile.email, "hello2")
        muted_stream = self.subscribe(user_profile, 'Muted Stream')
        self.mute_stream(user_profile, muted_stream)
        self.mute_topic(user_profile, 'Denmark', 'muted-topic')
        stream_message_id = self.send_stream_message(sender_email, "Denmark", "hello")
        muted_stream_message_id = self.send_stream_message(sender_email, "Muted Stream", "hello")
        muted_topic_message_id = self.send_stream_message(
            sender_email,
            "Denmark",
            topic_name="muted-topic",
            content="hello",
        )
        huddle_message_id = self.send_huddle_message(
            sender_email,
            [user_profile.email, othello.email],
            'hello3',
        )
        def get_unread_data() -> UnreadMessagesResult:
            # Re-fetch and re-aggregate after each flag mutation below.
            raw_unread_data = get_raw_unread_data(user_profile)
            aggregated_data = aggregate_unread_data(raw_unread_data)
            return aggregated_data
        result = get_unread_data()
        # The count here reflects the count of unread messages that we will
        # report to users in the bankruptcy dialog, and for now it excludes unread messages
        # from muted streams, but it doesn't exclude unread messages from muted topics yet.
        self.assertEqual(result['count'], 4)
        unread_pm = result['pms'][0]
        self.assertEqual(unread_pm['sender_id'], sender_id)
        self.assertEqual(unread_pm['unread_message_ids'], [pm1_message_id, pm2_message_id])
        self.assertTrue('sender_ids' not in unread_pm)
        unread_stream = result['streams'][0]
        self.assertEqual(unread_stream['stream_id'], get_stream('Denmark', user_profile.realm).id)
        self.assertEqual(unread_stream['topic'], 'muted-topic')
        self.assertEqual(unread_stream['unread_message_ids'], [muted_topic_message_id])
        self.assertEqual(unread_stream['sender_ids'], [sender_id])
        unread_stream = result['streams'][1]
        self.assertEqual(unread_stream['stream_id'], get_stream('Denmark', user_profile.realm).id)
        self.assertEqual(unread_stream['topic'], 'test')
        self.assertEqual(unread_stream['unread_message_ids'], [stream_message_id])
        self.assertEqual(unread_stream['sender_ids'], [sender_id])
        unread_stream = result['streams'][2]
        self.assertEqual(unread_stream['stream_id'], get_stream('Muted Stream', user_profile.realm).id)
        self.assertEqual(unread_stream['topic'], 'test')
        self.assertEqual(unread_stream['unread_message_ids'], [muted_stream_message_id])
        self.assertEqual(unread_stream['sender_ids'], [sender_id])
        huddle_string = ','.join(str(uid) for uid in sorted([sender_id, user_profile.id, othello.id]))
        unread_huddle = result['huddles'][0]
        self.assertEqual(unread_huddle['user_ids_string'], huddle_string)
        self.assertEqual(unread_huddle['unread_message_ids'], [huddle_message_id])
        self.assertTrue('sender_ids' not in unread_huddle)
        self.assertEqual(result['mentions'], [])
        # Mention flag on an unmuted stream message counts as a mention.
        um = UserMessage.objects.get(
            user_profile_id=user_profile.id,
            message_id=stream_message_id
        )
        um.flags |= UserMessage.flags.mentioned
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [stream_message_id])
        um.flags = UserMessage.flags.has_alert_word
        um.save()
        result = get_unread_data()
        # TODO: This should change when we make alert words work better.
        self.assertEqual(result['mentions'], [])
        um.flags = UserMessage.flags.wildcard_mentioned
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [stream_message_id])
        um.flags = 0
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [])
        # Test with a muted stream
        um = UserMessage.objects.get(
            user_profile_id=user_profile.id,
            message_id=muted_stream_message_id
        )
        um.flags = UserMessage.flags.mentioned
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [muted_stream_message_id])
        # Wildcard mentions in a muted stream do NOT count as mentions.
        um.flags = UserMessage.flags.has_alert_word
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [])
        um.flags = UserMessage.flags.wildcard_mentioned
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [])
        um.flags = 0
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [])
        # Test with a muted topic
        um = UserMessage.objects.get(
            user_profile_id=user_profile.id,
            message_id=muted_topic_message_id
        )
        um.flags = UserMessage.flags.mentioned
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [muted_topic_message_id])
        um.flags = UserMessage.flags.has_alert_word
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [])
        # Wildcard mentions in a muted topic do NOT count as mentions.
        um.flags = UserMessage.flags.wildcard_mentioned
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [])
        um.flags = 0
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [])
class ClientDescriptorsTest(ZulipTestCase):
    """Tests for the Tornado event-queue plumbing: which client descriptors
    receive a message event, and how per-client settings (apply_markdown,
    client_gravatar) shape the delivered payload."""
    def test_get_client_info_for_all_public_streams(self) -> None:
        """A queue registered with all_public_streams=True receives stream
        message events even with an empty users list; is_sender is set when
        the event carries the queue's own sender_queue_id."""
        hamlet = self.example_user('hamlet')
        realm = hamlet.realm
        queue_data = dict(
            all_public_streams=True,
            apply_markdown=True,
            client_gravatar=True,
            client_type_name='website',
            event_types=['message'],
            last_connection_time=time.time(),
            queue_timeout=0,
            realm_id=realm.id,
            user_profile_id=hamlet.id,
        )
        client = allocate_client_descriptor(queue_data)
        message_event = dict(
            realm_id=realm.id,
            stream_name='whatever',
        )
        client_info = get_client_info_for_message_event(
            message_event,
            users=[],
        )
        self.assertEqual(len(client_info), 1)
        dct = client_info[client.event_queue.id]
        self.assertEqual(dct['client'].apply_markdown, True)
        self.assertEqual(dct['client'].client_gravatar, True)
        self.assertEqual(dct['client'].user_profile_id, hamlet.id)
        self.assertEqual(dct['flags'], [])
        self.assertEqual(dct['is_sender'], False)
        message_event = dict(
            realm_id=realm.id,
            stream_name='whatever',
            sender_queue_id=client.event_queue.id,
        )
        client_info = get_client_info_for_message_event(
            message_event,
            users=[],
        )
        dct = client_info[client.event_queue.id]
        self.assertEqual(dct['is_sender'], True)
    def test_get_client_info_for_normal_users(self) -> None:
        """A non-all_public_streams queue is matched only when its owning
        user appears in the event's users list; flags pass through."""
        hamlet = self.example_user('hamlet')
        cordelia = self.example_user('cordelia')
        realm = hamlet.realm
        def test_get_info(apply_markdown: bool, client_gravatar: bool) -> None:
            # Fresh queue per settings combination.
            clear_client_event_queues_for_testing()
            queue_data = dict(
                all_public_streams=False,
                apply_markdown=apply_markdown,
                client_gravatar=client_gravatar,
                client_type_name='website',
                event_types=['message'],
                last_connection_time=time.time(),
                queue_timeout=0,
                realm_id=realm.id,
                user_profile_id=hamlet.id,
            )
            client = allocate_client_descriptor(queue_data)
            message_event = dict(
                realm_id=realm.id,
                stream_name='whatever',
            )
            client_info = get_client_info_for_message_event(
                message_event,
                users=[
                    dict(id=cordelia.id),
                ],
            )
            self.assertEqual(len(client_info), 0)
            client_info = get_client_info_for_message_event(
                message_event,
                users=[
                    dict(id=cordelia.id),
                    dict(id=hamlet.id, flags=['mentioned']),
                ],
            )
            self.assertEqual(len(client_info), 1)
            dct = client_info[client.event_queue.id]
            self.assertEqual(dct['client'].apply_markdown, apply_markdown)
            self.assertEqual(dct['client'].client_gravatar, client_gravatar)
            self.assertEqual(dct['client'].user_profile_id, hamlet.id)
            self.assertEqual(dct['flags'], ['mentioned'])
            self.assertEqual(dct['is_sender'], False)
        test_get_info(apply_markdown=False, client_gravatar=False)
        test_get_info(apply_markdown=True, client_gravatar=False)
        test_get_info(apply_markdown=False, client_gravatar=True)
        test_get_info(apply_markdown=True, client_gravatar=True)
    def test_process_message_event_with_mocked_client_info(self) -> None:
        """process_message_event() should render content per apply_markdown
        and include/omit gravatar URLs per client_gravatar, for each mocked
        client descriptor."""
        hamlet = self.example_user("hamlet")
        class MockClient:
            # Minimal stand-in for a ClientDescriptor: records the events
            # it is handed and accepts every message event.
            def __init__(self, user_profile_id: int,
                         apply_markdown: bool,
                         client_gravatar: bool) -> None:
                self.user_profile_id = user_profile_id
                self.apply_markdown = apply_markdown
                self.client_gravatar = client_gravatar
                self.client_type_name = 'whatever'
                self.events = []  # type: List[Dict[str, Any]]
            def accepts_messages(self) -> bool:
                return True
            def accepts_event(self, event: Dict[str, Any]) -> bool:
                assert(event['type'] == 'message')
                return True
            def add_event(self, event: Dict[str, Any]) -> None:
                self.events.append(event)
        client1 = MockClient(
            user_profile_id=hamlet.id,
            apply_markdown=True,
            client_gravatar=False,
        )
        client2 = MockClient(
            user_profile_id=hamlet.id,
            apply_markdown=False,
            client_gravatar=False,
        )
        client3 = MockClient(
            user_profile_id=hamlet.id,
            apply_markdown=True,
            client_gravatar=True,
        )
        client4 = MockClient(
            user_profile_id=hamlet.id,
            apply_markdown=False,
            client_gravatar=True,
        )
        client_info = {
            'client:1': dict(
                client=client1,
                flags=['starred'],
            ),
            'client:2': dict(
                client=client2,
                flags=['has_alert_word'],
            ),
            'client:3': dict(
                client=client3,
                flags=[],
            ),
            'client:4': dict(
                client=client4,
                flags=[],
            ),
        }
        sender = hamlet
        message_event = dict(
            message_dict=dict(
                id=999,
                content='**hello**',
                rendered_content='<b>hello</b>',
                sender_id=sender.id,
                type='stream',
                client='website',
                # NOTE: Some of these fields are clutter, but some
                # will be useful when we let clients specify
                # that they can compute their own gravatar URLs.
                sender_email=sender.email,
                sender_realm_id=sender.realm_id,
                sender_avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,
                sender_avatar_version=1,
                sender_is_mirror_dummy=None,
                recipient_type=None,
                recipient_type_id=None,
            ),
        )
        # Setting users to `[]` bypasses code we don't care about
        # for this test--we assume client_info is correct in our mocks,
        # and we are interested in how messages are put on event queue.
        users = []  # type: List[Dict[str, Any]]
        with mock.patch('zerver.tornado.event_queue.get_client_info_for_message_event',
                        return_value=client_info):
            process_message_event(message_event, users)
        # We are not closely examining avatar_url at this point, so
        # just sanity check them and then delete the keys so that
        # upcoming comparisons work.
        for client in [client1, client2]:
            message = client.events[0]['message']
            self.assertIn('gravatar.com', message['avatar_url'])
            message.pop('avatar_url')
        self.assertEqual(client1.events, [
            dict(
                type='message',
                message=dict(
                    type='stream',
                    sender_id=sender.id,
                    sender_email=sender.email,
                    id=999,
                    content='<b>hello</b>',
                    content_type='text/html',
                    client='website',
                ),
                flags=['starred'],
            ),
        ])
        self.assertEqual(client2.events, [
            dict(
                type='message',
                message=dict(
                    type='stream',
                    sender_id=sender.id,
                    sender_email=sender.email,
                    id=999,
                    content='**hello**',
                    content_type='text/x-markdown',
                    client='website',
                ),
                flags=['has_alert_word'],
            ),
        ])
        self.assertEqual(client3.events, [
            dict(
                type='message',
                message=dict(
                    type='stream',
                    sender_id=sender.id,
                    sender_email=sender.email,
                    avatar_url=None,
                    id=999,
                    content='<b>hello</b>',
                    content_type='text/html',
                    client='website',
                ),
                flags=[],
            ),
        ])
        self.assertEqual(client4.events, [
            dict(
                type='message',
                message=dict(
                    type='stream',
                    sender_id=sender.id,
                    sender_email=sender.email,
                    avatar_url=None,
                    id=999,
                    content='**hello**',
                    content_type='text/x-markdown',
                    client='website',
                ),
                flags=[],
            ),
        ])
class FetchQueriesTest(ZulipTestCase):
    """Pin the number of database queries fetch_initial_state_data() issues,
    both for a full fetch and per individual event type."""
    def test_queries(self) -> None:
        user = self.example_user("hamlet")
        self.login(user.email)
        flush_per_request_caches()
        with queries_captured() as queries:
            # Patch always_want so we can record which event types the
            # full fetch considers.
            with mock.patch('zerver.lib.events.always_want') as want_mock:
                fetch_initial_state_data(
                    user_profile=user,
                    event_types=None,
                    queue_id='x',
                    client_gravatar=False,
                )
        self.assert_length(queries, 33)
        # Expected query count per event type; update deliberately when
        # adding queries to fetch_initial_state_data.
        expected_counts = dict(
            alert_words=0,
            custom_profile_fields=1,
            default_streams=1,
            default_stream_groups=1,
            hotspots=0,
            message=1,
            muted_topics=1,
            pointer=0,
            presence=3,
            realm=0,
            realm_bot=1,
            realm_domains=1,
            realm_embedded_bots=0,
            realm_incoming_webhook_bots=0,
            realm_emoji=1,
            realm_filters=1,
            realm_user=3,
            realm_user_groups=2,
            recent_private_conversations=2,
            starred_messages=1,
            stream=2,
            stop_words=0,
            subscription=6,
            update_display_settings=0,
            update_global_notifications=0,
            update_message_flags=5,
            user_status=1,
            zulip_version=0,
        )
        # always_want is called once per event type; pull the first
        # positional argument of each call.
        wanted_event_types = {
            item[0][0] for item
            in want_mock.call_args_list
        }
        self.assertEqual(wanted_event_types, set(expected_counts))
        for event_type in sorted(wanted_event_types):
            count = expected_counts[event_type]
            flush_per_request_caches()
            with queries_captured() as queries:
                if event_type == 'update_message_flags':
                    event_types = ['update_message_flags', 'message']
                else:
                    event_types = [event_type]
                fetch_initial_state_data(
                    user_profile=user,
                    event_types=event_types,
                    queue_id='x',
                    client_gravatar=False,
                )
            self.assert_length(queries, count)
class TestEventsRegisterAllPublicStreamsDefaults(ZulipTestCase):
    """Exercise _default_all_public_streams() across every combination of
    the user's saved default and the value passed by the client (None means
    "fall back to the user's default")."""

    def setUp(self) -> None:
        super().setUp()
        self.user_profile = self.example_user('hamlet')
        self.email = self.user_profile.email

    def _resolve(self, default: bool, passed: Optional[bool]) -> bool:
        # Persist the user's stored preference, then ask the view helper
        # what effective value a request carrying `passed` should get.
        self.user_profile.default_all_public_streams = default
        self.user_profile.save()
        return _default_all_public_streams(self.user_profile, passed)

    def test_use_passed_all_public_true_default_false(self) -> None:
        self.assertTrue(self._resolve(default=False, passed=True))

    def test_use_passed_all_public_true_default(self) -> None:
        self.assertTrue(self._resolve(default=True, passed=True))

    def test_use_passed_all_public_false_default_false(self) -> None:
        self.assertFalse(self._resolve(default=False, passed=False))

    def test_use_passed_all_public_false_default_true(self) -> None:
        self.assertFalse(self._resolve(default=True, passed=False))

    def test_use_true_default_for_none(self) -> None:
        self.assertTrue(self._resolve(default=True, passed=None))

    def test_use_false_default_for_none(self) -> None:
        self.assertFalse(self._resolve(default=False, passed=None))
class TestEventsRegisterNarrowDefaults(ZulipTestCase):
    """Exercise _default_narrow(): a narrow supplied by the client always
    wins; an empty narrow falls back to the user's default events-register
    stream when one is configured."""

    def setUp(self) -> None:
        super().setUp()
        self.user_profile = self.example_user('hamlet')
        self.email = self.user_profile.email
        self.stream = get_stream('Verona', self.user_profile.realm)

    def _narrow_for(self, default_stream_id: Optional[int],
                    narrow: List[List[str]]) -> List[List[str]]:
        # Persist the user's default stream (or clear it), then resolve
        # the narrow the same way the events-register view would.
        self.user_profile.default_events_register_stream_id = default_stream_id
        self.user_profile.save()
        return _default_narrow(self.user_profile, narrow)

    def test_use_passed_narrow_no_default(self) -> None:
        result = self._narrow_for(None, [[u'stream', u'my_stream']])
        self.assertEqual(result, [[u'stream', u'my_stream']])

    def test_use_passed_narrow_with_default(self) -> None:
        result = self._narrow_for(self.stream.id, [[u'stream', u'my_stream']])
        self.assertEqual(result, [[u'stream', u'my_stream']])

    def test_use_default_if_narrow_is_empty(self) -> None:
        result = self._narrow_for(self.stream.id, [])
        self.assertEqual(result, [[u'stream', u'Verona']])

    def test_use_narrow_if_default_is_none(self) -> None:
        result = self._narrow_for(None, [])
        self.assertEqual(result, [])
class TestGetRawUserDataSystemBotRealm(ZulipTestCase):
    """get_raw_user_data() on the system-bot realm must include every
    cross-realm bot, each flagged as a cross-realm bot."""

    def test_get_raw_user_data_on_system_bot_realm(self) -> None:
        raw_users = get_raw_user_data(get_realm("zulipinternal"), self.example_user('hamlet'), True)
        for email in settings.CROSS_REALM_BOT_EMAILS:
            bot_id = get_system_bot(email).id
            self.assertTrue(bot_id in raw_users)
            self.assertTrue(raw_users[bot_id]['is_cross_realm_bot'])
| 41.856907 | 200 | 0.58101 |
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
import copy
import os
import shutil
import sys
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.utils.timezone import now as timezone_now
from io import StringIO
from zerver.models import (
get_client, get_stream_recipient, get_stream, get_realm, get_system_bot,
Message, RealmDomain, Recipient, UserMessage, UserPresence, UserProfile,
Realm, Subscription, Stream, flush_per_request_caches, UserGroup, Service,
Attachment, PreregistrationUser, get_user_by_delivery_email, MultiuseInvite,
RealmAuditLog
)
from zerver.lib.actions import (
try_update_realm_custom_profile_field,
bulk_add_subscriptions,
bulk_remove_subscriptions,
check_add_realm_emoji,
check_send_message,
check_send_typing_notification,
do_add_alert_words,
do_add_default_stream,
do_add_reaction,
do_add_reaction_legacy,
do_add_realm_domain,
do_add_realm_filter,
do_add_streams_to_default_stream_group,
do_add_submessage,
do_change_avatar_fields,
do_change_bot_owner,
do_change_default_all_public_streams,
do_change_default_events_register_stream,
do_change_default_sending_stream,
do_change_default_stream_group_description,
do_change_default_stream_group_name,
do_change_full_name,
do_change_icon_source,
do_change_logo_source,
do_change_is_admin,
do_change_is_guest,
do_change_notification_settings,
do_change_plan_type,
do_change_realm_domain,
do_change_stream_description,
do_change_stream_invite_only,
do_change_stream_announcement_only,
do_change_subscription_property,
do_change_user_delivery_email,
do_create_user,
do_create_default_stream_group,
do_create_multiuse_invite_link,
do_deactivate_stream,
do_deactivate_user,
do_delete_messages,
do_invite_users,
do_mark_hotspot_as_read,
do_mute_topic,
do_reactivate_user,
do_regenerate_api_key,
do_remove_alert_words,
do_remove_default_stream,
do_remove_default_stream_group,
do_remove_reaction,
do_remove_reaction_legacy,
do_remove_realm_domain,
do_remove_realm_emoji,
do_remove_realm_filter,
do_remove_streams_from_default_stream_group,
do_rename_stream,
do_revoke_multi_use_invite,
do_revoke_user_invite,
do_set_realm_authentication_methods,
do_set_realm_message_editing,
do_set_realm_property,
do_set_user_display_setting,
do_set_realm_notifications_stream,
do_set_realm_signup_notifications_stream,
do_unmute_topic,
do_update_embedded_data,
do_update_message,
do_update_message_flags,
do_update_outgoing_webhook_service,
do_update_pointer,
do_update_user_presence,
do_update_user_status,
get_typing_user_profiles,
log_event,
lookup_default_stream_groups,
notify_realm_custom_profile_fields,
check_add_user_group,
do_update_user_group_name,
do_update_user_group_description,
bulk_add_members_to_user_group,
remove_members_from_user_group,
check_delete_user_group,
do_update_user_custom_profile_data_if_changed,
)
from zerver.lib.events import (
apply_events,
fetch_initial_state_data,
get_raw_user_data,
post_process_state,
)
from zerver.lib.message import (
aggregate_unread_data,
get_raw_unread_data,
render_markdown,
UnreadMessagesResult,
)
from zerver.lib.test_helpers import POSTRequestMock, get_subscription, \
get_test_image_file, stub_event_queue_user_events, queries_captured, \
create_dummy_file, stdout_suppressed
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_runner import slow
from zerver.lib.topic import (
ORIG_TOPIC,
TOPIC_NAME,
TOPIC_LINKS,
)
from zerver.lib.topic_mutes import (
add_topic_mute,
)
from zerver.lib.validator import (
check_bool, check_dict, check_dict_only, check_float, check_int, check_list, check_string,
equals, check_none_or, Validator, check_url
)
from zerver.lib.users import get_api_key
from zerver.views.events_register import _default_all_public_streams, _default_narrow
from zerver.tornado.event_queue import (
allocate_client_descriptor,
clear_client_event_queues_for_testing,
get_client_info_for_message_event,
process_message_event,
)
from zerver.tornado.views import get_events
import mock
import time
import ujson
class LogEventsTest(ZulipTestCase):
    """Tests for the low-level log_event() helper."""

    def test_with_missing_event_log_dir_setting(self) -> None:
        # With event logging disabled, log_event() is a silent no-op.
        with self.settings(EVENT_LOG_DIR=None):
            log_event(dict())

    def test_log_event_mkdir(self) -> None:
        # log_event() should create the configured log directory on demand.
        log_dir = os.path.join(settings.TEST_WORKER_DIR, "test-log-dir")
        try:
            shutil.rmtree(log_dir)
        except OSError:
            pass  # Directory did not exist; nothing to clean up.
        self.assertFalse(os.path.exists(log_dir))
        empty_event = {}  # type: Dict[str, int]
        with self.settings(EVENT_LOG_DIR=log_dir):
            log_event(empty_event)
        self.assertTrue(os.path.exists(log_dir))
class EventsEndpointTest(ZulipTestCase):
    """Smoke tests for the /json/register and internal /notify_tornado
    endpoints."""

    def test_events_register_endpoint(self) -> None:
        """Exercise the main code paths of the events_register endpoint.

        This test is intended to get minimal coverage on the
        events_register code paths: queue-allocation failure, soft
        reactivation of returning users, and the interaction between
        `event_types` and `fetch_event_types`.
        """
        email = self.example_email("hamlet")
        with mock.patch('zerver.views.events_register.do_events_register', return_value={}):
            result = self.api_post(email, '/json/register')
        self.assert_json_success(result)

        # If Tornado cannot allocate an event queue, the request fails.
        with mock.patch('zerver.lib.events.request_event_queue', return_value=None):
            result = self.api_post(email, '/json/register')
        self.assert_json_error(result, "Could not allocate event queue")

        return_event_queue = '15:11'
        return_user_events = [] # type: List[Dict[str, Any]]

        # Test that call is made to deal with a returning soft deactivated user.
        with mock.patch('zerver.lib.events.reactivate_user_if_soft_deactivated') as fa:
            with stub_event_queue_user_events(return_event_queue, return_user_events):
                result = self.api_post(email, '/json/register', dict(event_types=ujson.dumps(['pointer'])))
                self.assertEqual(fa.call_count, 1)

        # With no queued events, last_event_id comes back as -1.
        with stub_event_queue_user_events(return_event_queue, return_user_events):
            result = self.api_post(email, '/json/register', dict(event_types=ujson.dumps(['pointer'])))
        self.assert_json_success(result)
        result_dict = result.json()
        self.assertEqual(result_dict['last_event_id'], -1)
        self.assertEqual(result_dict['queue_id'], '15:11')

        # A queued pointer event should be reflected in the response.
        return_event_queue = '15:12'
        return_user_events = [
            {
                'id': 6,
                'type': 'pointer',
                'pointer': 15,
            }
        ]
        with stub_event_queue_user_events(return_event_queue, return_user_events):
            result = self.api_post(email, '/json/register', dict(event_types=ujson.dumps(['pointer'])))
        self.assert_json_success(result)
        result_dict = result.json()
        self.assertEqual(result_dict['last_event_id'], 6)
        self.assertEqual(result_dict['pointer'], 15)
        self.assertEqual(result_dict['queue_id'], '15:12')

        # Now test with `fetch_event_types` not matching the event
        return_event_queue = '15:13'
        with stub_event_queue_user_events(return_event_queue, return_user_events):
            result = self.api_post(email, '/json/register',
                                   dict(event_types=ujson.dumps(['pointer']),
                                        fetch_event_types=ujson.dumps(['message'])))
        self.assert_json_success(result)
        result_dict = result.json()
        self.assertEqual(result_dict['last_event_id'], 6)
        # Check that the message event types data is in there
        self.assertIn('max_message_id', result_dict)
        # Check that the pointer event types data is not in there
        self.assertNotIn('pointer', result_dict)
        self.assertEqual(result_dict['queue_id'], '15:13')

        # Now test with `fetch_event_types` matching the event
        with stub_event_queue_user_events(return_event_queue, return_user_events):
            result = self.api_post(email, '/json/register',
                                   dict(fetch_event_types=ujson.dumps(['pointer']),
                                        event_types=ujson.dumps(['message'])))
        self.assert_json_success(result)
        result_dict = result.json()
        self.assertEqual(result_dict['last_event_id'], 6)
        # Check that we didn't fetch the messages data
        self.assertNotIn('max_message_id', result_dict)
        # Check that the pointer data is in there
        self.assertIn('pointer', result_dict)
        self.assertEqual(result_dict['pointer'], 15)
        self.assertEqual(result_dict['queue_id'], '15:13')

    def test_tornado_endpoint(self) -> None:
        """Exercise the internal /notify_tornado endpoint.

        This test is mostly intended to get minimal coverage on the
        /notify_tornado endpoint, so we can have 100% URL coverage, but
        it does exercise a little bit of the codepath, including the
        shared-secret authentication check.
        """
        post_data = dict(
            data=ujson.dumps(
                dict(
                    event=dict(
                        type='other'
                    ),
                    users=[self.example_user('hamlet').id],
                ),
            ),
        )
        req = POSTRequestMock(post_data, user_profile=None)
        req.META['REMOTE_ADDR'] = '127.0.0.1'
        # Without the shared secret the request must be rejected.
        result = self.client_post_request('/notify_tornado', req)
        self.assert_json_error(result, 'Access denied', status_code=403)

        post_data['secret'] = settings.SHARED_SECRET
        req = POSTRequestMock(post_data, user_profile=None)
        req.META['REMOTE_ADDR'] = '127.0.0.1'
        result = self.client_post_request('/notify_tornado', req)
        self.assert_json_success(result)
class GetEventsTest(ZulipTestCase):
    """Tests for the Tornado get_events (event polling) view."""

    def tornado_call(self, view_func: Callable[[HttpRequest, UserProfile], HttpResponse],
                     user_profile: UserProfile,
                     post_data: Dict[str, Any]) -> HttpResponse:
        # Invoke a Tornado view directly with a mocked POST request,
        # bypassing URL routing and middleware.
        request = POSTRequestMock(post_data, user_profile)
        return view_func(request, user_profile)

    def test_get_events(self) -> None:
        """Register event queues for two users, send private messages, and
        verify each user's queue receives the expected message events."""
        user_profile = self.example_user('hamlet')
        email = user_profile.email
        recipient_user_profile = self.example_user('othello')
        recipient_email = recipient_user_profile.email
        self.login(email)

        # Register a message-only event queue for the sender...
        result = self.tornado_call(get_events, user_profile,
                                   {"apply_markdown": ujson.dumps(True),
                                    "client_gravatar": ujson.dumps(True),
                                    "event_types": ujson.dumps(["message"]),
                                    "user_client": "website",
                                    "dont_block": ujson.dumps(True),
                                    })
        self.assert_json_success(result)
        queue_id = ujson.loads(result.content)["queue_id"]

        # ...and one for the recipient.
        recipient_result = self.tornado_call(get_events, recipient_user_profile,
                                             {"apply_markdown": ujson.dumps(True),
                                              "client_gravatar": ujson.dumps(True),
                                              "event_types": ujson.dumps(["message"]),
                                              "user_client": "website",
                                              "dont_block": ujson.dumps(True),
                                              })
        self.assert_json_success(recipient_result)
        recipient_queue_id = ujson.loads(recipient_result.content)["queue_id"]

        # A freshly created queue starts out empty.
        result = self.tornado_call(get_events, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": -1,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 0)

        local_id = '10.01'
        check_send_message(
            sender=user_profile,
            client=get_client('whatever'),
            message_type_name='private',
            message_to=[recipient_email],
            topic_name=None,
            message_content='hello',
            local_id=local_id,
            sender_queue_id=queue_id,
        )

        # The sender's queue now holds exactly one message event,
        # echoing back the client-supplied local_id.
        result = self.tornado_call(get_events, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": -1,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 1)
        self.assertEqual(events[0]["type"], "message")
        self.assertEqual(events[0]["message"]["sender_email"], email)
        self.assertEqual(events[0]["local_message_id"], local_id)
        self.assertEqual(events[0]["message"]["display_recipient"][0]["is_mirror_dummy"], False)
        self.assertEqual(events[0]["message"]["display_recipient"][1]["is_mirror_dummy"], False)

        last_event_id = events[0]["id"]
        local_id = '10.02'

        check_send_message(
            sender=user_profile,
            client=get_client('whatever'),
            message_type_name='private',
            message_to=[recipient_email],
            topic_name=None,
            message_content='hello',
            local_id=local_id,
            sender_queue_id=queue_id,
        )

        # Polling with last_event_id skips already-acknowledged events.
        result = self.tornado_call(get_events, user_profile,
                                   {"queue_id": queue_id,
                                    "user_client": "website",
                                    "last_event_id": last_event_id,
                                    "dont_block": ujson.dumps(True),
                                    })
        events = ujson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 1)
        self.assertEqual(events[0]["type"], "message")
        self.assertEqual(events[0]["message"]["sender_email"], email)
        self.assertEqual(events[0]["local_message_id"], local_id)

        # Test that the received message in the receiver's event queue
        # does not carry local_message_id (sender-only echo metadata).
        recipient_result = self.tornado_call(get_events, recipient_user_profile,
                                             {"queue_id": recipient_queue_id,
                                              "user_client": "website",
                                              "last_event_id": -1,
                                              "dont_block": ujson.dumps(True),
                                              })
        recipient_events = ujson.loads(recipient_result.content)["events"]
        self.assert_json_success(recipient_result)
        self.assertEqual(len(recipient_events), 2)
        self.assertEqual(recipient_events[0]["type"], "message")
        self.assertEqual(recipient_events[0]["message"]["sender_email"], email)
        self.assertTrue("local_message_id" not in recipient_events[0])
        self.assertEqual(recipient_events[1]["type"], "message")
        self.assertEqual(recipient_events[1]["message"]["sender_email"], email)
        self.assertTrue("local_message_id" not in recipient_events[1])

    def test_get_events_narrow(self) -> None:
        """A queue narrowed to one stream should only receive matching
        messages, with rendering controlled by apply_markdown and
        avatar policy by client_gravatar."""
        user_profile = self.example_user('hamlet')
        email = user_profile.email
        self.login(email)

        def get_message(apply_markdown: bool, client_gravatar: bool) -> Dict[str, Any]:
            # Register a queue narrowed to stream "denmark", send one PM
            # (filtered out) and one Denmark stream message, then return
            # the single message payload that arrives.
            result = self.tornado_call(
                get_events,
                user_profile,
                dict(
                    apply_markdown=ujson.dumps(apply_markdown),
                    client_gravatar=ujson.dumps(client_gravatar),
                    event_types=ujson.dumps(["message"]),
                    narrow=ujson.dumps([["stream", "denmark"]]),
                    user_client="website",
                    dont_block=ujson.dumps(True),
                )
            )
            self.assert_json_success(result)
            queue_id = ujson.loads(result.content)["queue_id"]

            result = self.tornado_call(get_events, user_profile,
                                       {"queue_id": queue_id,
                                        "user_client": "website",
                                        "last_event_id": -1,
                                        "dont_block": ujson.dumps(True),
                                        })
            events = ujson.loads(result.content)["events"]
            self.assert_json_success(result)
            self.assert_length(events, 0)

            self.send_personal_message(email, self.example_email("othello"), "hello")
            self.send_stream_message(email, "Denmark", "**hello**")

            result = self.tornado_call(get_events, user_profile,
                                       {"queue_id": queue_id,
                                        "user_client": "website",
                                        "last_event_id": -1,
                                        "dont_block": ujson.dumps(True),
                                        })
            events = ujson.loads(result.content)["events"]
            self.assert_json_success(result)
            self.assert_length(events, 1)
            self.assertEqual(events[0]["type"], "message")
            return events[0]['message']

        # Exercise all four combinations of markdown x gravatar policy.
        message = get_message(apply_markdown=False, client_gravatar=False)
        self.assertEqual(message["display_recipient"], "Denmark")
        self.assertEqual(message["content"], "**hello**")
        self.assertIn('gravatar.com', message["avatar_url"])

        message = get_message(apply_markdown=True, client_gravatar=False)
        self.assertEqual(message["display_recipient"], "Denmark")
        self.assertEqual(message["content"], "<p><strong>hello</strong></p>")
        self.assertIn('gravatar.com', message["avatar_url"])

        # With client_gravatar, the server omits the avatar URL.
        message = get_message(apply_markdown=False, client_gravatar=True)
        self.assertEqual(message["display_recipient"], "Denmark")
        self.assertEqual(message["content"], "**hello**")
        self.assertEqual(message["avatar_url"], None)

        message = get_message(apply_markdown=True, client_gravatar=True)
        self.assertEqual(message["display_recipient"], "Denmark")
        self.assertEqual(message["content"], "<p><strong>hello</strong></p>")
        self.assertEqual(message["avatar_url"], None)
class EventsRegisterTest(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.user_profile = self.example_user('hamlet')
def create_bot(self, email: str, **extras: Any) -> Optional[UserProfile]:
return self.create_test_bot(email, self.user_profile, **extras)
def realm_bot_schema(self, field_name: str, check: Validator) -> Validator:
return self.check_events_dict([
('type', equals('realm_bot')),
('op', equals('update')),
('bot', check_dict_only([
('email', check_string),
('user_id', check_int),
(field_name, check),
])),
])
    def do_test(self, action: Callable[[], object], event_types: Optional[List[str]]=None,
                include_subscribers: bool=True, state_change_expected: bool=True,
                notification_settings_null: bool=False,
                client_gravatar: bool=False, num_events: int=1) -> List[Dict[str, Any]]:
        """Run `action`, capture the events it generates, and verify that
        applying those events to a pre-action /register state produces
        the same state a fresh /register call yields afterwards.

        `state_change_expected` asserts whether the events should (or
        should not) modify the state at all; `num_events` pins how many
        events the action must generate.  Returns the captured events so
        callers can schema-check them.
        """
        # Start from a clean slate of Tornado client descriptors.
        clear_client_event_queues_for_testing()
        client = allocate_client_descriptor(
            dict(user_profile_id = self.user_profile.id,
                 user_profile_email = self.user_profile.email,
                 realm_id = self.user_profile.realm_id,
                 event_types = event_types,
                 client_type_name = "website",
                 apply_markdown = True,
                 client_gravatar = client_gravatar,
                 all_public_streams = False,
                 queue_timeout = 600,
                 last_connection_time = time.time(),
                 narrow = [])
        )
        # Snapshot the initial /register state before the action runs.
        hybrid_state = fetch_initial_state_data(
            self.user_profile, event_types, "",
            client_gravatar=True,
            include_subscribers=include_subscribers
        )
        action()
        events = client.event_queue.contents()
        self.assertEqual(len(events), num_events)

        initial_state = copy.deepcopy(hybrid_state)
        post_process_state(self.user_profile, initial_state, notification_settings_null)
        before = ujson.dumps(initial_state)
        # Apply the captured events on top of the pre-action snapshot.
        apply_events(hybrid_state, events, self.user_profile,
                     client_gravatar=True, include_subscribers=include_subscribers)
        post_process_state(self.user_profile, hybrid_state, notification_settings_null)
        after = ujson.dumps(hybrid_state)

        if state_change_expected:
            if before == after:
                # A test whose events don't change state is vacuous.
                print(ujson.dumps(initial_state, indent=2))
                print(events)
                raise AssertionError('Test does not exercise enough code -- events do not change state.')
        else:
            try:
                self.match_states(initial_state, copy.deepcopy(hybrid_state), events)
            except AssertionError:
                raise AssertionError('Test is invalid--state actually does change here.')

        # The event-updated state must match a freshly fetched one.
        normal_state = fetch_initial_state_data(
            self.user_profile, event_types, "",
            client_gravatar=True,
            include_subscribers=include_subscribers,
        )
        post_process_state(self.user_profile, normal_state, notification_settings_null)
        self.match_states(hybrid_state, normal_state, events)
        return events
def assert_on_error(self, error: Optional[str]) -> None:
if error:
raise AssertionError(error)
    def match_states(self, state1: Dict[str, Any], state2: Dict[str, Any],
                     events: List[Dict[str, Any]]) -> None:
        """Assert two /register states are equal after normalizing
        ordering-sensitive fields; on mismatch, print a per-key diff of
        the states plus the events that were applied."""
        def normalize(state: Dict[str, Any]) -> None:
            # Subscriber lists have no guaranteed order, and the
            # subscription lists are re-keyed by stream name so the
            # comparison is order-independent.
            for u in state['never_subscribed']:
                if 'subscribers' in u:
                    u['subscribers'].sort()
            for u in state['subscriptions']:
                if 'subscribers' in u:
                    u['subscribers'].sort()
            state['subscriptions'] = {u['name']: u for u in state['subscriptions']}
            state['unsubscribed'] = {u['name']: u for u in state['unsubscribed']}
            if 'realm_bots' in state:
                state['realm_bots'] = {u['email']: u for u in state['realm_bots']}
        normalize(state1)
        normalize(state2)
        self.assertEqual(state1.keys(), state2.keys())
        if state1 != state2:
            print('\n---States DO NOT MATCH---')
            print('\nEVENTS:\n')
            # stdlib json used here (not ujson) for its pretty-printing.
            import json
            for event in events:
                print(json.dumps(event, indent=4))
            print('\nMISMATCHES:\n')
            for k in state1:
                if state1[k] != state2[k]:
                    print('\nkey = ' + k)
                    try:
                        # assertEqual produces a readable dict diff.
                        self.assertEqual({k: state1[k]}, {k: state2[k]})
                    except AssertionError as e:
                        print(e)
            print('''
NOTE:
This is an advanced test that verifies how
we apply events after fetching data. If you
do not know how to debug it, you can ask for
help on chat.
''')
            sys.stdout.flush()
            raise AssertionError('Mismatching states')
def check_events_dict(self, required_keys: List[Tuple[str, Validator]]) -> Validator:
required_keys.append(('id', check_int))
keys = [key[0] for key in required_keys]
self.assertEqual(len(keys), len(set(keys)), 'Duplicate items found in required_keys.')
return check_dict_only(required_keys)
def test_mentioned_send_message_events(self) -> None:
user = self.example_user('hamlet')
for i in range(3):
content = 'mentioning... @**' + user.full_name + '** hello ' + str(i)
self.do_test(
lambda: self.send_stream_message(self.example_email('cordelia'),
"Verona",
content)
)
def test_wildcard_mentioned_send_message_events(self) -> None:
for i in range(3):
content = 'mentioning... @**all** hello ' + str(i)
self.do_test(
lambda: self.send_stream_message(self.example_email('cordelia'),
"Verona",
content)
)
def test_pm_send_message_events(self) -> None:
self.do_test(
lambda: self.send_personal_message(self.example_email('cordelia'),
self.example_email('hamlet'),
'hola')
)
def test_huddle_send_message_events(self) -> None:
huddle = [
self.example_email('hamlet'),
self.example_email('othello'),
]
self.do_test(
lambda: self.send_huddle_message(self.example_email('cordelia'),
huddle,
'hola')
)
    def test_stream_send_message_events(self) -> None:
        """Verify message-send events under both gravatar policies, then
        the update_message events for a full edit and for an
        embedded-data (URL preview) update."""
        def get_checker(check_gravatar: Validator) -> Validator:
            # The avatar_url validator varies with client_gravatar.
            schema_checker = self.check_events_dict([
                ('type', equals('message')),
                ('flags', check_list(None)),
                ('message', self.check_events_dict([
                    ('avatar_url', check_gravatar),
                    ('client', check_string),
                    ('content', check_string),
                    ('content_type', equals('text/html')),
                    ('display_recipient', check_string),
                    ('is_me_message', check_bool),
                    ('reactions', check_list(None)),
                    ('recipient_id', check_int),
                    ('sender_realm_str', check_string),
                    ('sender_email', check_string),
                    ('sender_full_name', check_string),
                    ('sender_id', check_int),
                    ('sender_short_name', check_string),
                    ('stream_id', check_int),
                    (TOPIC_NAME, check_string),
                    (TOPIC_LINKS, check_list(None)),
                    ('submessages', check_list(None)),
                    ('timestamp', check_int),
                    ('type', check_string),
                ])),
            ])
            return schema_checker

        # With client_gravatar=False the server computes an avatar URL.
        events = self.do_test(
            lambda: self.send_stream_message(self.example_email("hamlet"), "Verona", "hello"),
            client_gravatar=False,
        )
        schema_checker = get_checker(check_gravatar=check_string)
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

        # With client_gravatar=True avatar_url is omitted (None).
        events = self.do_test(
            lambda: self.send_stream_message(self.example_email("hamlet"), "Verona", "hello"),
            client_gravatar=True,
        )
        schema_checker = get_checker(check_gravatar=equals(None))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

        # Verify the update_message event produced by a full message edit.
        schema_checker = self.check_events_dict([
            ('type', equals('update_message')),
            ('flags', check_list(None)),
            ('content', check_string),
            ('edit_timestamp', check_int),
            ('message_id', check_int),
            ('message_ids', check_list(check_int)),
            ('prior_mention_user_ids', check_list(check_int)),
            ('mention_user_ids', check_list(check_int)),
            ('presence_idle_user_ids', check_list(check_int)),
            ('stream_push_user_ids', check_list(check_int)),
            ('stream_email_user_ids', check_list(check_int)),
            ('push_notify_user_ids', check_list(check_int)),
            ('orig_content', check_string),
            ('orig_rendered_content', check_string),
            (ORIG_TOPIC, check_string),
            ('prev_rendered_content_version', check_int),
            ('propagate_mode', check_string),
            ('rendered_content', check_string),
            ('sender', check_string),
            ('stream_id', check_int),
            ('stream_name', check_string),
            (TOPIC_NAME, check_string),
            (TOPIC_LINKS, check_list(None)),
            ('user_id', check_int),
            ('is_me_message', check_bool),
        ])

        # Edit the most recently sent message.
        message = Message.objects.order_by('-id')[0]
        topic = 'new_topic'
        propagate_mode = 'change_all'
        content = 'new content'
        rendered_content = render_markdown(message, content)
        prior_mention_user_ids = set()
        mentioned_user_ids = set()

        events = self.do_test(
            lambda: do_update_message(self.user_profile, message, topic,
                                      propagate_mode, content, rendered_content,
                                      prior_mention_user_ids,
                                      mentioned_user_ids),
            state_change_expected=True,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)

        # An embedded-data update sends a leaner update_message event and
        # is not a user-visible edit, so state should not change.
        schema_checker = self.check_events_dict([
            ('type', equals('update_message')),
            ('flags', check_list(None)),
            ('content', check_string),
            ('message_id', check_int),
            ('message_ids', check_list(check_int)),
            ('rendered_content', check_string),
            ('sender', check_string),
        ])

        events = self.do_test(
            lambda: do_update_embedded_data(self.user_profile, message,
                                            u"embed_content", "<p>embed_content</p>"),
            state_change_expected=False,
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
def test_update_message_flags(self) -> None:
schema_checker = self.check_events_dict([
('all', check_bool),
('type', equals('update_message_flags')),
('flag', check_string),
('messages', check_list(check_int)),
('operation', equals("add")),
])
message = self.send_personal_message(
self.example_email("cordelia"),
self.example_email("hamlet"),
"hello",
)
user_profile = self.example_user('hamlet')
events = self.do_test(
lambda: do_update_message_flags(user_profile, get_client("website"), 'add', 'starred', [message]),
state_change_expected=True,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
schema_checker = self.check_events_dict([
('all', check_bool),
('type', equals('update_message_flags')),
('flag', check_string),
('messages', check_list(check_int)),
('operation', equals("remove")),
])
events = self.do_test(
lambda: do_update_message_flags(user_profile, get_client("website"), 'remove', 'starred', [message]),
state_change_expected=True,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_update_read_flag_removes_unread_msg_ids(self) -> None:
user_profile = self.example_user('hamlet')
mention = '@**' + user_profile.full_name + '**'
for content in ['hello', mention]:
message = self.send_stream_message(
self.example_email('cordelia'),
"Verona",
content
)
self.do_test(
lambda: do_update_message_flags(user_profile, get_client("website"), 'add', 'read', [message]),
state_change_expected=True,
)
def test_send_message_to_existing_recipient(self) -> None:
self.send_stream_message(
self.example_email('cordelia'),
"Verona",
"hello 1"
)
self.do_test(
lambda: self.send_stream_message("cordelia@zulip.com", "Verona", "hello 2"),
state_change_expected=True,
)
def test_add_reaction_legacy(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('reaction')),
('op', equals('add')),
('message_id', check_int),
('emoji_name', check_string),
('emoji_code', check_string),
('reaction_type', check_string),
('user', check_dict_only([
('email', check_string),
('full_name', check_string),
('user_id', check_int)
])),
])
message_id = self.send_stream_message(self.example_email("hamlet"), "Verona", "hello")
message = Message.objects.get(id=message_id)
events = self.do_test(
lambda: do_add_reaction_legacy(
self.user_profile, message, "tada"),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_remove_reaction_legacy(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('reaction')),
('op', equals('remove')),
('message_id', check_int),
('emoji_name', check_string),
('emoji_code', check_string),
('reaction_type', check_string),
('user', check_dict_only([
('email', check_string),
('full_name', check_string),
('user_id', check_int)
])),
])
message_id = self.send_stream_message(self.example_email("hamlet"), "Verona", "hello")
message = Message.objects.get(id=message_id)
do_add_reaction_legacy(self.user_profile, message, "tada")
events = self.do_test(
lambda: do_remove_reaction_legacy(
self.user_profile, message, "tada"),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_add_reaction(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('reaction')),
('op', equals('add')),
('message_id', check_int),
('emoji_name', check_string),
('emoji_code', check_string),
('reaction_type', check_string),
('user', check_dict_only([
('email', check_string),
('full_name', check_string),
('user_id', check_int)
])),
])
message_id = self.send_stream_message(self.example_email("hamlet"), "Verona", "hello")
message = Message.objects.get(id=message_id)
events = self.do_test(
lambda: do_add_reaction(
self.user_profile, message, "tada", "1f389", "unicode_emoji"),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_add_submessage(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('submessage')),
('message_id', check_int),
('submessage_id', check_int),
('sender_id', check_int),
('msg_type', check_string),
('content', check_string),
])
cordelia = self.example_user('cordelia')
stream_name = 'Verona'
message_id = self.send_stream_message(
sender_email=cordelia.email,
stream_name=stream_name,
)
events = self.do_test(
lambda: do_add_submessage(
realm=cordelia.realm,
sender_id=cordelia.id,
message_id=message_id,
msg_type='whatever',
content='"stuff"',
),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_remove_reaction(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('reaction')),
('op', equals('remove')),
('message_id', check_int),
('emoji_name', check_string),
('emoji_code', check_string),
('reaction_type', check_string),
('user', check_dict_only([
('email', check_string),
('full_name', check_string),
('user_id', check_int)
])),
])
message_id = self.send_stream_message(self.example_email("hamlet"), "Verona", "hello")
message = Message.objects.get(id=message_id)
do_add_reaction(self.user_profile, message, "tada", "1f389", "unicode_emoji")
events = self.do_test(
lambda: do_remove_reaction(
self.user_profile, message, "1f389", "unicode_emoji"),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_invite_user_event(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('invites_changed')),
])
self.user_profile = self.example_user('iago')
streams = []
for stream_name in ["Denmark", "Scotland"]:
streams.append(get_stream(stream_name, self.user_profile.realm))
events = self.do_test(
lambda: do_invite_users(self.user_profile, ["foo@zulip.com"], streams, False),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_create_multiuse_invite_event(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('invites_changed')),
])
self.user_profile = self.example_user('iago')
streams = []
for stream_name in ["Denmark", "Verona"]:
streams.append(get_stream(stream_name, self.user_profile.realm))
events = self.do_test(
lambda: do_create_multiuse_invite_link(self.user_profile, PreregistrationUser.INVITE_AS['MEMBER'], streams),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_revoke_user_invite_event(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('invites_changed')),
])
self.user_profile = self.example_user('iago')
streams = []
for stream_name in ["Denmark", "Verona"]:
streams.append(get_stream(stream_name, self.user_profile.realm))
do_invite_users(self.user_profile, ["foo@zulip.com"], streams, False)
prereg_users = PreregistrationUser.objects.filter(referred_by__realm=self.user_profile.realm)
events = self.do_test(
lambda: do_revoke_user_invite(prereg_users[0]),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_revoke_multiuse_invite_event(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('invites_changed')),
])
self.user_profile = self.example_user('iago')
streams = []
for stream_name in ["Denmark", "Verona"]:
streams.append(get_stream(stream_name, self.user_profile.realm))
do_create_multiuse_invite_link(self.user_profile, PreregistrationUser.INVITE_AS['MEMBER'], streams)
multiuse_object = MultiuseInvite.objects.get()
events = self.do_test(
lambda: do_revoke_multi_use_invite(multiuse_object),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_invitation_accept_invite_event(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('invites_changed')),
])
self.user_profile = self.example_user('iago')
streams = []
for stream_name in ["Denmark", "Scotland"]:
streams.append(get_stream(stream_name, self.user_profile.realm))
do_invite_users(self.user_profile, ["foo@zulip.com"], streams, False)
prereg_users = PreregistrationUser.objects.get(email="foo@zulip.com")
events = self.do_test(
lambda: do_create_user('foo@zulip.com', 'password', self.user_profile.realm,
'full name', 'short name', prereg_user=prereg_users),
state_change_expected=True,
num_events=5,
)
error = schema_checker('events[4]', events[4])
self.assert_on_error(error)
def test_typing_events(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('typing')),
('op', equals('start')),
('sender', check_dict_only([
('email', check_string),
('user_id', check_int)])),
('recipients', check_list(check_dict_only([
('email', check_string),
('user_id', check_int),
]))),
])
events = self.do_test(
lambda: check_send_typing_notification(
self.user_profile, [self.example_email("cordelia")], "start"),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_get_typing_user_profiles(self) -> None:
sender_profile = self.example_user('cordelia')
stream = get_stream('Rome', sender_profile.realm)
with self.assertRaisesRegex(ValueError, 'not supported for streams'):
recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
get_typing_user_profiles(recipient, sender_profile.id)
with self.assertRaisesRegex(ValueError, 'Bad recipient type'):
recipient = Recipient(type=999)
get_typing_user_profiles(recipient, sender_profile.id)
def test_custom_profile_fields_events(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('custom_profile_fields')),
('op', equals('add')),
('fields', check_list(check_dict_only([
('id', check_int),
('type', check_int),
('name', check_string),
('hint', check_string),
('field_data', check_string),
('order', check_int),
]))),
])
events = self.do_test(
lambda: notify_realm_custom_profile_fields(
self.user_profile.realm, 'add'),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
realm = self.user_profile.realm
field = realm.customprofilefield_set.get(realm=realm, name='Biography')
name = field.name
hint = 'Biography of the user'
try_update_realm_custom_profile_field(realm, field, name, hint=hint)
events = self.do_test(
lambda: notify_realm_custom_profile_fields(
self.user_profile.realm, 'add'),
state_change_expected=False,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_custom_profile_field_data_events(self) -> None:
        """Updating a user's custom profile field values should emit
        realm_user/update events; long-text fields additionally carry a
        rendered_value in the payload."""
        schema_checker_basic = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('user_id', check_int),
                ('custom_profile_field', check_dict([
                    ('id', check_int),
                    ('value', check_none_or(check_string)),
                ])),
            ])),
        ])
        schema_checker_with_rendered_value = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('user_id', check_int),
                ('custom_profile_field', check_dict([
                    ('id', check_int),
                    ('value', check_none_or(check_string)),
                    ('rendered_value', check_none_or(check_string)),
                ])),
            ])),
        ])

        # 'Biography' is a text field, so the event includes the
        # markdown-rendered value.
        field_id = self.user_profile.realm.customprofilefield_set.get(
            realm=self.user_profile.realm, name='Biography').id
        field = {
            "id": field_id,
            "value": "New value",
        }
        events = self.do_test(lambda: do_update_user_custom_profile_data_if_changed(self.user_profile, [field]))
        error = schema_checker_with_rendered_value('events[0]', events[0])
        self.assert_on_error(error)

        # 'Mentor' holds a list of user ids, so there is no rendered_value
        # in its event payload.
        field_id = self.user_profile.realm.customprofilefield_set.get(
            realm=self.user_profile.realm, name='Mentor').id
        field = {
            "id": field_id,
            "value": [self.example_user("ZOE").id],
        }
        events = self.do_test(lambda: do_update_user_custom_profile_data_if_changed(self.user_profile, [field]))
        error = schema_checker_basic('events[0]', events[0])
        self.assert_on_error(error)
def test_presence_events(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('presence')),
('email', check_string),
('server_timestamp', check_float),
('presence', check_dict_only([
('website', check_dict_only([
('status', equals('active')),
('timestamp', check_int),
('client', check_string),
('pushable', check_bool),
])),
])),
])
events = self.do_test(lambda: do_update_user_presence(
self.user_profile, get_client("website"), timezone_now(), UserPresence.ACTIVE))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_presence_events_multiple_clients(self) -> None:
        """With presence reported from two clients, an update from one
        client should emit an event covering just that client's status."""
        schema_checker_android = self.check_events_dict([
            ('type', equals('presence')),
            ('email', check_string),
            ('server_timestamp', check_float),
            ('presence', check_dict_only([
                ('ZulipAndroid/1.0', check_dict_only([
                    ('status', equals('idle')),
                    ('timestamp', check_int),
                    ('client', check_string),
                    ('pushable', check_bool),
                ])),
            ])),
        ])
        # Report presence from the Android client via the API...
        self.api_post(self.user_profile.email, "/api/v1/users/me/presence", {'status': 'idle'},
                      HTTP_USER_AGENT="ZulipAndroid/1.0")
        # ...and from the website client via the action codepath.
        self.do_test(lambda: do_update_user_presence(
            self.user_profile, get_client("website"), timezone_now(), UserPresence.ACTIVE))
        # The Android update event should only mention the Android client.
        events = self.do_test(lambda: do_update_user_presence(
            self.user_profile, get_client("ZulipAndroid/1.0"), timezone_now(), UserPresence.IDLE))
        error = schema_checker_android('events[0]', events[0])
        self.assert_on_error(error)
def test_pointer_events(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('pointer')),
('pointer', check_int)
])
events = self.do_test(lambda: do_update_pointer(self.user_profile, get_client("website"), 1500))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_register_events(self) -> None:
        """Registering a new user should send exactly one realm_user "add"
        event; with default email visibility the event carries the real
        delivery email."""
        realm_user_add_checker = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('add')),
            ('person', check_dict_only([
                ('user_id', check_int),
                ('email', check_string),
                ('avatar_url', check_none_or(check_string)),
                ('full_name', check_string),
                ('is_admin', check_bool),
                ('is_bot', check_bool),
                ('is_guest', check_bool),
                ('profile_data', check_dict_only([])),
                ('timezone', check_string),
                ('date_joined', check_string),
            ])),
        ])
        events = self.do_test(lambda: self.register("test1@zulip.com", "test1"))
        self.assert_length(events, 1)
        error = realm_user_add_checker('events[0]', events[0])
        self.assert_on_error(error)
        # The .email field matches the delivery email under default settings.
        new_user_profile = get_user_by_delivery_email("test1@zulip.com", self.user_profile.realm)
        self.assertEqual(new_user_profile.email, "test1@zulip.com")
    def test_register_events_email_address_visibility(self) -> None:
        """Like test_register_events, but with emails visible only to
        admins: the new user's .email is the dummy address, not the
        delivery email."""
        realm_user_add_checker = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('add')),
            ('person', check_dict_only([
                ('user_id', check_int),
                ('email', check_string),
                ('avatar_url', check_none_or(check_string)),
                ('full_name', check_string),
                ('is_admin', check_bool),
                ('is_bot', check_bool),
                ('is_guest', check_bool),
                ('profile_data', check_dict_only([])),
                ('timezone', check_string),
                ('date_joined', check_string),
            ])),
        ])
        do_set_realm_property(self.user_profile.realm, "email_address_visibility",
                              Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
        events = self.do_test(lambda: self.register("test1@zulip.com", "test1"))
        self.assert_length(events, 1)
        error = realm_user_add_checker('events[0]', events[0])
        self.assert_on_error(error)
        # With admin-only visibility, the public .email is the synthetic
        # user<id>@<realm-host> address, not the delivery email.
        new_user_profile = get_user_by_delivery_email("test1@zulip.com", self.user_profile.realm)
        self.assertEqual(new_user_profile.email, "user%s@zulip.testserver" % (new_user_profile.id,))
def test_alert_words_events(self) -> None:
alert_words_checker = self.check_events_dict([
('type', equals('alert_words')),
('alert_words', check_list(check_string)),
])
events = self.do_test(lambda: do_add_alert_words(self.user_profile, ["alert_word"]))
error = alert_words_checker('events[0]', events[0])
self.assert_on_error(error)
events = self.do_test(lambda: do_remove_alert_words(self.user_profile, ["alert_word"]))
error = alert_words_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_away_events(self) -> None:
        """Setting and clearing a user status both send a user_status
        event with the away flag and status text."""
        checker = self.check_events_dict([
            ('type', equals('user_status')),
            ('user_id', check_int),
            ('away', check_bool),
            ('status_text', check_string),
        ])
        client = get_client("website")
        # Set the status.
        events = self.do_test(lambda: do_update_user_status(user_profile=self.user_profile,
                                                            away=True,
                                                            status_text='out to lunch',
                                                            client_id=client.id))
        error = checker('events[0]', events[0])
        self.assert_on_error(error)
        # Clear the status again; the event shape is the same.
        events = self.do_test(lambda: do_update_user_status(user_profile=self.user_profile,
                                                            away=False,
                                                            status_text='',
                                                            client_id=client.id))
        error = checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_user_group_events(self) -> None:
        """Walk a user group through its lifecycle (create, rename,
        re-describe, add/remove members, delete) and verify the
        user_group event schema for each operation."""
        # op=add: creating a group sends the full group dict.
        user_group_add_checker = self.check_events_dict([
            ('type', equals('user_group')),
            ('op', equals('add')),
            ('group', check_dict_only([
                ('id', check_int),
                ('name', check_string),
                ('members', check_list(check_int)),
                ('description', check_string),
            ])),
        ])
        othello = self.example_user('othello')
        events = self.do_test(lambda: check_add_user_group(self.user_profile.realm,
                                                           'backend', [othello],
                                                           'Backend team'))
        error = user_group_add_checker('events[0]', events[0])
        self.assert_on_error(error)
        # op=update: renaming sends only the changed field in 'data'.
        user_group_update_checker = self.check_events_dict([
            ('type', equals('user_group')),
            ('op', equals('update')),
            ('group_id', check_int),
            ('data', check_dict_only([
                ('name', check_string),
            ])),
        ])
        backend = UserGroup.objects.get(name='backend')
        events = self.do_test(lambda: do_update_user_group_name(backend, 'backendteam'))
        error = user_group_update_checker('events[0]', events[0])
        self.assert_on_error(error)
        # op=update: same shape for a description change.
        user_group_update_checker = self.check_events_dict([
            ('type', equals('user_group')),
            ('op', equals('update')),
            ('group_id', check_int),
            ('data', check_dict_only([
                ('description', check_string),
            ])),
        ])
        description = "Backend team to deal with backend code."
        events = self.do_test(lambda: do_update_user_group_description(backend, description))
        error = user_group_update_checker('events[0]', events[0])
        self.assert_on_error(error)
        # op=add_members
        user_group_add_member_checker = self.check_events_dict([
            ('type', equals('user_group')),
            ('op', equals('add_members')),
            ('group_id', check_int),
            ('user_ids', check_list(check_int)),
        ])
        hamlet = self.example_user('hamlet')
        events = self.do_test(lambda: bulk_add_members_to_user_group(backend, [hamlet]))
        error = user_group_add_member_checker('events[0]', events[0])
        self.assert_on_error(error)
        # op=remove_members
        user_group_remove_member_checker = self.check_events_dict([
            ('type', equals('user_group')),
            ('op', equals('remove_members')),
            ('group_id', check_int),
            ('user_ids', check_list(check_int)),
        ])
        hamlet = self.example_user('hamlet')
        events = self.do_test(lambda: remove_members_from_user_group(backend, [hamlet]))
        error = user_group_remove_member_checker('events[0]', events[0])
        self.assert_on_error(error)
        # op=remove: deleting the group.
        user_group_remove_checker = self.check_events_dict([
            ('type', equals('user_group')),
            ('op', equals('remove')),
            ('group_id', check_int),
        ])
        events = self.do_test(lambda: check_delete_user_group(backend.id, othello))
        error = user_group_remove_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_default_stream_groups_events(self) -> None:
        """Every mutation of a default stream group (create, add/remove
        streams, rename, re-describe, delete) sends the same
        default_stream_groups event carrying the full current list."""
        default_stream_groups_checker = self.check_events_dict([
            ('type', equals('default_stream_groups')),
            ('default_stream_groups', check_list(check_dict_only([
                ('name', check_string),
                ('id', check_int),
                ('description', check_string),
                ('streams', check_list(check_dict_only([
                    ('description', check_string),
                    ('rendered_description', check_string),
                    ('invite_only', check_bool),
                    ('is_web_public', check_bool),
                    ('is_announcement_only', check_bool),
                    ('name', check_string),
                    ('stream_id', check_int),
                    ('first_message_id', check_none_or(check_int)),
                    ('history_public_to_subscribers', check_bool)]))),
            ]))),
        ])
        streams = []
        for stream_name in ["Scotland", "Verona", "Denmark"]:
            streams.append(get_stream(stream_name, self.user_profile.realm))
        events = self.do_test(lambda: do_create_default_stream_group(
            self.user_profile.realm, "group1", "This is group1", streams))
        error = default_stream_groups_checker('events[0]', events[0])
        self.assert_on_error(error)
        group = lookup_default_stream_groups(["group1"], self.user_profile.realm)[0]
        venice_stream = get_stream("Venice", self.user_profile.realm)
        events = self.do_test(lambda: do_add_streams_to_default_stream_group(self.user_profile.realm,
                                                                             group, [venice_stream]))
        error = default_stream_groups_checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(lambda: do_remove_streams_from_default_stream_group(self.user_profile.realm,
                                                                                  group, [venice_stream]))
        error = default_stream_groups_checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(lambda: do_change_default_stream_group_description(self.user_profile.realm,
                                                                                 group, "New description"))
        error = default_stream_groups_checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(lambda: do_change_default_stream_group_name(self.user_profile.realm,
                                                                          group, "New Group Name"))
        error = default_stream_groups_checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(lambda: do_remove_default_stream_group(self.user_profile.realm, group))
        error = default_stream_groups_checker('events[0]', events[0])
        self.assert_on_error(error)
def test_default_stream_group_events_guest(self) -> None:
streams = []
for stream_name in ["Scotland", "Verona", "Denmark"]:
streams.append(get_stream(stream_name, self.user_profile.realm))
do_create_default_stream_group(self.user_profile.realm, "group1",
"This is group1", streams)
group = lookup_default_stream_groups(["group1"], self.user_profile.realm)[0]
do_change_is_guest(self.user_profile, True)
venice_stream = get_stream("Venice", self.user_profile.realm)
self.do_test(lambda: do_add_streams_to_default_stream_group(self.user_profile.realm,
group, [venice_stream]),
state_change_expected = False, num_events=0)
def test_default_streams_events(self) -> None:
default_streams_checker = self.check_events_dict([
('type', equals('default_streams')),
('default_streams', check_list(check_dict_only([
('description', check_string),
('invite_only', check_bool),
('name', check_string),
('stream_id', check_int),
]))),
])
stream = get_stream("Scotland", self.user_profile.realm)
events = self.do_test(lambda: do_add_default_stream(stream))
error = default_streams_checker('events[0]', events[0])
events = self.do_test(lambda: do_remove_default_stream(stream))
error = default_streams_checker('events[0]', events[0])
self.assert_on_error(error)
def test_default_streams_events_guest(self) -> None:
do_change_is_guest(self.user_profile, True)
stream = get_stream("Scotland", self.user_profile.realm)
self.do_test(lambda: do_add_default_stream(stream),
state_change_expected = False, num_events=0)
self.do_test(lambda: do_remove_default_stream(stream),
state_change_expected = False, num_events=0)
    def test_muted_topics_events(self) -> None:
        """Muting and unmuting a topic each send a muted_topics event
        with the full list of (stream, topic) pairs."""
        muted_topics_checker = self.check_events_dict([
            ('type', equals('muted_topics')),
            ('muted_topics', check_list(check_list(check_string, 2))),
        ])
        stream = get_stream('Denmark', self.user_profile.realm)
        recipient = get_stream_recipient(stream.id)
        events = self.do_test(lambda: do_mute_topic(
            self.user_profile, stream, recipient, "topic"))
        error = muted_topics_checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(lambda: do_unmute_topic(
            self.user_profile, stream, "topic"))
        error = muted_topics_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_change_avatar_fields(self) -> None:
        """Changing the avatar source sends a realm_user update event;
        URLs are non-null for an uploaded avatar, but may be null when
        switching back to gravatar."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
                ('avatar_url', check_string),
                ('avatar_url_medium', check_string),
                ('avatar_source', check_string),
            ])),
        ])
        events = self.do_test(
            lambda: do_change_avatar_fields(self.user_profile, UserProfile.AVATAR_FROM_USER),
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        # For AVATAR_FROM_GRAVATAR, the avatar URLs may be None.
        schema_checker = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
                ('avatar_url', check_none_or(check_string)),
                ('avatar_url_medium', check_none_or(check_string)),
                ('avatar_source', check_string),
            ])),
        ])
        events = self.do_test(
            lambda: do_change_avatar_fields(self.user_profile, UserProfile.AVATAR_FROM_GRAVATAR),
        )
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
def test_change_full_name(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict_only([
('email', check_string),
('full_name', check_string),
('user_id', check_int),
])),
])
events = self.do_test(lambda: do_change_full_name(self.user_profile, 'Sir Hamlet', self.user_profile))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_change_user_delivery_email_email_address_visibilty_admins(self) -> None:
        """With admin-only email visibility, changing the delivery email
        sends a realm_user update exposing only delivery_email."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('delivery_email', check_string),
                ('user_id', check_int),
            ])),
        ])
        do_set_realm_property(self.user_profile.realm, "email_address_visibility",
                              Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
        # Reload the profile so the visibility change above is reflected
        # in the email being passed into this next function.
        self.user_profile.refresh_from_db()
        action = lambda: do_change_user_delivery_email(self.user_profile, 'newhamlet@zulip.com')
        events = self.do_test(action, num_events=1)
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def do_set_realm_property_test(self, name: str) -> None:
        """Drive do_set_realm_property through several values for the
        named realm property and validate the realm/update event schema.

        The validator is chosen from the property's declared type in
        Realm.property_types; a property without an entry in test_values
        (and that is not a bool) raises AssertionError so new properties
        cannot silently go untested.
        """
        bool_tests = [True, False, True]  # type: List[bool]
        test_values = dict(
            default_language=[u'es', u'de', u'en'],
            description=[u'Realm description', u'New description'],
            digest_weekday=[0, 1, 2],
            message_retention_days=[10, 20],
            name=[u'Zulip', u'New Name'],
            waiting_period_threshold=[10, 20],
            create_stream_policy=[3, 2, 1],
            invite_to_stream_policy=[3, 2, 1],
            email_address_visibility=[Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS],
            bot_creation_policy=[Realm.BOT_CREATION_EVERYONE],
            video_chat_provider=[
                Realm.VIDEO_CHAT_PROVIDERS['jitsi_meet']['id'],
                Realm.VIDEO_CHAT_PROVIDERS['google_hangouts']['id']
            ],
            google_hangouts_domain=[u"zulip.com", u"zulip.org"],
            zoom_api_secret=[u"abc", u"xyz"],
            zoom_api_key=[u"abc", u"xyz"],
            zoom_user_id=[u"example@example.com", u"example@example.org"]
        )  # type: Dict[str, Any]
        vals = test_values.get(name)
        property_type = Realm.property_types[name]
        if property_type is bool:
            validator = check_bool
            vals = bool_tests
        elif property_type is str:
            validator = check_string
        elif property_type is int:
            validator = check_int
        elif property_type == (int, type(None)):
            validator = check_int
        else:
            raise AssertionError("Unexpected property type %s" % (property_type,))
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals(name)),
            ('value', validator),
        ])
        if vals is None:
            raise AssertionError('No test created for %s' % (name,))
        # Start from vals[0] so each loop iteration is a real transition.
        do_set_realm_property(self.user_profile.realm, name, vals[0])
        for val in vals[1:]:
            state_change_expected = True
            if name == "zoom_api_secret":
                # NOTE(review): zoom_api_secret apparently does not appear
                # in the fetched client state — confirm before relying on this.
                state_change_expected = False
            events = self.do_test(
                lambda: do_set_realm_property(self.user_profile.realm, name, val),
                state_change_expected=state_change_expected)
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
@slow("Actually runs several full-stack fetching tests")
def test_change_realm_property(self) -> None:
for prop in Realm.property_types:
with self.settings(SEND_DIGEST_EMAILS=True):
self.do_set_realm_property_test(prop)
    @slow("Runs a large matrix of tests")
    def test_change_realm_authentication_methods(self) -> None:
        """Toggle realm authentication methods through a matrix of
        configurations and verify the realm/update_dict event schema."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update_dict')),
            ('property', equals('default')),
            ('data', check_dict_only([
                ('authentication_methods', check_dict([]))
            ])),
        ])
        def fake_backends() -> Any:
            # Override AUTHENTICATION_BACKENDS so all the toggled methods
            # are actually available in this test's settings.
            backends = (
                'zproject.backends.DevAuthBackend',
                'zproject.backends.EmailAuthBackend',
                'zproject.backends.GitHubAuthBackend',
                'zproject.backends.GoogleAuthBackend',
                'zproject.backends.ZulipLDAPAuthBackend',
            )
            return self.settings(AUTHENTICATION_BACKENDS=backends)
        # Test transitions; any new backends should be tested with T/T/T/F/T
        for (auth_method_dict) in \
                ({'Google': True, 'Email': True, 'GitHub': True, 'LDAP': False, 'Dev': False},
                 {'Google': True, 'Email': True, 'GitHub': False, 'LDAP': False, 'Dev': False},
                 {'Google': True, 'Email': False, 'GitHub': False, 'LDAP': False, 'Dev': False},
                 {'Google': True, 'Email': False, 'GitHub': True, 'LDAP': False, 'Dev': False},
                 {'Google': False, 'Email': False, 'GitHub': False, 'LDAP': False, 'Dev': True},
                 {'Google': False, 'Email': False, 'GitHub': True, 'LDAP': False, 'Dev': True},
                 {'Google': False, 'Email': True, 'GitHub': True, 'LDAP': True, 'Dev': False}):
            with fake_backends():
                events = self.do_test(
                    lambda: do_set_realm_authentication_methods(
                        self.user_profile.realm,
                        auth_method_dict))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_pin_stream(self) -> None:
        """Pinning/unpinning a stream sends a subscription update event
        for the pin_to_top property."""
        schema_checker = self.check_events_dict([
            ('type', equals('subscription')),
            ('op', equals('update')),
            ('property', equals('pin_to_top')),
            ('stream_id', check_int),
            ('value', check_bool),
            ('name', check_string),
            ('email', check_string),
        ])
        stream = get_stream("Denmark", self.user_profile.realm)
        sub = get_subscription(stream.name, self.user_profile)
        # Start from a known (unpinned) state so both toggles below are
        # real transitions.
        do_change_subscription_property(self.user_profile, sub, stream, "pin_to_top", False)
        for pinned in (True, False):
            events = self.do_test(lambda: do_change_subscription_property(self.user_profile, sub, stream, "pin_to_top", pinned))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_stream_notification_settings(self) -> None:
        """Toggling a per-stream notification setting sends a
        subscription update event, both with and without
        notification_settings_null."""
        for setting_name in ['email_notifications']:
            schema_checker = self.check_events_dict([
                ('type', equals('subscription')),
                ('op', equals('update')),
                ('property', equals(setting_name)),
                ('stream_id', check_int),
                ('value', check_bool),
                ('name', check_string),
                ('email', check_string),
            ])
        stream = get_stream("Denmark", self.user_profile.realm)
        sub = get_subscription(stream.name, self.user_profile)
        # First test with notification_settings_null enabled
        for value in (True, False):
            events = self.do_test(lambda: do_change_subscription_property(self.user_profile, sub, stream,
                                                                          setting_name, value),
                                  notification_settings_null=True)
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
        # Then the default (non-null) representation.
        for value in (True, False):
            events = self.do_test(lambda: do_change_subscription_property(self.user_profile, sub, stream,
                                                                          setting_name, value))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    @slow("Runs a matrix of 6 queries to the /home view")
    def test_change_realm_message_edit_settings(self) -> None:
        """Changing message-editing settings sends a realm/update_dict
        event bundling all three related fields."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update_dict')),
            ('property', equals('default')),
            ('data', check_dict_only([
                ('allow_message_editing', check_bool),
                ('message_content_edit_limit_seconds', check_int),
                ('allow_community_topic_editing', check_bool),
            ])),
        ])
        # Test every transition among the four possibilities {T,F} x {0, non-0}
        for (allow_message_editing, message_content_edit_limit_seconds) in \
            ((True, 0), (False, 0), (False, 1234),
             (True, 600), (False, 0), (True, 1234)):
            events = self.do_test(
                lambda: do_set_realm_message_editing(self.user_profile.realm,
                                                     allow_message_editing,
                                                     message_content_edit_limit_seconds,
                                                     False))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_realm_notifications_stream(self) -> None:
        """Setting and clearing the realm notifications stream sends a
        realm update event; clearing uses stream id -1."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals('notifications_stream_id')),
            ('value', check_int),
        ])
        stream = get_stream("Rome", self.user_profile.realm)
        for notifications_stream, notifications_stream_id in ((stream, stream.id), (None, -1)):
            events = self.do_test(
                lambda: do_set_realm_notifications_stream(self.user_profile.realm,
                                                          notifications_stream,
                                                          notifications_stream_id))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_realm_signup_notifications_stream(self) -> None:
        """Same as the notifications-stream test, for the signup
        notifications stream."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals('signup_notifications_stream_id')),
            ('value', check_int),
        ])
        stream = get_stream("Rome", self.user_profile.realm)
        for signup_notifications_stream, signup_notifications_stream_id in ((stream, stream.id), (None, -1)):
            events = self.do_test(
                lambda: do_set_realm_signup_notifications_stream(self.user_profile.realm,
                                                                 signup_notifications_stream,
                                                                 signup_notifications_stream_id))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
    def test_change_is_admin(self) -> None:
        """Granting and revoking admin rights each send a realm_user
        update event with the new is_admin value."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('email', check_string),
                ('is_admin', check_bool),
                ('user_id', check_int),
            ])),
        ])
        # Start from non-admin so both toggles below are real transitions.
        do_change_is_admin(self.user_profile, False)
        for is_admin in [True, False]:
            events = self.do_test(lambda: do_change_is_admin(self.user_profile, is_admin))
            error = schema_checker('events[0]', events[0])
            self.assert_on_error(error)
def do_set_user_display_settings_test(self, setting_name: str) -> None:
test_changes = dict(
emojiset = [u'twitter'],
default_language = [u'es', u'de', u'en'],
timezone = [u'US/Mountain', u'US/Samoa', u'Pacific/Galapogos', u''],
demote_inactive_streams = [2, 3, 1],
) # type: Dict[str, Any]
property_type = UserProfile.property_types[setting_name]
if property_type is bool:
validator = check_bool
elif property_type is str:
validator = check_string
elif property_type is int:
validator = check_int
else:
raise AssertionError("Unexpected property type %s" % (property_type,))
num_events = 1
if setting_name == "timezone":
num_events = 2
values = test_changes.get(setting_name)
if property_type is bool:
if getattr(self.user_profile, setting_name) is False:
values = [True, False, True]
else:
values = [False, True, False]
if values is None:
raise AssertionError('No test created for %s' % (setting_name,))
for value in values:
events = self.do_test(lambda: do_set_user_display_setting(
self.user_profile, setting_name, value), num_events=num_events)
schema_checker = self.check_events_dict([
('type', equals('update_display_settings')),
('setting_name', equals(setting_name)),
('user', check_string),
('setting', validator),
])
language_schema_checker = self.check_events_dict([
('type', equals('update_display_settings')),
('language_name', check_string),
('setting_name', equals(setting_name)),
('user', check_string),
('setting', validator),
])
if setting_name == "default_language":
error = language_schema_checker('events[0]', events[0])
else:
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
timezone_schema_checker = self.check_events_dict([
('type', equals('realm_user')),
('op', equals('update')),
('person', check_dict_only([
('email', check_string),
('user_id', check_int),
('timezone', check_string),
])),
])
if setting_name == "timezone":
error = timezone_schema_checker('events[1]', events[1])
@slow("Actually runs several full-stack fetching tests")
def test_set_user_display_settings(self) -> None:
for prop in UserProfile.property_types:
self.do_set_user_display_settings_test(prop)
    @slow("Actually runs several full-stack fetching tests")
    def test_change_notification_settings(self) -> None:
        """Toggle every boolean global notification setting and verify
        the update_global_notifications event schema, with and without
        notification_settings_null."""
        for notification_setting, v in self.user_profile.notification_setting_types.items():
            if notification_setting in ["notification_sound", "desktop_icon_count_display"]:
                # These settings are tested in their own tests.
                continue
            schema_checker = self.check_events_dict([
                ('type', equals('update_global_notifications')),
                ('notification_name', equals(notification_setting)),
                ('user', check_string),
                ('setting', check_bool),
            ])
            # Start from False so both toggles below are real transitions.
            do_change_notification_settings(self.user_profile, notification_setting, False)
            for setting_value in [True, False]:
                events = self.do_test(lambda: do_change_notification_settings(
                    self.user_profile, notification_setting, setting_value, log=False))
                error = schema_checker('events[0]', events[0])
                self.assert_on_error(error)
                # Also test with notification_settings_null=True
                events = self.do_test(
                    lambda: do_change_notification_settings(
                        self.user_profile, notification_setting, setting_value, log=False),
                    notification_settings_null=True,
                    state_change_expected=False)
                error = schema_checker('events[0]', events[0])
                self.assert_on_error(error)
def test_change_notification_sound(self) -> None:
notification_setting = "notification_sound"
schema_checker = self.check_events_dict([
('type', equals('update_global_notifications')),
('notification_name', equals(notification_setting)),
('user', check_string),
('setting', equals("ding")),
])
events = self.do_test(lambda: do_change_notification_settings(
self.user_profile, notification_setting, 'ding', log=False))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_change_desktop_icon_count_display(self) -> None:
        """Changing desktop_icon_count_display sends an event carrying
        the new integer value (tested for 2, then back to 1)."""
        notification_setting = "desktop_icon_count_display"
        schema_checker = self.check_events_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals(notification_setting)),
            ('user', check_string),
            ('setting', equals(2)),
        ])
        events = self.do_test(lambda: do_change_notification_settings(
            self.user_profile, notification_setting, 2, log=False))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        schema_checker = self.check_events_dict([
            ('type', equals('update_global_notifications')),
            ('notification_name', equals(notification_setting)),
            ('user', check_string),
            ('setting', equals(1)),
        ])
        events = self.do_test(lambda: do_change_notification_settings(
            self.user_profile, notification_setting, 1, log=False))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_realm_update_plan_type(self) -> None:
        """Downgrading the realm plan type sends a realm update event
        (with extra_data.upload_quota) and updates the fetched state."""
        realm = self.user_profile.realm
        state_data = fetch_initial_state_data(self.user_profile, None, "", False)
        self.assertEqual(state_data['realm_plan_type'], Realm.SELF_HOSTED)
        self.assertEqual(state_data['plan_includes_wide_organization_logo'], True)
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update')),
            ('property', equals('plan_type')),
            ('value', equals(Realm.LIMITED)),
            ('extra_data', check_dict_only([
                ('upload_quota', check_int)
            ])),
        ])
        events = self.do_test(lambda: do_change_plan_type(realm, Realm.LIMITED))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        # The derived state fields flip after the downgrade.
        state_data = fetch_initial_state_data(self.user_profile, None, "", False)
        self.assertEqual(state_data['realm_plan_type'], Realm.LIMITED)
        self.assertEqual(state_data['plan_includes_wide_organization_logo'], False)
    def test_realm_emoji_events(self) -> None:
        """Adding and removing a realm emoji each send a realm_emoji
        update event carrying the full emoji dict."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm_emoji')),
            ('op', equals('update')),
            ('realm_emoji', check_dict([])),
        ])
        author = self.example_user('iago')
        with get_test_image_file('img.png') as img_file:
            events = self.do_test(lambda: check_add_realm_emoji(self.user_profile.realm,
                                                                "my_emoji",
                                                                author,
                                                                img_file))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        events = self.do_test(lambda: do_remove_realm_emoji(self.user_profile.realm, "my_emoji"))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
def test_realm_filter_events(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('realm_filters')),
('realm_filters', check_list(None)), # TODO: validate tuples in the list
])
events = self.do_test(lambda: do_add_realm_filter(self.user_profile.realm, "#(?P<id>[123])",
"https://realm.com/my_realm_filter/%(id)s"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
self.do_test(lambda: do_remove_realm_filter(self.user_profile.realm, "#(?P<id>[123])"))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_realm_domain_events(self) -> None:
        """Walk a realm domain through add, change, and remove, and
        verify the realm_domains event schema for each operation."""
        # op=add
        schema_checker = self.check_events_dict([
            ('type', equals('realm_domains')),
            ('op', equals('add')),
            ('realm_domain', check_dict_only([
                ('domain', check_string),
                ('allow_subdomains', check_bool),
            ])),
        ])
        events = self.do_test(lambda: do_add_realm_domain(
            self.user_profile.realm, 'zulip.org', False))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        # op=change: flipping allow_subdomains on the domain added above.
        schema_checker = self.check_events_dict([
            ('type', equals('realm_domains')),
            ('op', equals('change')),
            ('realm_domain', check_dict_only([
                ('domain', equals('zulip.org')),
                ('allow_subdomains', equals(True)),
            ])),
        ])
        test_domain = RealmDomain.objects.get(realm=self.user_profile.realm,
                                              domain='zulip.org')
        events = self.do_test(lambda: do_change_realm_domain(test_domain, True))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        # op=remove
        schema_checker = self.check_events_dict([
            ('type', equals('realm_domains')),
            ('op', equals('remove')),
            ('domain', equals('zulip.org')),
        ])
        events = self.do_test(lambda: do_remove_realm_domain(test_domain))
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
def test_create_bot(self) -> None:
def get_bot_created_checker(bot_type: str) -> Validator:
if bot_type == "GENERIC_BOT":
check_services = check_list(sub_validator=None, length=0)
elif bot_type == "OUTGOING_WEBHOOK_BOT":
check_services = check_list(check_dict_only([
('base_url', check_url),
('interface', check_int),
('token', check_string),
]), length=1)
elif bot_type == "EMBEDDED_BOT":
check_services = check_list(check_dict_only([
('service_name', check_string),
('config_data', check_dict(value_validator=check_string)),
]), length=1)
return self.check_events_dict([
('type', equals('realm_bot')),
('op', equals('add')),
('bot', check_dict_only([
('email', check_string),
('user_id', check_int),
('bot_type', check_int),
('full_name', check_string),
('is_active', check_bool),
('api_key', check_string),
('default_sending_stream', check_none_or(check_string)),
('default_events_register_stream', check_none_or(check_string)),
('default_all_public_streams', check_bool),
('avatar_url', check_string),
('owner', check_string),
('services', check_services),
])),
])
action = lambda: self.create_bot('test')
events = self.do_test(action, num_events=3)
error = get_bot_created_checker(bot_type="GENERIC_BOT")('events[1]', events[1])
self.assert_on_error(error)
action = lambda: self.create_bot('test_outgoing_webhook',
full_name='Outgoing Webhook Bot',
payload_url=ujson.dumps('https://foo.bar.com'),
interface_type=Service.GENERIC,
bot_type=UserProfile.OUTGOING_WEBHOOK_BOT)
events = self.do_test(action, num_events=3)
# The third event is the second call of notify_created_bot, which contains additional
# data for services (in contrast to the first call).
error = get_bot_created_checker(bot_type="OUTGOING_WEBHOOK_BOT")('events[2]', events[2])
self.assert_on_error(error)
action = lambda: self.create_bot('test_embedded',
full_name='Embedded Bot',
service_name='helloworld',
config_data=ujson.dumps({'foo': 'bar'}),
bot_type=UserProfile.EMBEDDED_BOT)
events = self.do_test(action, num_events=3)
error = get_bot_created_checker(bot_type="EMBEDDED_BOT")('events[2]', events[2])
self.assert_on_error(error)
def test_change_bot_full_name(self) -> None:
bot = self.create_bot('test')
action = lambda: do_change_full_name(bot, 'New Bot Name', self.user_profile)
events = self.do_test(action, num_events=2)
error = self.realm_bot_schema('full_name', check_string)('events[1]', events[1])
self.assert_on_error(error)
def test_regenerate_bot_api_key(self) -> None:
bot = self.create_bot('test')
action = lambda: do_regenerate_api_key(bot, self.user_profile)
events = self.do_test(action)
error = self.realm_bot_schema('api_key', check_string)('events[0]', events[0])
self.assert_on_error(error)
    def test_change_bot_avatar_source(self) -> None:
        """Changing a bot's avatar sends a realm_bot avatar_url update
        plus a second realm_user event."""
        bot = self.create_bot('test')
        action = lambda: do_change_avatar_fields(bot, bot.AVATAR_FROM_USER)
        events = self.do_test(action, num_events=2)
        error = self.realm_bot_schema('avatar_url', check_string)('events[0]', events[0])
        self.assertEqual(events[1]['type'], 'realm_user')
        self.assert_on_error(error)
    def test_change_realm_icon_source(self) -> None:
        """Changing the realm icon source sends a realm/update_dict event
        with the icon URL and source."""
        action = lambda: do_change_icon_source(self.user_profile.realm, Realm.ICON_UPLOADED)
        events = self.do_test(action, state_change_expected=True)
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update_dict')),
            ('property', equals('icon')),
            ('data', check_dict_only([
                ('icon_url', check_string),
                ('icon_source', check_string),
            ])),
        ])
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_change_realm_day_mode_logo_source(self) -> None:
        """Changing the day-mode realm logo (night=False) sends a
        realm/update_dict event for the 'logo' property."""
        action = lambda: do_change_logo_source(self.user_profile.realm, Realm.LOGO_UPLOADED, False)
        events = self.do_test(action, state_change_expected=True)
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update_dict')),
            ('property', equals('logo')),
            ('data', check_dict_only([
                ('logo_url', check_string),
                ('logo_source', check_string),
            ])),
        ])
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_change_realm_night_mode_logo_source(self) -> None:
        """Changing the night-mode realm logo (night=True) sends a
        realm/update_dict event for the 'night_logo' property."""
        action = lambda: do_change_logo_source(self.user_profile.realm, Realm.LOGO_UPLOADED, True)
        events = self.do_test(action, state_change_expected=True)
        schema_checker = self.check_events_dict([
            ('type', equals('realm')),
            ('op', equals('update_dict')),
            ('property', equals('night_logo')),
            ('data', check_dict_only([
                ('night_logo_url', check_string),
                ('night_logo_source', check_string),
            ])),
        ])
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
def test_change_bot_default_all_public_streams(self) -> None:
bot = self.create_bot('test')
action = lambda: do_change_default_all_public_streams(bot, True)
events = self.do_test(action)
error = self.realm_bot_schema('default_all_public_streams', check_bool)('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_default_sending_stream(self) -> None:
bot = self.create_bot('test')
stream = get_stream("Rome", bot.realm)
action = lambda: do_change_default_sending_stream(bot, stream)
events = self.do_test(action)
error = self.realm_bot_schema('default_sending_stream', check_string)('events[0]', events[0])
self.assert_on_error(error)
action = lambda: do_change_default_sending_stream(bot, None)
events = self.do_test(action)
error = self.realm_bot_schema('default_sending_stream', equals(None))('events[0]', events[0])
self.assert_on_error(error)
def test_change_bot_default_events_register_stream(self) -> None:
bot = self.create_bot('test')
stream = get_stream("Rome", bot.realm)
action = lambda: do_change_default_events_register_stream(bot, stream)
events = self.do_test(action)
error = self.realm_bot_schema('default_events_register_stream', check_string)('events[0]', events[0])
self.assert_on_error(error)
action = lambda: do_change_default_events_register_stream(bot, None)
events = self.do_test(action)
error = self.realm_bot_schema('default_events_register_stream', equals(None))('events[0]', events[0])
self.assert_on_error(error)
    def test_change_bot_owner(self) -> None:
        """Verify the events emitted when a bot changes owner.

        Three scenarios are exercised in sequence (note that
        ``change_bot_owner_checker_bot`` is deliberately rebound between
        scenarios, since the realm_bot event shape differs per case):

        1. The acting admin keeps access to the bot -> realm_bot 'update'.
        2. The acting user loses access to the bot -> realm_bot 'delete'.
        3. The acting user gains access to the bot -> realm_bot 'add'.

        In every case the second event is a realm_user 'update' carrying the
        new bot_owner_id.
        """
        change_bot_owner_checker_user = self.check_events_dict([
            ('type', equals('realm_user')),
            ('op', equals('update')),
            ('person', check_dict_only([
                ('user_id', check_int),
                ('bot_owner_id', check_int),
            ])),
        ])
        change_bot_owner_checker_bot = self.check_events_dict([
            ('type', equals('realm_bot')),
            ('op', equals('update')),
            ('bot', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
                ('owner_id', check_int),
            ])),
        ])
        # Scenario 1: Iago (admin) gives his bot to Hamlet.
        self.user_profile = self.example_user('iago')
        owner = self.example_user('hamlet')
        bot = self.create_bot('test')
        action = lambda: do_change_bot_owner(bot, owner, self.user_profile)
        events = self.do_test(action, num_events=2)
        error = change_bot_owner_checker_bot('events[0]', events[0])
        self.assert_on_error(error)
        error = change_bot_owner_checker_user('events[1]', events[1])
        self.assert_on_error(error)
        # Scenario 2: Aaron gives his bot away; presumably Aaron then loses
        # access to it, so he sees a realm_bot 'delete' event — TODO confirm
        # against the do_change_bot_owner implementation.
        change_bot_owner_checker_bot = self.check_events_dict([
            ('type', equals('realm_bot')),
            ('op', equals('delete')),
            ('bot', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
            ])),
        ])
        self.user_profile = self.example_user('aaron')
        owner = self.example_user('hamlet')
        bot = self.create_bot('test1', full_name='Test1 Testerson')
        action = lambda: do_change_bot_owner(bot, owner, self.user_profile)
        events = self.do_test(action, num_events=2)
        error = change_bot_owner_checker_bot('events[0]', events[0])
        self.assert_on_error(error)
        error = change_bot_owner_checker_user('events[1]', events[1])
        self.assert_on_error(error)
        # Scenario 3: Hamlet becomes the owner of Aaron's bot and therefore
        # receives a full realm_bot 'add' payload for it.
        check_services = check_list(sub_validator=None, length=0)
        change_bot_owner_checker_bot = self.check_events_dict([
            ('type', equals('realm_bot')),
            ('op', equals('add')),
            ('bot', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
                ('bot_type', check_int),
                ('full_name', check_string),
                ('is_active', check_bool),
                ('api_key', check_string),
                ('default_sending_stream', check_none_or(check_string)),
                ('default_events_register_stream', check_none_or(check_string)),
                ('default_all_public_streams', check_bool),
                ('avatar_url', check_string),
                ('owner', check_string),
                ('services', check_services),
            ])),
        ])
        previous_owner = self.example_user('aaron')
        self.user_profile = self.example_user('hamlet')
        bot = self.create_test_bot('test2', previous_owner, full_name='Test2 Testerson')
        action = lambda: do_change_bot_owner(bot, self.user_profile, previous_owner)
        events = self.do_test(action, num_events=2)
        error = change_bot_owner_checker_bot('events[0]', events[0])
        self.assert_on_error(error)
        error = change_bot_owner_checker_user('events[1]', events[1])
        self.assert_on_error(error)
def test_do_update_outgoing_webhook_service(self):
# type: () -> None
update_outgoing_webhook_service_checker = self.check_events_dict([
('type', equals('realm_bot')),
('op', equals('update')),
('bot', check_dict_only([
('email', check_string),
('user_id', check_int),
('services', check_list(check_dict_only([
('base_url', check_url),
('interface', check_int),
('token', check_string),
]))),
])),
])
self.user_profile = self.example_user('iago')
bot = self.create_test_bot('test', self.user_profile,
full_name='Test Bot',
bot_type=UserProfile.OUTGOING_WEBHOOK_BOT,
payload_url=ujson.dumps('http://hostname.domain2.com'),
interface_type=Service.GENERIC,
)
action = lambda: do_update_outgoing_webhook_service(bot, 2, 'http://hostname.domain2.com')
events = self.do_test(action)
error = update_outgoing_webhook_service_checker('events[0]', events[0])
self.assert_on_error(error)
def test_do_deactivate_user(self) -> None:
bot_deactivate_checker = self.check_events_dict([
('type', equals('realm_bot')),
('op', equals('remove')),
('bot', check_dict_only([
('email', check_string),
('full_name', check_string),
('user_id', check_int),
])),
])
bot = self.create_bot('test')
action = lambda: do_deactivate_user(bot)
events = self.do_test(action, num_events=2)
error = bot_deactivate_checker('events[1]', events[1])
self.assert_on_error(error)
    def test_do_reactivate_user(self) -> None:
        """Reactivating a previously deactivated bot emits a full realm_bot
        'add' payload (the second of two events), including its services list.
        """
        bot_reactivate_checker = self.check_events_dict([
            ('type', equals('realm_bot')),
            ('op', equals('add')),
            ('bot', check_dict_only([
                ('email', check_string),
                ('user_id', check_int),
                ('bot_type', check_int),
                ('full_name', check_string),
                ('is_active', check_bool),
                ('api_key', check_string),
                ('default_sending_stream', check_none_or(check_string)),
                ('default_events_register_stream', check_none_or(check_string)),
                ('default_all_public_streams', check_bool),
                ('avatar_url', check_string),
                ('owner', check_none_or(check_string)),
                ('services', check_list(check_dict_only([
                    ('base_url', check_url),
                    ('interface', check_int),
                ]))),
            ])),
        ])
        bot = self.create_bot('test')
        # Deactivate outside of do_test so only the reactivation is captured.
        do_deactivate_user(bot)
        action = lambda: do_reactivate_user(bot)
        events = self.do_test(action, num_events=2)
        error = bot_reactivate_checker('events[1]', events[1])
        self.assert_on_error(error)
def test_do_mark_hotspot_as_read(self) -> None:
self.user_profile.tutorial_status = UserProfile.TUTORIAL_WAITING
self.user_profile.save(update_fields=['tutorial_status'])
schema_checker = self.check_events_dict([
('type', equals('hotspots')),
('hotspots', check_list(check_dict_only([
('name', check_string),
('title', check_string),
('description', check_string),
('delay', check_float),
]))),
])
events = self.do_test(lambda: do_mark_hotspot_as_read(self.user_profile, 'intro_reply'))
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
    def test_rename_stream(self) -> None:
        """Renaming a stream emits three events: an email_address update, a
        name update, and a notification-bot message announcing the rename."""
        stream = self.make_stream('old_name')
        new_name = u'stream with a brand new name'
        self.subscribe(self.user_profile, stream.name)
        # Expected rendered content of the notification-bot message; must match
        # the server's renderer byte-for-byte.
        notification = '<p><span class="user-mention silent" data-user-id="{user_id}">King Hamlet</span> renamed stream <strong>old_name</strong> to <strong>stream with a brand new name</strong>.</p>'
        notification = notification.format(user_id=self.user_profile.id)
        action = lambda: do_rename_stream(stream, new_name, self.user_profile)
        events = self.do_test(action, num_events=3)
        # Event 0: the stream's email address changes along with its name.
        schema_checker = self.check_events_dict([
            ('type', equals('stream')),
            ('op', equals('update')),
            ('property', equals('email_address')),
            ('value', check_string),
            ('stream_id', check_int),
            ('name', equals('old_name')),
        ])
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Event 1: the name update itself, still keyed by the old name.
        schema_checker = self.check_events_dict([
            ('type', equals('stream')),
            ('op', equals('update')),
            ('property', equals('name')),
            ('value', equals(new_name)),
            ('name', equals('old_name')),
            ('stream_id', check_int),
        ])
        error = schema_checker('events[1]', events[1])
        self.assert_on_error(error)
        # Event 2: the notification-bot message posted to the renamed stream.
        schema_checker = check_dict([
            ('flags', check_list(check_string)),
            ('type', equals('message')),
            ('message', check_dict([
                ('timestamp', check_int),
                ('content', equals(notification)),
                ('content_type', equals('text/html')),
                ('sender_email', equals('notification-bot@zulip.com')),
                ('sender_id', check_int),
                ('sender_short_name', equals('notification-bot')),
                ('display_recipient', equals(new_name)),
                ('id', check_int),
                ('stream_id', check_int),
                ('sender_realm_str', check_string),
                ('sender_full_name', equals('Notification Bot')),
                ('is_me_message', equals(False)),
                ('type', equals('stream')),
                ('submessages', check_list(check_string)),
                (TOPIC_LINKS, check_list(check_url)),
                ('avatar_url', check_url),
                ('reactions', check_list(None)),
                ('client', equals('Internal')),
                (TOPIC_NAME, equals('stream events')),
                ('recipient_id', check_int)
            ])),
            ('id', check_int)
        ])
        error = schema_checker('events[2]', events[2])
        self.assert_on_error(error)
def test_deactivate_stream_neversubscribed(self) -> None:
stream = self.make_stream('old_name')
action = lambda: do_deactivate_stream(stream)
events = self.do_test(action)
schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('delete')),
('streams', check_list(check_dict([]))),
])
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_subscribe_other_user_never_subscribed(self) -> None:
action = lambda: self.subscribe(self.example_user("othello"), u"test_stream")
events = self.do_test(action, num_events=2)
peer_add_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('peer_add')),
('user_id', check_int),
('subscriptions', check_list(check_string)),
])
error = peer_add_schema_checker('events[1]', events[1])
self.assert_on_error(error)
    @slow("Actually several tests combined together")
    def test_subscribe_events(self) -> None:
        """Run the subscription event suite with subscriber lists included."""
        self.do_test_subscribe_events(include_subscribers=True)
    @slow("Actually several tests combined together")
    def test_subscribe_events_no_include_subscribers(self) -> None:
        """Run the subscription event suite without subscriber lists."""
        self.do_test_subscribe_events(include_subscribers=False)
def do_test_subscribe_events(self, include_subscribers: bool) -> None:
subscription_fields = [
('color', check_string),
('description', check_string),
('rendered_description', check_string),
('email_address', check_string),
('invite_only', check_bool),
('is_web_public', check_bool),
('is_announcement_only', check_bool),
('is_muted', check_bool),
('in_home_view', check_bool),
('name', check_string),
('audible_notifications', check_none_or(check_bool)),
('email_notifications', check_none_or(check_bool)),
('desktop_notifications', check_none_or(check_bool)),
('push_notifications', check_none_or(check_bool)),
('stream_id', check_int),
('first_message_id', check_none_or(check_int)),
('history_public_to_subscribers', check_bool),
('pin_to_top', check_bool),
('stream_weekly_traffic', check_none_or(check_int)),
('is_old_stream', check_bool),
]
if include_subscribers:
subscription_fields.append(('subscribers', check_list(check_int)))
subscription_schema_checker = check_list(
check_dict_only(subscription_fields),
)
stream_create_schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('create')),
('streams', check_list(check_dict_only([
('name', check_string),
('stream_id', check_int),
('invite_only', check_bool),
('description', check_string),
('rendered_description', check_string),
]))),
])
add_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('add')),
('subscriptions', subscription_schema_checker),
])
remove_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('remove')),
('subscriptions', check_list(
check_dict_only([
('name', equals('test_stream')),
('stream_id', check_int),
]),
)),
])
peer_add_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('peer_add')),
('user_id', check_int),
('subscriptions', check_list(check_string)),
])
peer_remove_schema_checker = self.check_events_dict([
('type', equals('subscription')),
('op', equals('peer_remove')),
('user_id', check_int),
('subscriptions', check_list(check_string)),
])
stream_update_schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('description')),
('value', check_string),
('rendered_description', check_string),
('stream_id', check_int),
('name', check_string),
])
stream_update_invite_only_schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('invite_only')),
('stream_id', check_int),
('name', check_string),
('value', check_bool),
('history_public_to_subscribers', check_bool),
])
stream_update_is_announcement_only_schema_checker = self.check_events_dict([
('type', equals('stream')),
('op', equals('update')),
('property', equals('is_announcement_only')),
('stream_id', check_int),
('name', check_string),
('value', check_bool),
])
# Subscribe to a totally new stream, so it's just Hamlet on it
action = lambda: self.subscribe(self.example_user("hamlet"), "test_stream")
events = self.do_test(action, event_types=["subscription", "realm_user"],
include_subscribers=include_subscribers)
error = add_schema_checker('events[0]', events[0])
self.assert_on_error(error)
action = lambda: self.subscribe(self.example_user("othello"), "test_stream")
events = self.do_test(action,
include_subscribers=include_subscribers,
state_change_expected=include_subscribers,
)
error = peer_add_schema_checker('events[0]', events[0])
self.assert_on_error(error)
stream = get_stream("test_stream", self.user_profile.realm)
action = lambda: bulk_remove_subscriptions(
[self.example_user('othello')],
[stream],
get_client("website"))
events = self.do_test(action,
include_subscribers=include_subscribers,
state_change_expected=include_subscribers,
)
error = peer_remove_schema_checker('events[0]', events[0])
self.assert_on_error(error)
action = lambda: bulk_remove_subscriptions(
[self.example_user('hamlet')],
[stream],
get_client("website"))
events = self.do_test(action,
include_subscribers=include_subscribers,
num_events=3)
error = remove_schema_checker('events[0]', events[0])
self.assert_on_error(error)
action = lambda: self.subscribe(self.example_user("hamlet"), "test_stream")
events = self.do_test(action,
include_subscribers=include_subscribers,
num_events=2)
error = add_schema_checker('events[1]', events[1])
self.assert_on_error(error)
action = lambda: do_change_stream_description(stream, u'new description')
events = self.do_test(action,
include_subscribers=include_subscribers)
error = stream_update_schema_checker('events[0]', events[0])
self.assert_on_error(error)
action = lambda: do_change_stream_invite_only(stream, True, history_public_to_subscribers=True)
events = self.do_test(action,
include_subscribers=include_subscribers)
error = stream_update_invite_only_schema_checker('events[0]', events[0])
self.assert_on_error(error)
action = lambda: do_change_stream_announcement_only(stream, True)
events = self.do_test(action,
include_subscribers=include_subscribers)
error = stream_update_is_announcement_only_schema_checker('events[0]', events[0])
self.assert_on_error(error)
stream = self.make_stream("private", self.user_profile.realm, invite_only=True)
user_profile = self.example_user('hamlet')
action = lambda: bulk_add_subscriptions([stream], [user_profile])
events = self.do_test(action, include_subscribers=include_subscribers,
num_events=2)
error = stream_create_schema_checker('events[0]', events[0])
error = add_schema_checker('events[1]', events[1])
self.assert_on_error(error)
def test_do_delete_message_stream(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('delete_message')),
('message_id', check_int),
('sender', check_string),
('sender_id', check_int),
('message_type', equals("stream")),
('stream_id', check_int),
('topic', check_string),
])
msg_id = self.send_stream_message("hamlet@zulip.com", "Verona")
message = Message.objects.get(id=msg_id)
events = self.do_test(
lambda: do_delete_messages(self.user_profile, [message]),
state_change_expected=True,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_do_delete_message_personal(self) -> None:
schema_checker = self.check_events_dict([
('type', equals('delete_message')),
('message_id', check_int),
('sender', check_string),
('sender_id', check_int),
('message_type', equals("private")),
('recipient_id', check_int),
])
msg_id = self.send_personal_message(
self.example_email("cordelia"),
self.user_profile.email,
"hello",
)
message = Message.objects.get(id=msg_id)
events = self.do_test(
lambda: do_delete_messages(self.user_profile, [message]),
state_change_expected=True,
)
error = schema_checker('events[0]', events[0])
self.assert_on_error(error)
def test_do_delete_message_no_max_id(self) -> None:
user_profile = self.example_user('aaron')
# Delete all historical messages for this user
user_profile = self.example_user('hamlet')
UserMessage.objects.filter(user_profile=user_profile).delete()
msg_id = self.send_stream_message("hamlet@zulip.com", "Verona")
message = Message.objects.get(id=msg_id)
self.do_test(
lambda: do_delete_messages(self.user_profile, [message]),
state_change_expected=True,
)
result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
self.assertEqual(result['max_message_id'], -1)
    def test_add_attachment(self) -> None:
        """Verify the attachment event lifecycle: upload ('add'), reference in
        a message ('update'), and deletion ('remove'), each with the expected
        upload_space_used value (6 bytes for "zulip!", 0 after removal)."""
        schema_checker = self.check_events_dict([
            ('type', equals('attachment')),
            ('op', equals('add')),
            ('attachment', check_dict_only([
                ('id', check_int),
                ('name', check_string),
                ('size', check_int),
                ('path_id', check_string),
                ('create_time', check_float),
                ('messages', check_list(check_dict_only([
                    ('id', check_int),
                    ('name', check_float),
                ]))),
            ])),
            ('upload_space_used', equals(6)),
        ])
        self.login(self.example_email("hamlet"))
        fp = StringIO("zulip!")
        fp.name = "zulip.txt"
        # Mutable cell so the closure below can hand the upload URI back out.
        data = {'uri': None}
        def do_upload() -> None:
            # Upload the file and stash its URI for the later message body.
            result = self.client_post("/json/user_uploads", {'file': fp})
            self.assert_json_success(result)
            self.assertIn("uri", result.json())
            uri = result.json()["uri"]
            base = '/user_uploads/'
            self.assertEqual(base, uri[:len(base)])
            data['uri'] = uri
        events = self.do_test(
            lambda: do_upload(),
            num_events=1, state_change_expected=False)
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Verify that the DB has the attachment marked as unclaimed
        entry = Attachment.objects.get(file_name='zulip.txt')
        self.assertEqual(entry.is_claimed(), False)
        # Now we send an actual message using this attachment.
        schema_checker = self.check_events_dict([
            ('type', equals('attachment')),
            ('op', equals('update')),
            ('attachment', check_dict_only([
                ('id', check_int),
                ('name', check_string),
                ('size', check_int),
                ('path_id', check_string),
                ('create_time', check_float),
                ('messages', check_list(check_dict_only([
                    ('id', check_int),
                    ('name', check_float),
                ]))),
            ])),
            ('upload_space_used', equals(6)),
        ])
        self.subscribe(self.example_user("hamlet"), "Denmark")
        body = "First message ...[zulip.txt](http://localhost:9991" + data['uri'] + ")"
        events = self.do_test(
            lambda: self.send_stream_message(self.example_email("hamlet"), "Denmark", body, "test"),
            num_events=2)
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
        # Now remove the attachment
        schema_checker = self.check_events_dict([
            ('type', equals('attachment')),
            ('op', equals('remove')),
            ('attachment', check_dict_only([
                ('id', check_int),
            ])),
            ('upload_space_used', equals(0)),
        ])
        events = self.do_test(
            lambda: self.client_delete("/json/attachments/%s" % (entry.id,)),
            num_events=1, state_change_expected=False)
        error = schema_checker('events[0]', events[0])
        self.assert_on_error(error)
    def test_notify_realm_export(self) -> None:
        """Verify the realm_export events emitted when an admin starts a realm
        export (deleted_timestamp None) and later deletes it (float timestamp).
        The actual export job is mocked out with a dummy tarball."""
        schema_checker = self.check_events_dict([
            ('type', equals('realm_export')),
            ('exports', check_list(check_dict_only([
                ('id', check_int),
                ('export_time', check_float),
                ('acting_user_id', check_int),
                ('export_url', check_string),
                ('deleted_timestamp', equals(None)),
            ]))),
        ])
        do_change_is_admin(self.user_profile, True)
        self.login(self.user_profile.email)
        with mock.patch('zerver.lib.export.do_export_realm',
                        return_value=create_dummy_file('test-export.tar.gz')):
            with stdout_suppressed():
                events = self.do_test(
                    lambda: self.client_post('/json/export/realm'),
                    state_change_expected=True, num_events=2)
        # The first event is a message from notification-bot.
        error = schema_checker('events[1]', events[1])
        self.assert_on_error(error)
        # Now we check the deletion of the export.
        deletion_schema_checker = self.check_events_dict([
            ('type', equals('realm_export')),
            ('exports', check_list(check_dict_only([
                ('id', check_int),
                ('export_time', check_float),
                ('acting_user_id', check_int),
                ('export_url', check_string),
                ('deleted_timestamp', check_float),
            ]))),
        ])
        audit_log_entry = RealmAuditLog.objects.filter(
            event_type=RealmAuditLog.REALM_EXPORTED).first()
        events = self.do_test(
            lambda: self.client_delete('/json/export/realm/{id}'.format(id=audit_log_entry.id)),
            state_change_expected=False, num_events=1)
        error = deletion_schema_checker('events[0]', events[0])
        self.assert_on_error(error)
class FetchInitialStateDataTest(ZulipTestCase):
    """Tests for fetch_initial_state_data, the builder of the /register
    payload: bot visibility, max_message_id defaults, and delivery_email
    visibility under the realm's email_address_visibility policy."""
    # Non-admin users don't have access to all bots
    def test_realm_bots_non_admin(self) -> None:
        """Non-admins see no realm_bots, and no bot API key leaks anywhere in
        the payload."""
        user_profile = self.example_user('cordelia')
        self.assertFalse(user_profile.is_realm_admin)
        result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
        self.assert_length(result['realm_bots'], 0)
        api_key = get_api_key(self.notification_bot())
        self.assertNotIn(api_key, str(result))
    def test_realm_bots_e(self) -> None:
        """Admins see the realm's bots (more than 2 in the test fixture)."""
        user_profile = self.example_user('hamlet')
        do_change_is_admin(user_profile, True)
        self.assertTrue(user_profile.is_realm_admin)
        result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
        self.assertTrue(len(result['realm_bots']) > 2)
    def test_max_message_id_with_no_history(self) -> None:
        """A user with no UserMessage rows gets max_message_id == -1."""
        user_profile = self.example_user('aaron')
        UserMessage.objects.filter(user_profile=user_profile).delete()
        result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
        self.assertEqual(result['max_message_id'], -1)
    def test_delivery_email_presence_for_non_admins(self) -> None:
        """Non-admins never receive delivery_email, regardless of the realm's
        email_address_visibility setting."""
        user_profile = self.example_user('aaron')
        self.assertFalse(user_profile.is_realm_admin)
        do_set_realm_property(user_profile.realm, "email_address_visibility",
                              Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE)
        result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
        for key, value in result['raw_users'].items():
            self.assertNotIn('delivery_email', value)
        do_set_realm_property(user_profile.realm, "email_address_visibility",
                              Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
        result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
        for key, value in result['raw_users'].items():
            self.assertNotIn('delivery_email', value)
    def test_delivery_email_presence_for_admins(self) -> None:
        """Admins receive delivery_email only under the ADMINS visibility
        policy, not under EVERYONE."""
        user_profile = self.example_user('iago')
        self.assertTrue(user_profile.is_realm_admin)
        do_set_realm_property(user_profile.realm, "email_address_visibility",
                              Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE)
        result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
        for key, value in result['raw_users'].items():
            self.assertNotIn('delivery_email', value)
        do_set_realm_property(user_profile.realm, "email_address_visibility",
                              Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
        result = fetch_initial_state_data(user_profile, None, "", client_gravatar=False)
        for key, value in result['raw_users'].items():
            self.assertIn('delivery_email', value)
class GetUnreadMsgsTest(ZulipTestCase):
    """Tests for get_raw_unread_data / aggregate_unread_data: raw unread
    bookkeeping for streams, huddles, and PMs, plus mention counting across
    muted streams and muted topics."""
    def mute_stream(self, user_profile: UserProfile, stream: Stream) -> None:
        # Helper: mark the user's subscription to `stream` as muted.
        recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
        subscription = Subscription.objects.get(
            user_profile=user_profile,
            recipient=recipient
        )
        subscription.is_muted = True
        subscription.save()
    def mute_topic(self, user_profile: UserProfile, stream_name: str,
                   topic_name: str) -> None:
        # Helper: add a topic-mute row for the user on (stream, topic).
        realm = user_profile.realm
        stream = get_stream(stream_name, realm)
        recipient = get_stream_recipient(stream.id)
        add_topic_mute(
            user_profile=user_profile,
            stream_id=stream.id,
            recipient_id=recipient.id,
            topic_name=topic_name,
        )
    def test_raw_unread_stream(self) -> None:
        """stream_dict tracks all unread stream messages, while
        unmuted_stream_msgs excludes muted streams and muted topics."""
        cordelia = self.example_user('cordelia')
        hamlet = self.example_user('hamlet')
        realm = hamlet.realm
        for stream_name in ['social', 'devel', 'test here']:
            self.subscribe(hamlet, stream_name)
            self.subscribe(cordelia, stream_name)
        all_message_ids = set()
        message_ids = dict()
        # (stream, topic) pairs; 3 messages each -> 12 unread total.
        tups = [
            ('social', 'lunch'),
            ('test here', 'bla'),
            ('devel', 'python'),
            ('devel', 'ruby'),
        ]
        for stream_name, topic_name in tups:
            message_ids[topic_name] = [
                self.send_stream_message(
                    sender_email=cordelia.email,
                    stream_name=stream_name,
                    topic_name=topic_name,
                ) for i in range(3)
            ]
            all_message_ids |= set(message_ids[topic_name])
        self.assertEqual(len(all_message_ids), 12)
        # Mute the 'test here' stream and the devel/ruby topic; only
        # social/lunch and devel/python should remain unmuted.
        self.mute_stream(
            user_profile=hamlet,
            stream=get_stream('test here', realm),
        )
        self.mute_topic(
            user_profile=hamlet,
            stream_name='devel',
            topic_name='ruby',
        )
        raw_unread_data = get_raw_unread_data(
            user_profile=hamlet,
        )
        stream_dict = raw_unread_data['stream_dict']
        self.assertEqual(
            set(stream_dict.keys()),
            all_message_ids,
        )
        self.assertEqual(
            raw_unread_data['unmuted_stream_msgs'],
            set(message_ids['python']) | set(message_ids['lunch']),
        )
        self.assertEqual(
            stream_dict[message_ids['lunch'][0]],
            dict(
                sender_id=cordelia.id,
                stream_id=get_stream('social', realm).id,
                topic='lunch',
            )
        )
    def test_raw_unread_huddle(self) -> None:
        """huddle_dict keys unread huddle messages by id and records the
        sorted, comma-joined participant ids."""
        cordelia = self.example_user('cordelia')
        othello = self.example_user('othello')
        hamlet = self.example_user('hamlet')
        prospero = self.example_user('prospero')
        huddle1_message_ids = [
            self.send_huddle_message(
                cordelia.email,
                [hamlet.email, othello.email]
            )
            for i in range(3)
        ]
        huddle2_message_ids = [
            self.send_huddle_message(
                cordelia.email,
                [hamlet.email, prospero.email]
            )
            for i in range(3)
        ]
        raw_unread_data = get_raw_unread_data(
            user_profile=hamlet,
        )
        huddle_dict = raw_unread_data['huddle_dict']
        self.assertEqual(
            set(huddle_dict.keys()),
            set(huddle1_message_ids) | set(huddle2_message_ids)
        )
        huddle_string = ','.join(
            str(uid)
            for uid in sorted([cordelia.id, hamlet.id, othello.id])
        )
        self.assertEqual(
            huddle_dict[huddle1_message_ids[0]],
            dict(user_ids_string=huddle_string),
        )
    def test_raw_unread_personal(self) -> None:
        """pm_dict keys unread private messages by id and records the sender."""
        cordelia = self.example_user('cordelia')
        othello = self.example_user('othello')
        hamlet = self.example_user('hamlet')
        cordelia_pm_message_ids = [
            self.send_personal_message(cordelia.email, hamlet.email)
            for i in range(3)
        ]
        othello_pm_message_ids = [
            self.send_personal_message(othello.email, hamlet.email)
            for i in range(3)
        ]
        raw_unread_data = get_raw_unread_data(
            user_profile=hamlet,
        )
        pm_dict = raw_unread_data['pm_dict']
        self.assertEqual(
            set(pm_dict.keys()),
            set(cordelia_pm_message_ids) | set(othello_pm_message_ids)
        )
        self.assertEqual(
            pm_dict[cordelia_pm_message_ids[0]],
            dict(sender_id=cordelia.id),
        )
    def test_unread_msgs(self) -> None:
        """End-to-end check of the aggregated unread payload: counts, per-
        stream/PM/huddle breakdowns, and how the mentioned / wildcard /
        alert-word flags interact with muted streams and topics."""
        cordelia = self.example_user('cordelia')
        sender_id = cordelia.id
        sender_email = cordelia.email
        user_profile = self.example_user('hamlet')
        othello = self.example_user('othello')
        # The ordering assertions below rely on this email ordering.
        assert(sender_email < user_profile.email)
        assert(user_profile.email < othello.email)
        pm1_message_id = self.send_personal_message(sender_email, user_profile.email, "hello1")
        pm2_message_id = self.send_personal_message(sender_email, user_profile.email, "hello2")
        muted_stream = self.subscribe(user_profile, 'Muted Stream')
        self.mute_stream(user_profile, muted_stream)
        self.mute_topic(user_profile, 'Denmark', 'muted-topic')
        stream_message_id = self.send_stream_message(sender_email, "Denmark", "hello")
        muted_stream_message_id = self.send_stream_message(sender_email, "Muted Stream", "hello")
        muted_topic_message_id = self.send_stream_message(
            sender_email,
            "Denmark",
            topic_name="muted-topic",
            content="hello",
        )
        huddle_message_id = self.send_huddle_message(
            sender_email,
            [user_profile.email, othello.email],
            'hello3',
        )
        def get_unread_data() -> UnreadMessagesResult:
            # Re-aggregate from scratch after each flag mutation below.
            raw_unread_data = get_raw_unread_data(user_profile)
            aggregated_data = aggregate_unread_data(raw_unread_data)
            return aggregated_data
        result = get_unread_data()
        # Muted-stream and muted-topic messages are excluded from the count.
        self.assertEqual(result['count'], 4)
        unread_pm = result['pms'][0]
        self.assertEqual(unread_pm['sender_id'], sender_id)
        self.assertEqual(unread_pm['unread_message_ids'], [pm1_message_id, pm2_message_id])
        self.assertTrue('sender_ids' not in unread_pm)
        unread_stream = result['streams'][0]
        self.assertEqual(unread_stream['stream_id'], get_stream('Denmark', user_profile.realm).id)
        self.assertEqual(unread_stream['topic'], 'muted-topic')
        self.assertEqual(unread_stream['unread_message_ids'], [muted_topic_message_id])
        self.assertEqual(unread_stream['sender_ids'], [sender_id])
        unread_stream = result['streams'][1]
        self.assertEqual(unread_stream['stream_id'], get_stream('Denmark', user_profile.realm).id)
        self.assertEqual(unread_stream['topic'], 'test')
        self.assertEqual(unread_stream['unread_message_ids'], [stream_message_id])
        self.assertEqual(unread_stream['sender_ids'], [sender_id])
        unread_stream = result['streams'][2]
        self.assertEqual(unread_stream['stream_id'], get_stream('Muted Stream', user_profile.realm).id)
        self.assertEqual(unread_stream['topic'], 'test')
        self.assertEqual(unread_stream['unread_message_ids'], [muted_stream_message_id])
        self.assertEqual(unread_stream['sender_ids'], [sender_id])
        huddle_string = ','.join(str(uid) for uid in sorted([sender_id, user_profile.id, othello.id]))
        unread_huddle = result['huddles'][0]
        self.assertEqual(unread_huddle['user_ids_string'], huddle_string)
        self.assertEqual(unread_huddle['unread_message_ids'], [huddle_message_id])
        self.assertTrue('sender_ids' not in unread_huddle)
        self.assertEqual(result['mentions'], [])
        # Flag permutations on an ordinary (unmuted) stream message.
        um = UserMessage.objects.get(
            user_profile_id=user_profile.id,
            message_id=stream_message_id
        )
        um.flags |= UserMessage.flags.mentioned
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [stream_message_id])
        um.flags = UserMessage.flags.has_alert_word
        um.save()
        result = get_unread_data()
        # TODO: This should change when we make alert words work better.
        self.assertEqual(result['mentions'], [])
        um.flags = UserMessage.flags.wildcard_mentioned
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [stream_message_id])
        um.flags = 0
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [])
        # Test with a muted stream
        um = UserMessage.objects.get(
            user_profile_id=user_profile.id,
            message_id=muted_stream_message_id
        )
        um.flags = UserMessage.flags.mentioned
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [muted_stream_message_id])
        um.flags = UserMessage.flags.has_alert_word
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [])
        um.flags = UserMessage.flags.wildcard_mentioned
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [])
        um.flags = 0
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [])
        # Test with a muted topic
        um = UserMessage.objects.get(
            user_profile_id=user_profile.id,
            message_id=muted_topic_message_id
        )
        um.flags = UserMessage.flags.mentioned
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [muted_topic_message_id])
        um.flags = UserMessage.flags.has_alert_word
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [])
        um.flags = UserMessage.flags.wildcard_mentioned
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [])
        um.flags = 0
        um.save()
        result = get_unread_data()
        self.assertEqual(result['mentions'], [])
class ClientDescriptorsTest(ZulipTestCase):
    """Tests for how message events are mapped onto registered client
    descriptors (event queues) and how messages are rendered per client."""
    def test_get_client_info_for_all_public_streams(self) -> None:
        """A queue registered with all_public_streams=True receives stream
        message events even when its user is absent from the event's
        `users` list; `is_sender` is derived from `sender_queue_id`."""
        hamlet = self.example_user('hamlet')
        realm = hamlet.realm
        queue_data = dict(
            all_public_streams=True,
            apply_markdown=True,
            client_gravatar=True,
            client_type_name='website',
            event_types=['message'],
            last_connection_time=time.time(),
            queue_timeout=0,
            realm_id=realm.id,
            user_profile_id=hamlet.id,
        )
        client = allocate_client_descriptor(queue_data)
        message_event = dict(
            realm_id=realm.id,
            stream_name='whatever',
        )
        client_info = get_client_info_for_message_event(
            message_event,
            users=[],
        )
        # Even with an empty `users` list, the queue is included because it
        # subscribed to all public streams in this realm.
        self.assertEqual(len(client_info), 1)
        dct = client_info[client.event_queue.id]
        self.assertEqual(dct['client'].apply_markdown, True)
        self.assertEqual(dct['client'].client_gravatar, True)
        self.assertEqual(dct['client'].user_profile_id, hamlet.id)
        self.assertEqual(dct['flags'], [])
        self.assertEqual(dct['is_sender'], False)
        # When the event carries our own queue id as sender_queue_id,
        # is_sender flips to True.
        message_event = dict(
            realm_id=realm.id,
            stream_name='whatever',
            sender_queue_id=client.event_queue.id,
        )
        client_info = get_client_info_for_message_event(
            message_event,
            users=[],
        )
        dct = client_info[client.event_queue.id]
        self.assertEqual(dct['is_sender'], True)
    def test_get_client_info_for_normal_users(self) -> None:
        """A queue with all_public_streams=False only receives events when
        its user appears in the event's `users` list; per-user flags are
        passed through unchanged."""
        hamlet = self.example_user('hamlet')
        cordelia = self.example_user('cordelia')
        realm = hamlet.realm
        def test_get_info(apply_markdown: bool, client_gravatar: bool) -> None:
            # Helper run once per combination of rendering options.
            clear_client_event_queues_for_testing()
            queue_data = dict(
                all_public_streams=False,
                apply_markdown=apply_markdown,
                client_gravatar=client_gravatar,
                client_type_name='website',
                event_types=['message'],
                last_connection_time=time.time(),
                queue_timeout=0,
                realm_id=realm.id,
                user_profile_id=hamlet.id,
            )
            client = allocate_client_descriptor(queue_data)
            message_event = dict(
                realm_id=realm.id,
                stream_name='whatever',
            )
            client_info = get_client_info_for_message_event(
                message_event,
                users=[
                    dict(id=cordelia.id),
                ],
            )
            # Hamlet's queue is excluded for an event addressed only to
            # Cordelia.
            self.assertEqual(len(client_info), 0)
            client_info = get_client_info_for_message_event(
                message_event,
                users=[
                    dict(id=cordelia.id),
                    dict(id=hamlet.id, flags=['mentioned']),
                ],
            )
            self.assertEqual(len(client_info), 1)
            dct = client_info[client.event_queue.id]
            self.assertEqual(dct['client'].apply_markdown, apply_markdown)
            self.assertEqual(dct['client'].client_gravatar, client_gravatar)
            self.assertEqual(dct['client'].user_profile_id, hamlet.id)
            self.assertEqual(dct['flags'], ['mentioned'])
            self.assertEqual(dct['is_sender'], False)
        # Exercise every combination of the two per-queue rendering options.
        test_get_info(apply_markdown=False, client_gravatar=False)
        test_get_info(apply_markdown=True, client_gravatar=False)
        test_get_info(apply_markdown=False, client_gravatar=True)
        test_get_info(apply_markdown=True, client_gravatar=True)
    def test_process_message_event_with_mocked_client_info(self) -> None:
        """process_message_event() renders the message per client:
        rendered HTML vs. raw markdown depending on apply_markdown, and
        avatar_url handling depending on client_gravatar."""
        hamlet = self.example_user("hamlet")
        class MockClient:
            # Minimal stand-in for a ClientDescriptor; records every event
            # delivered to it via add_event().
            def __init__(self, user_profile_id: int,
                         apply_markdown: bool,
                         client_gravatar: bool) -> None:
                self.user_profile_id = user_profile_id
                self.apply_markdown = apply_markdown
                self.client_gravatar = client_gravatar
                self.client_type_name = 'whatever'
                self.events = []  # type: List[Dict[str, Any]]
            def accepts_messages(self) -> bool:
                return True
            def accepts_event(self, event: Dict[str, Any]) -> bool:
                assert(event['type'] == 'message')
                return True
            def add_event(self, event: Dict[str, Any]) -> None:
                self.events.append(event)
        # One client per combination of (apply_markdown, client_gravatar).
        client1 = MockClient(
            user_profile_id=hamlet.id,
            apply_markdown=True,
            client_gravatar=False,
        )
        client2 = MockClient(
            user_profile_id=hamlet.id,
            apply_markdown=False,
            client_gravatar=False,
        )
        client3 = MockClient(
            user_profile_id=hamlet.id,
            apply_markdown=True,
            client_gravatar=True,
        )
        client4 = MockClient(
            user_profile_id=hamlet.id,
            apply_markdown=False,
            client_gravatar=True,
        )
        client_info = {
            'client:1': dict(
                client=client1,
                flags=['starred'],
            ),
            'client:2': dict(
                client=client2,
                flags=['has_alert_word'],
            ),
            'client:3': dict(
                client=client3,
                flags=[],
            ),
            'client:4': dict(
                client=client4,
                flags=[],
            ),
        }
        sender = hamlet
        message_event = dict(
            message_dict=dict(
                id=999,
                content='**hello**',
                rendered_content='<b>hello</b>',
                sender_id=sender.id,
                type='stream',
                client='website',
                # NOTE: Some of these fields are clutter, but some
                # will be useful when we let clients specify
                # that they can compute their own gravatar URLs.
                sender_email=sender.email,
                sender_realm_id=sender.realm_id,
                sender_avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,
                sender_avatar_version=1,
                sender_is_mirror_dummy=None,
                recipient_type=None,
                recipient_type_id=None,
            ),
        )
        # Setting users to `[]` bypasses code we don't care about
        # here; client_info above is injected directly via the mock.
        users = []
        with mock.patch('zerver.tornado.event_queue.get_client_info_for_message_event',
                        return_value=client_info):
            process_message_event(message_event, users)
        # Gravatar-based avatar URLs vary; sanity-check and then strip them
        # so the equality assertions below stay simple.
        for client in [client1, client2]:
            message = client.events[0]['message']
            self.assertIn('gravatar.com', message['avatar_url'])
            message.pop('avatar_url')
        # apply_markdown=True -> rendered HTML content.
        self.assertEqual(client1.events, [
            dict(
                type='message',
                message=dict(
                    type='stream',
                    sender_id=sender.id,
                    sender_email=sender.email,
                    id=999,
                    content='<b>hello</b>',
                    content_type='text/html',
                    client='website',
                ),
                flags=['starred'],
            ),
        ])
        # apply_markdown=False -> raw markdown content.
        self.assertEqual(client2.events, [
            dict(
                type='message',
                message=dict(
                    type='stream',
                    sender_id=sender.id,
                    sender_email=sender.email,
                    id=999,
                    content='**hello**',
                    content_type='text/x-markdown',
                    client='website',
                ),
                flags=['has_alert_word'],
            ),
        ])
        # client_gravatar=True -> avatar_url is left None for the client
        # to compute.
        self.assertEqual(client3.events, [
            dict(
                type='message',
                message=dict(
                    type='stream',
                    sender_id=sender.id,
                    sender_email=sender.email,
                    avatar_url=None,
                    id=999,
                    content='<b>hello</b>',
                    content_type='text/html',
                    client='website',
                ),
                flags=[],
            ),
        ])
        self.assertEqual(client4.events, [
            dict(
                type='message',
                message=dict(
                    type='stream',
                    sender_id=sender.id,
                    sender_email=sender.email,
                    avatar_url=None,
                    id=999,
                    content='**hello**',
                    content_type='text/x-markdown',
                    client='website',
                ),
                flags=[],
            ),
        ])
class FetchQueriesTest(ZulipTestCase):
    """Pin the number of database queries issued by
    fetch_initial_state_data, both overall and per event type."""
    def test_queries(self) -> None:
        """Fetch all initial state and verify the total query count, then
        re-fetch each event type individually and verify its count."""
        user = self.example_user("hamlet")
        self.login(user.email)
        flush_per_request_caches()
        with queries_captured() as queries:
            # Patch always_want so we can record which event types the
            # fetch actually considered.
            with mock.patch('zerver.lib.events.always_want') as want_mock:
                fetch_initial_state_data(
                    user_profile=user,
                    event_types=None,
                    queue_id='x',
                    client_gravatar=False,
                )
        # Total query budget for a full initial fetch; update deliberately
        # if the fetch legitimately changes.
        self.assert_length(queries, 33)
        # Per-event-type query budgets.
        expected_counts = dict(
            alert_words=0,
            custom_profile_fields=1,
            default_streams=1,
            default_stream_groups=1,
            hotspots=0,
            message=1,
            muted_topics=1,
            pointer=0,
            presence=3,
            realm=0,
            realm_bot=1,
            realm_domains=1,
            realm_embedded_bots=0,
            realm_incoming_webhook_bots=0,
            realm_emoji=1,
            realm_filters=1,
            realm_user=3,
            realm_user_groups=2,
            recent_private_conversations=2,
            starred_messages=1,
            stream=2,
            stop_words=0,
            subscription=6,
            update_display_settings=0,
            update_global_notifications=0,
            update_message_flags=5,
            user_status=1,
            zulip_version=0,
        )
        # Every event type the fetch asked about must have a budget above.
        wanted_event_types = {
            item[0][0] for item
            in want_mock.call_args_list
        }
        self.assertEqual(wanted_event_types, set(expected_counts))
        for event_type in sorted(wanted_event_types):
            count = expected_counts[event_type]
            flush_per_request_caches()
            with queries_captured() as queries:
                # update_message_flags depends on message data, so fetch
                # both together.
                if event_type == 'update_message_flags':
                    event_types = ['update_message_flags', 'message']
                else:
                    event_types = [event_type]
                fetch_initial_state_data(
                    user_profile=user,
                    event_types=event_types,
                    queue_id='x',
                    client_gravatar=False,
                )
            self.assert_length(queries, count)
class TestEventsRegisterAllPublicStreamsDefaults(ZulipTestCase):
    """Exercise _default_all_public_streams for every combination of the
    user's stored default and the value passed by the caller."""
    def setUp(self) -> None:
        super().setUp()
        hamlet = self.example_user('hamlet')
        self.user_profile = hamlet
        self.email = hamlet.email
    def test_use_passed_all_public_true_default_false(self) -> None:
        # An explicit True wins over a stored False default.
        profile = self.user_profile
        profile.default_all_public_streams = False
        profile.save()
        self.assertTrue(_default_all_public_streams(profile, True))
    def test_use_passed_all_public_true_default(self) -> None:
        # An explicit True agrees with a stored True default.
        profile = self.user_profile
        profile.default_all_public_streams = True
        profile.save()
        self.assertTrue(_default_all_public_streams(profile, True))
    def test_use_passed_all_public_false_default_false(self) -> None:
        # An explicit False agrees with a stored False default.
        profile = self.user_profile
        profile.default_all_public_streams = False
        profile.save()
        self.assertFalse(_default_all_public_streams(profile, False))
    def test_use_passed_all_public_false_default_true(self) -> None:
        # An explicit False wins over a stored True default.
        profile = self.user_profile
        profile.default_all_public_streams = True
        profile.save()
        self.assertFalse(_default_all_public_streams(profile, False))
    def test_use_true_default_for_none(self) -> None:
        # With no explicit value, fall back to the stored True default.
        profile = self.user_profile
        profile.default_all_public_streams = True
        profile.save()
        self.assertTrue(_default_all_public_streams(profile, None))
    def test_use_false_default_for_none(self) -> None:
        # With no explicit value, fall back to the stored False default.
        profile = self.user_profile
        profile.default_all_public_streams = False
        profile.save()
        self.assertFalse(_default_all_public_streams(profile, None))
class TestEventsRegisterNarrowDefaults(ZulipTestCase):
    """Exercise _default_narrow for combinations of a passed narrow and
    the user's default_events_register_stream_id."""
    def setUp(self) -> None:
        super().setUp()
        hamlet = self.example_user('hamlet')
        self.user_profile = hamlet
        self.email = hamlet.email
        self.stream = get_stream('Verona', hamlet.realm)
    def test_use_passed_narrow_no_default(self) -> None:
        # An explicit narrow is used as-is when no default stream is set.
        profile = self.user_profile
        profile.default_events_register_stream_id = None
        profile.save()
        narrow = _default_narrow(profile, [[u'stream', u'my_stream']])
        self.assertEqual(narrow, [[u'stream', u'my_stream']])
    def test_use_passed_narrow_with_default(self) -> None:
        # An explicit narrow also wins over a configured default stream.
        profile = self.user_profile
        profile.default_events_register_stream_id = self.stream.id
        profile.save()
        narrow = _default_narrow(profile, [[u'stream', u'my_stream']])
        self.assertEqual(narrow, [[u'stream', u'my_stream']])
    def test_use_default_if_narrow_is_empty(self) -> None:
        # An empty narrow falls back to the configured default stream.
        profile = self.user_profile
        profile.default_events_register_stream_id = self.stream.id
        profile.save()
        self.assertEqual(_default_narrow(profile, []), [[u'stream', u'Verona']])
    def test_use_narrow_if_default_is_none(self) -> None:
        # With no default stream, an empty narrow stays empty.
        profile = self.user_profile
        profile.default_events_register_stream_id = None
        profile.save()
        self.assertEqual(_default_narrow(profile, []), [])
class TestGetRawUserDataSystemBotRealm(ZulipTestCase):
    """get_raw_user_data on the system bot realm ("zulipinternal")."""
    def test_get_raw_user_data_on_system_bot_realm(self) -> None:
        """Every cross-realm bot appears in the result, flagged with
        is_cross_realm_bot."""
        result = get_raw_user_data(get_realm("zulipinternal"), self.example_user('hamlet'), True)
        for bot_email in settings.CROSS_REALM_BOT_EMAILS:
            bot_profile = get_system_bot(bot_email)
            self.assertTrue(bot_profile.id in result)
            self.assertTrue(result[bot_profile.id]['is_cross_realm_bot'])
| true | true |
f730098e8822814319a07f20bdedf1174871a9d9 | 674 | py | Python | setup.py | akuhnregnier/jupyter-notebook-tools | c0afc5769d5f53b36fdd0fee976126a2587dfb35 | [
"Apache-2.0"
] | null | null | null | setup.py | akuhnregnier/jupyter-notebook-tools | c0afc5769d5f53b36fdd0fee976126a2587dfb35 | [
"Apache-2.0"
] | null | null | null | setup.py | akuhnregnier/jupyter-notebook-tools | c0afc5769d5f53b36fdd0fee976126a2587dfb35 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import find_packages, setup

NAME = "jupyter-notebook-tools"

# Use the README as the long description shown on PyPI.
with open("README.md", "r") as readme_file:
    long_description = readme_file.read()

setup(
    name=NAME,
    url="https://github.com/akuhnregnier/" + NAME,
    author="Alexander Kuhn-Regnier",
    author_email="ahf.kuhnregnier@gmail.com",
    long_description=long_description,
    package_dir={"": "src"},
    packages=find_packages("src"),
    entry_points={
        "console_scripts": ["nbstripout-fast=nbstripout.nbstripout_fast:main"]
    },
    python_requires=">=3.6",
    setup_requires=["setuptools-scm"],
    # Version is derived from git tags via setuptools-scm and written to
    # src/nbstripout/_version.py so it is importable at runtime.
    use_scm_version={"write_to": "src/nbstripout/_version.py"},
    install_requires=(),
)
| 25.923077 | 78 | 0.667656 |
from setuptools import find_packages, setup

NAME = "jupyter-notebook-tools"

# Use the README as the long description shown on PyPI.
with open("README.md", "r") as readme_file:
    long_description = readme_file.read()

setup(
    name=NAME,
    url="https://github.com/akuhnregnier/" + NAME,
    author="Alexander Kuhn-Regnier",
    author_email="ahf.kuhnregnier@gmail.com",
    long_description=long_description,
    package_dir={"": "src"},
    packages=find_packages("src"),
    entry_points={
        "console_scripts": ["nbstripout-fast=nbstripout.nbstripout_fast:main"]
    },
    python_requires=">=3.6",
    setup_requires=["setuptools-scm"],
    # Version is derived from git tags via setuptools-scm and written to
    # src/nbstripout/_version.py so it is importable at runtime.
    use_scm_version={"write_to": "src/nbstripout/_version.py"},
    install_requires=(),
)
| true | true |
f7300a2ffabcd3bc05da38c7d5de492b1f25e72f | 901 | py | Python | project/product/views.py | steetstyle/Django-Ecommerce-API | 89c2c973e560346a5be74019709dc9a9f8ab7b2a | [
"MIT"
] | null | null | null | project/product/views.py | steetstyle/Django-Ecommerce-API | 89c2c973e560346a5be74019709dc9a9f8ab7b2a | [
"MIT"
] | null | null | null | project/product/views.py | steetstyle/Django-Ecommerce-API | 89c2c973e560346a5be74019709dc9a9f8ab7b2a | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework import viewsets
from .models import Product
from .serializers import ProductSerializer
from core.permissions import MarketOwnerPermission
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework import filters
class ProductViewSet(viewsets.ModelViewSet):
    """
    A ModelViewSet for viewing and editing Products.

    Read actions (list/retrieve) are open to everyone; write actions
    (create/update/partial_update/destroy) require an authenticated user
    that also passes MarketOwnerPermission.
    """
    queryset = Product.objects.all()
    serializer_class = ProductSerializer
    permission_classes = []
    filter_backends = [filters.SearchFilter]
    # NOTE(review): DRF's SearchFilter expects an explicit list/tuple of
    # field names; '__all__' is not a documented value for it — confirm the
    # intended searchable fields and list them here explicitly.
    search_fields = '__all__'

    def get_permissions(self):
        """Select permission classes per action: writes restricted, reads open."""
        if self.action in ['update', 'partial_update', 'destroy', 'create']:
            self.permission_classes = [IsAuthenticated, MarketOwnerPermission]
        else:
            self.permission_classes = [AllowAny]
        # Bug fix: `super(self.__class__, self)` recurses infinitely if this
        # class is ever subclassed; the zero-argument form is always correct.
        return super().get_permissions()
from rest_framework import viewsets
from .models import Product
from .serializers import ProductSerializer
from core.permissions import MarketOwnerPermission
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework import filters
class ProductViewSet(viewsets.ModelViewSet):
    """
    A ModelViewSet for viewing and editing Products.

    Read actions (list/retrieve) are open to everyone; write actions
    (create/update/partial_update/destroy) require an authenticated user
    that also passes MarketOwnerPermission.
    """
    queryset = Product.objects.all()
    serializer_class = ProductSerializer
    permission_classes = []
    filter_backends = [filters.SearchFilter]
    # NOTE(review): DRF's SearchFilter expects an explicit list/tuple of
    # field names; '__all__' is not a documented value for it — confirm the
    # intended searchable fields and list them here explicitly.
    search_fields = '__all__'

    def get_permissions(self):
        """Select permission classes per action: writes restricted, reads open."""
        if self.action in ['update', 'partial_update', 'destroy', 'create']:
            self.permission_classes = [IsAuthenticated, MarketOwnerPermission]
        else:
            self.permission_classes = [AllowAny]
        # Bug fix: `super(self.__class__, self)` recurses infinitely if this
        # class is ever subclassed; the zero-argument form is always correct.
        return super().get_permissions()
f7300b19d159e1cd832307bc71cecb184c4499e1 | 48,458 | py | Python | tests/unit/gapic/videointelligence_v1p2beta1/test_video_intelligence_service.py | renovate-bot/python-videointelligence | 8a7920066cffa98c5a98d451a6a924fa82281544 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/videointelligence_v1p2beta1/test_video_intelligence_service.py | renovate-bot/python-videointelligence | 8a7920066cffa98c5a98d451a6a924fa82281544 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/videointelligence_v1p2beta1/test_video_intelligence_service.py | renovate-bot/python-videointelligence | 8a7920066cffa98c5a98d451a6a924fa82281544 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service import (
VideoIntelligenceServiceAsyncClient,
)
from google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service import (
VideoIntelligenceServiceClient,
)
from google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service import (
transports,
)
from google.cloud.videointelligence_v1p2beta1.types import video_intelligence
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Return a dummy (certificate, key) byte pair for mTLS tests."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return the client's DEFAULT_ENDPOINT, substituting a fixed fake
    hostname when it points at localhost (for endpoint-switch tests)."""
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" in endpoint:
        return "foo.googleapis.com"
    return endpoint
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their
    mTLS variants, passes through already-mTLS and non-Google hosts,
    and returns None for None."""
    convert = VideoIntelligenceServiceClient._get_default_mtls_endpoint
    assert convert(None) is None
    cases = [
        # (input endpoint, expected mTLS endpoint)
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("api.example.com", "api.example.com"),
    ]
    for given, expected in cases:
        assert convert(given) == expected
@pytest.mark.parametrize(
    "client_class",
    [VideoIntelligenceServiceClient, VideoIntelligenceServiceAsyncClient,],
)
def test_video_intelligence_service_client_from_service_account_info(client_class):
    """from_service_account_info() builds a client whose transport uses the
    credentials produced by the service-account factory."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "videointelligence.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.VideoIntelligenceServiceGrpcTransport, "grpc"),
        (transports.VideoIntelligenceServiceGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_video_intelligence_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """The transport enables self-signed JWT on the credentials only when
    always_use_jwt_access=True is passed."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class",
    [VideoIntelligenceServiceClient, VideoIntelligenceServiceAsyncClient,],
)
def test_video_intelligence_service_client_from_service_account_file(client_class):
    """from_service_account_file() and its from_service_account_json alias
    both build clients backed by the credentials loaded from the file."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "videointelligence.googleapis.com:443"
def test_video_intelligence_service_client_get_transport_class():
    """get_transport_class() defaults to a gRPC transport and resolves the
    "grpc" name to the gRPC transport class."""
    transport = VideoIntelligenceServiceClient.get_transport_class()
    available_transports = [
        transports.VideoIntelligenceServiceGrpcTransport,
    ]
    assert transport in available_transports
    transport = VideoIntelligenceServiceClient.get_transport_class("grpc")
    assert transport == transports.VideoIntelligenceServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            VideoIntelligenceServiceClient,
            transports.VideoIntelligenceServiceGrpcTransport,
            "grpc",
        ),
        (
            VideoIntelligenceServiceAsyncClient,
            transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    VideoIntelligenceServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(VideoIntelligenceServiceClient),
)
@mock.patch.object(
    VideoIntelligenceServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(VideoIntelligenceServiceAsyncClient),
)
def test_video_intelligence_service_client_client_options(
    client_class, transport_class, transport_name
):
    """Client construction honors transport instances, transport names,
    api_endpoint, the GOOGLE_API_USE_MTLS_ENDPOINT environment variable,
    and quota_project_id."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(
        VideoIntelligenceServiceClient, "get_transport_class"
    ) as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(
        VideoIntelligenceServiceClient, "get_transport_class"
    ) as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class()
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            VideoIntelligenceServiceClient,
            transports.VideoIntelligenceServiceGrpcTransport,
            "grpc",
            "true",
        ),
        (
            VideoIntelligenceServiceAsyncClient,
            transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            VideoIntelligenceServiceClient,
            transports.VideoIntelligenceServiceGrpcTransport,
            "grpc",
            "false",
        ),
        (
            VideoIntelligenceServiceAsyncClient,
            transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    VideoIntelligenceServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(VideoIntelligenceServiceClient),
)
@mock.patch.object(
    VideoIntelligenceServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(VideoIntelligenceServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_video_intelligence_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT="auto", the client switches to the
    mTLS endpoint exactly when a client certificate is available and
    GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name, client_options=options)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            VideoIntelligenceServiceClient,
            transports.VideoIntelligenceServiceGrpcTransport,
            "grpc",
        ),
        (
            VideoIntelligenceServiceAsyncClient,
            transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_video_intelligence_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes provided via client_options are forwarded to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            VideoIntelligenceServiceClient,
            transports.VideoIntelligenceServiceGrpcTransport,
            "grpc",
        ),
        (
            VideoIntelligenceServiceAsyncClient,
            transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_video_intelligence_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    """A credentials_file from client_options is forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_video_intelligence_service_client_client_options_from_dict():
    """client_options passed as a plain dict (not ClientOptions) are honored."""
    with mock.patch(
        "google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service.transports.VideoIntelligenceServiceGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = VideoIntelligenceServiceClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_annotate_video(
    transport: str = "grpc", request_type=video_intelligence.AnnotateVideoRequest
):
    """annotate_video() sends the request over the (mocked) gRPC stub and
    wraps the long-running operation response in a Future."""
    client = VideoIntelligenceServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.annotate_video(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == video_intelligence.AnnotateVideoRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_annotate_video_from_dict():
    """annotate_video also accepts a plain dict in place of the request proto."""
    test_annotate_video(request_type=dict)
def test_annotate_video_empty_call():
    """Calling annotate_video with no arguments sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = VideoIntelligenceServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
        client.annotate_video()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == video_intelligence.AnnotateVideoRequest()
@pytest.mark.asyncio
async def test_annotate_video_async(
    transport: str = "grpc_asyncio",
    request_type=video_intelligence.AnnotateVideoRequest,
):
    """Async annotate_video issues exactly one RPC and returns an LRO future."""
    client = VideoIntelligenceServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.annotate_video(request)
        # Establish that the underlying gRPC stub method was called exactly
        # once, matching the sync variant of this test (a bare truthiness
        # check would also pass on accidental duplicate calls).
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == video_intelligence.AnnotateVideoRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_annotate_video_async_from_dict():
    """Async annotate_video also accepts a plain dict as the request."""
    await test_annotate_video_async(request_type=dict)
def test_annotate_video_flattened():
    """Flattened keyword arguments are folded into the request proto."""
    client = VideoIntelligenceServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.annotate_video(
            input_uri="input_uri_value",
            features=[video_intelligence.Feature.LABEL_DETECTION],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].input_uri
        mock_val = "input_uri_value"
        assert arg == mock_val
        arg = args[0].features
        mock_val = [video_intelligence.Feature.LABEL_DETECTION]
        assert arg == mock_val
def test_annotate_video_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = VideoIntelligenceServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.annotate_video(
            video_intelligence.AnnotateVideoRequest(),
            input_uri="input_uri_value",
            features=[video_intelligence.Feature.LABEL_DETECTION],
        )
@pytest.mark.asyncio
async def test_annotate_video_flattened_async():
    """Flattened keyword arguments are folded into the request (async client)."""
    client = VideoIntelligenceServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
        # Designate an appropriate return value for the call.  (The generated
        # code first assigned a plain Operation and immediately overwrote it;
        # that dead assignment is removed — only the awaitable fake matters.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.annotate_video(
            input_uri="input_uri_value",
            features=[video_intelligence.Feature.LABEL_DETECTION],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].input_uri
        mock_val = "input_uri_value"
        assert arg == mock_val
        arg = args[0].features
        mock_val = [video_intelligence.Feature.LABEL_DETECTION]
        assert arg == mock_val
@pytest.mark.asyncio
async def test_annotate_video_flattened_error_async():
    """Mixing a request object with flattened fields raises (async client)."""
    client = VideoIntelligenceServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.annotate_video(
            video_intelligence.AnnotateVideoRequest(),
            input_uri="input_uri_value",
            features=[video_intelligence.Feature.LABEL_DETECTION],
        )
def test_credentials_transport_error():
    """Credentials, credentials_file, or scopes may not be combined with an
    already-constructed transport instance."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.VideoIntelligenceServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = VideoIntelligenceServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.VideoIntelligenceServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = VideoIntelligenceServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.VideoIntelligenceServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = VideoIntelligenceServiceClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A custom transport instance passed to the client is used verbatim."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.VideoIntelligenceServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = VideoIntelligenceServiceClient(transport=transport)
    assert client.transport is transport
def test_transport_get_channel():
    """Both sync and async gRPC transports expose a usable grpc_channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.VideoIntelligenceServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
    transport = transports.VideoIntelligenceServiceGrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.VideoIntelligenceServiceGrpcTransport,
        transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to Application Default Credentials when none given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """With no transport specified, the client picks the sync gRPC transport."""
    # A client should use the gRPC transport by default.
    client = VideoIntelligenceServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(
        client.transport, transports.VideoIntelligenceServiceGrpcTransport,
    )
def test_video_intelligence_service_base_transport_error():
    """Passing both a credentials object and credentials_file must raise."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        # The constructor itself raises, so binding the result to a local
        # variable (as the generated code did) was a lint-flagged unused name.
        transports.VideoIntelligenceServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_video_intelligence_service_base_transport():
    """Every abstract method/property on the base transport raises
    NotImplementedError until a concrete transport overrides it."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service.transports.VideoIntelligenceServiceTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.VideoIntelligenceServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = ("annotate_video",)
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_video_intelligence_service_base_transport_with_credentials_file():
    """A credentials_file is loaded with the cloud-platform default scope and
    the provided quota project."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service.transports.VideoIntelligenceServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.VideoIntelligenceServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_video_intelligence_service_base_transport_with_adc():
    """The base transport falls back to ADC when no credentials are given."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service.transports.VideoIntelligenceServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.VideoIntelligenceServiceTransport()
        adc.assert_called_once()
def test_video_intelligence_service_auth_adc():
    """The client requests ADC with the cloud-platform default scope."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        VideoIntelligenceServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.VideoIntelligenceServiceGrpcTransport,
        transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
    ],
)
def test_video_intelligence_service_transport_auth_adc(transport_class):
    """Transports forward explicit scopes and quota project to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.VideoIntelligenceServiceGrpcTransport, grpc_helpers),
        (transports.VideoIntelligenceServiceGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_video_intelligence_service_transport_create_channel(
    transport_class, grpc_helpers
):
    """create_channel receives the service host, ADC credentials, scopes and
    the unlimited-message-size gRPC options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "videointelligence.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="videointelligence.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.VideoIntelligenceServiceGrpcTransport,
        transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
    ],
)
def test_video_intelligence_service_grpc_transport_client_cert_source_for_mtls(
    transport_class,
):
    """ssl_channel_credentials takes precedence; otherwise the client cert
    callback is used to build the channel credentials."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_video_intelligence_service_host_no_port():
    """A portless api_endpoint gets the default :443 appended."""
    client = VideoIntelligenceServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="videointelligence.googleapis.com"
        ),
    )
    assert client.transport._host == "videointelligence.googleapis.com:443"
def test_video_intelligence_service_host_with_port():
    """An explicit port in the api_endpoint is preserved."""
    client = VideoIntelligenceServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="videointelligence.googleapis.com:8000"
        ),
    )
    assert client.transport._host == "videointelligence.googleapis.com:8000"
def test_video_intelligence_service_grpc_transport_channel():
    """A pre-built channel passed to the sync transport is used as-is."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.VideoIntelligenceServiceGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8 (E711): compare to None with `is`, not `==`.
    assert transport._ssl_channel_credentials is None
def test_video_intelligence_service_grpc_asyncio_transport_channel():
    """A pre-built channel passed to the asyncio transport is used as-is."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.VideoIntelligenceServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8 (E711): compare to None with `is`, not `==`.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.VideoIntelligenceServiceGrpcTransport,
        transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
    ],
)
def test_video_intelligence_service_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """The deprecated api_mtls_endpoint/client_cert_source args still build an
    mTLS channel (and emit a DeprecationWarning)."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.VideoIntelligenceServiceGrpcTransport,
        transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
    ],
)
def test_video_intelligence_service_transport_channel_mtls_with_adc(transport_class):
    """With client_cert_source=None, the deprecated mTLS path falls back to
    ADC-provided SSL credentials."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_video_intelligence_service_grpc_lro_client():
    """The sync transport lazily creates and caches one OperationsClient."""
    client = VideoIntelligenceServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_video_intelligence_service_grpc_lro_async_client():
    """The asyncio transport lazily creates and caches one OperationsAsyncClient."""
    client = VideoIntelligenceServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_common_billing_account_path():
    """The billing-account path helper formats the expected resource name."""
    billing_account = "squid"
    assert (
        VideoIntelligenceServiceClient.common_billing_account_path(billing_account)
        == f"billingAccounts/{billing_account}"
    )
def test_parse_common_billing_account_path():
    """Billing-account path construction and parsing are inverses."""
    fields = {"billing_account": "clam"}
    path = VideoIntelligenceServiceClient.common_billing_account_path(**fields)
    # Round-trip: parsing the constructed path recovers the original fields.
    assert VideoIntelligenceServiceClient.parse_common_billing_account_path(path) == fields
def test_common_folder_path():
    """The folder path helper formats the expected resource name."""
    folder = "whelk"
    assert (
        VideoIntelligenceServiceClient.common_folder_path(folder)
        == f"folders/{folder}"
    )
def test_parse_common_folder_path():
    """Folder path construction and parsing are inverses."""
    fields = {"folder": "octopus"}
    path = VideoIntelligenceServiceClient.common_folder_path(**fields)
    # Round-trip: parsing the constructed path recovers the original fields.
    assert VideoIntelligenceServiceClient.parse_common_folder_path(path) == fields
def test_common_organization_path():
    """The organization path helper formats the expected resource name."""
    organization = "oyster"
    assert (
        VideoIntelligenceServiceClient.common_organization_path(organization)
        == f"organizations/{organization}"
    )
def test_parse_common_organization_path():
    """Organization path construction and parsing are inverses."""
    fields = {"organization": "nudibranch"}
    path = VideoIntelligenceServiceClient.common_organization_path(**fields)
    # Round-trip: parsing the constructed path recovers the original fields.
    assert VideoIntelligenceServiceClient.parse_common_organization_path(path) == fields
def test_common_project_path():
    """The project path helper formats the expected resource name."""
    project = "cuttlefish"
    assert (
        VideoIntelligenceServiceClient.common_project_path(project)
        == f"projects/{project}"
    )
def test_parse_common_project_path():
    """Project path construction and parsing are inverses."""
    fields = {"project": "mussel"}
    path = VideoIntelligenceServiceClient.common_project_path(**fields)
    # Round-trip: parsing the constructed path recovers the original fields.
    assert VideoIntelligenceServiceClient.parse_common_project_path(path) == fields
def test_common_location_path():
    """The location path helper formats the expected resource name."""
    project = "winkle"
    location = "nautilus"
    assert (
        VideoIntelligenceServiceClient.common_location_path(project, location)
        == f"projects/{project}/locations/{location}"
    )
def test_parse_common_location_path():
    """Location path construction and parsing are inverses."""
    fields = {"project": "scallop", "location": "abalone"}
    path = VideoIntelligenceServiceClient.common_location_path(**fields)
    # Round-trip: parsing the constructed path recovers the original fields.
    assert VideoIntelligenceServiceClient.parse_common_location_path(path) == fields
def test_client_withDEFAULT_CLIENT_INFO():
    """client_info is forwarded to the transport's _prep_wrapped_messages
    whether the transport is built via the client or directly."""
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(
        transports.VideoIntelligenceServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        client = VideoIntelligenceServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(
        transports.VideoIntelligenceServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = VideoIntelligenceServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Exiting the async client context manager closes the gRPC channel."""
    client = VideoIntelligenceServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Exiting the sync client context manager closes the underlying channel."""
    # Map transport name -> attribute holding the channel to be closed.
    transports = {
        "grpc": "_grpc_channel",
    }
    for transport, close_name in transports.items():
        client = VideoIntelligenceServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager delegates close() to its transport."""
    transports = [
        "grpc",
    ]
    for transport in transports:
        client = VideoIntelligenceServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
| 37.769291 | 154 | 0.694725 |
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service import (
VideoIntelligenceServiceAsyncClient,
)
from google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service import (
VideoIntelligenceServiceClient,
)
from google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service import (
transports,
)
from google.cloud.videointelligence_v1p2beta1.types import video_intelligence
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import duration_pb2
import google.auth
def client_cert_source_callback():
    """Return a static (certificate, key) byte pair used as a fake mTLS source."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
def modify_default_endpoint(client):
    """Return a sentinel endpoint when *client* defaults to localhost.

    Tests patch DEFAULT_ENDPOINT with this so that accidental reliance on a
    developer's localhost default becomes visible; any non-localhost default
    is passed through unchanged.
    """
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps API hosts to their mTLS counterparts
    and leaves None and non-Google hosts unchanged."""
    cases = [
        (None, None),
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        # Already-mTLS endpoints are idempotent.
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        # Non-Google hosts pass through untouched.
        ("api.example.com", "api.example.com"),
    ]
    for endpoint, expected in cases:
        assert (
            VideoIntelligenceServiceClient._get_default_mtls_endpoint(endpoint)
            == expected
        )
@pytest.mark.parametrize(
    "client_class",
    [VideoIntelligenceServiceClient, VideoIntelligenceServiceAsyncClient,],
)
def test_video_intelligence_service_client_from_service_account_info(client_class):
    """from_service_account_info builds a client wired with the credentials
    produced by the service-account factory."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "videointelligence.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.VideoIntelligenceServiceGrpcTransport, "grpc"),
        (transports.VideoIntelligenceServiceGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_video_intelligence_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """with_always_use_jwt_access is invoked only when always_use_jwt_access
    is True on the transport."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class",
    [VideoIntelligenceServiceClient, VideoIntelligenceServiceAsyncClient,],
)
def test_video_intelligence_service_client_from_service_account_file(client_class):
    """Both from_service_account_file and its _json alias build a client
    carrying the factory-produced credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "videointelligence.googleapis.com:443"
def test_video_intelligence_service_client_get_transport_class():
    """gRPC is both the default transport and the one resolved by name."""
    default_transport = VideoIntelligenceServiceClient.get_transport_class()
    assert default_transport in [transports.VideoIntelligenceServiceGrpcTransport]
    named_transport = VideoIntelligenceServiceClient.get_transport_class("grpc")
    assert named_transport is transports.VideoIntelligenceServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            VideoIntelligenceServiceClient,
            transports.VideoIntelligenceServiceGrpcTransport,
            "grpc",
        ),
        (
            VideoIntelligenceServiceAsyncClient,
            transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    VideoIntelligenceServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(VideoIntelligenceServiceClient),
)
@mock.patch.object(
    VideoIntelligenceServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(VideoIntelligenceServiceAsyncClient),
)
def test_video_intelligence_service_client_client_options(
    client_class, transport_class, transport_name
):
    """client_options and the GOOGLE_API_USE_MTLS_ENDPOINT /
    GOOGLE_API_USE_CLIENT_CERTIFICATE env vars drive endpoint selection."""
    with mock.patch.object(
        VideoIntelligenceServiceClient, "get_transport_class"
    ) as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(
        VideoIntelligenceServiceClient, "get_transport_class"
    ) as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class()
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            VideoIntelligenceServiceClient,
            transports.VideoIntelligenceServiceGrpcTransport,
            "grpc",
            "true",
        ),
        (
            VideoIntelligenceServiceAsyncClient,
            transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (
            VideoIntelligenceServiceClient,
            transports.VideoIntelligenceServiceGrpcTransport,
            "grpc",
            "false",
        ),
        (
            VideoIntelligenceServiceAsyncClient,
            transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    VideoIntelligenceServiceClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(VideoIntelligenceServiceClient),
)
@mock.patch.object(
    VideoIntelligenceServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(VideoIntelligenceServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_video_intelligence_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """Verify mTLS endpoint auto-switching driven by environment variables.

    With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the client must select the mTLS
    endpoint exactly when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and a
    client certificate (explicit option or ADC-provided) is available;
    otherwise it must fall back to the default endpoint with no client cert.
    """
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name, client_options=options)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            VideoIntelligenceServiceClient,
            transports.VideoIntelligenceServiceGrpcTransport,
            "grpc",
        ),
        (
            VideoIntelligenceServiceAsyncClient,
            transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_video_intelligence_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes passed via client_options must be forwarded to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            VideoIntelligenceServiceClient,
            transports.VideoIntelligenceServiceGrpcTransport,
            "grpc",
        ),
        (
            VideoIntelligenceServiceAsyncClient,
            transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_video_intelligence_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    """A credentials file in client_options must reach the transport unchanged."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_video_intelligence_service_client_client_options_from_dict():
    """client_options given as a plain dict must configure the transport."""
    with mock.patch(
        "google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service.transports.VideoIntelligenceServiceGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = VideoIntelligenceServiceClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_annotate_video(
    transport: str = "grpc", request_type=video_intelligence.AnnotateVideoRequest
):
    """annotate_video sends the request over gRPC and returns an LRO future."""
    client = VideoIntelligenceServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.annotate_video(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == video_intelligence.AnnotateVideoRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_annotate_video_from_dict():
    """annotate_video also accepts the request payload as a plain dict."""
    # Reuse the main test, swapping the request type for ``dict``.
    test_annotate_video(request_type=dict)
def test_annotate_video_empty_call():
    """Calling annotate_video with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = VideoIntelligenceServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
        client.annotate_video()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == video_intelligence.AnnotateVideoRequest()
@pytest.mark.asyncio
async def test_annotate_video_async(
    transport: str = "grpc_asyncio",
    request_type=video_intelligence.AnnotateVideoRequest,
):
    """Async annotate_video awaits the RPC and returns an LRO future."""
    client = VideoIntelligenceServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.annotate_video(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == video_intelligence.AnnotateVideoRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_annotate_video_async_from_dict():
    """Async annotate_video also accepts the request payload as a plain dict."""
    # Reuse the main async test, swapping the request type for ``dict``.
    await test_annotate_video_async(request_type=dict)
def test_annotate_video_flattened():
    """Flattened keyword arguments must be copied into the request message."""
    client = VideoIntelligenceServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.annotate_video(
            input_uri="input_uri_value",
            features=[video_intelligence.Feature.LABEL_DETECTION],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].input_uri
        mock_val = "input_uri_value"
        assert arg == mock_val
        arg = args[0].features
        mock_val = [video_intelligence.Feature.LABEL_DETECTION]
        assert arg == mock_val
def test_annotate_video_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = VideoIntelligenceServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.annotate_video(
            video_intelligence.AnnotateVideoRequest(),
            input_uri="input_uri_value",
            features=[video_intelligence.Feature.LABEL_DETECTION],
        )
@pytest.mark.asyncio
async def test_annotate_video_flattened_async():
    """Flattened kwargs on the async client must populate the request object."""
    client = VideoIntelligenceServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
        # Designate an appropriate return value for the call.  (A previous
        # plain Operation assignment here was dead code: it was immediately
        # overwritten by the awaitable fake below, so it has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.annotate_video(
            input_uri="input_uri_value",
            features=[video_intelligence.Feature.LABEL_DETECTION],
        )
        # Establish that the underlying call was made with the expected
        # request object values.  Assert exactly one call, matching the
        # sync counterpart of this test.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].input_uri
        mock_val = "input_uri_value"
        assert arg == mock_val
        arg = args[0].features
        mock_val = [video_intelligence.Feature.LABEL_DETECTION]
        assert arg == mock_val
@pytest.mark.asyncio
async def test_annotate_video_flattened_error_async():
    """Mixing a request object with flattened fields must raise (async client)."""
    client = VideoIntelligenceServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.annotate_video(
            video_intelligence.AnnotateVideoRequest(),
            input_uri="input_uri_value",
            features=[video_intelligence.Feature.LABEL_DETECTION],
        )
def test_credentials_transport_error():
    """Credentials, a credentials file, or scopes cannot be combined with a transport instance."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.VideoIntelligenceServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = VideoIntelligenceServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.VideoIntelligenceServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = VideoIntelligenceServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.VideoIntelligenceServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = VideoIntelligenceServiceClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client accepts a pre-built transport instance and exposes it unchanged."""
    custom_transport = transports.VideoIntelligenceServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The client must adopt exactly the transport object it was given.
    assert (
        VideoIntelligenceServiceClient(transport=custom_transport).transport
        is custom_transport
    )
def test_transport_get_channel():
    """Both the sync and async gRPC transports expose a usable channel."""
    creds = ga_credentials.AnonymousCredentials()
    for transport_cls in (
        transports.VideoIntelligenceServiceGrpcTransport,
        transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
    ):
        # Each transport must create a truthy gRPC channel on demand.
        assert transport_cls(credentials=creds).grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.VideoIntelligenceServiceGrpcTransport,
        transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to application default credentials (ADC)."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """When no transport name is given, the client defaults to sync gRPC."""
    default_client = VideoIntelligenceServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The concrete transport chosen by default must be the sync gRPC one.
    assert isinstance(
        default_client.transport,
        transports.VideoIntelligenceServiceGrpcTransport,
    )
def test_video_intelligence_service_base_transport_error():
    """Supplying both credentials and credentials_file must raise."""
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transports.VideoIntelligenceServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_video_intelligence_service_base_transport():
    """Every RPC and property on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service.transports.VideoIntelligenceServiceTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.VideoIntelligenceServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = ("annotate_video",)
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_video_intelligence_service_base_transport_with_credentials_file():
    """A credentials file given to the base transport is loaded with the default scopes."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service.transports.VideoIntelligenceServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.VideoIntelligenceServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_video_intelligence_service_base_transport_with_adc():
    """Without explicit credentials the base transport resolves ADC."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service.transports.VideoIntelligenceServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.VideoIntelligenceServiceTransport()
        adc.assert_called_once()
def test_video_intelligence_service_auth_adc():
    """Client construction without credentials requests ADC with the default scopes."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        VideoIntelligenceServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.VideoIntelligenceServiceGrpcTransport,
        transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
    ],
)
def test_video_intelligence_service_transport_auth_adc(transport_class):
    """Transport classes forward scopes and quota project to the ADC lookup."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.VideoIntelligenceServiceGrpcTransport, grpc_helpers),
        (transports.VideoIntelligenceServiceGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_video_intelligence_service_transport_create_channel(
    transport_class, grpc_helpers
):
    """Channel creation uses ADC credentials and the expected gRPC options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "videointelligence.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="videointelligence.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.VideoIntelligenceServiceGrpcTransport,
        transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
    ],
)
def test_video_intelligence_service_grpc_transport_client_cert_source_for_mtls(
    transport_class,
):
    """mTLS channel credentials come from ssl_channel_credentials or the cert callback."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_video_intelligence_service_host_no_port():
    """Without an explicit port, the default :443 is appended to the host."""
    opts = client_options.ClientOptions(
        api_endpoint="videointelligence.googleapis.com"
    )
    client = VideoIntelligenceServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=opts,
    )
    assert client.transport._host == "videointelligence.googleapis.com:443"
def test_video_intelligence_service_host_with_port():
    """An explicit port in the endpoint is preserved on the transport host."""
    opts = client_options.ClientOptions(
        api_endpoint="videointelligence.googleapis.com:8000"
    )
    client = VideoIntelligenceServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=opts,
    )
    assert client.transport._host == "videointelligence.googleapis.com:8000"
def test_video_intelligence_service_grpc_transport_channel():
    """A caller-supplied channel is adopted verbatim by the sync transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.VideoIntelligenceServiceGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: identity comparison (``is``) is the correct check for None,
    # not ``==`` (E711).
    assert transport._ssl_channel_credentials is None
def test_video_intelligence_service_grpc_asyncio_transport_channel():
    """A caller-supplied channel is adopted verbatim by the asyncio transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.VideoIntelligenceServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # PEP 8: identity comparison (``is``) is the correct check for None,
    # not ``==`` (E711).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.VideoIntelligenceServiceGrpcTransport,
        transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
    ],
)
def test_video_intelligence_service_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated api_mtls_endpoint + client_cert_source still builds an mTLS channel."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.VideoIntelligenceServiceGrpcTransport,
        transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
    ],
)
def test_video_intelligence_service_transport_channel_mtls_with_adc(transport_class):
    """Deprecated api_mtls_endpoint with an ADC-provided client cert builds an mTLS channel."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_video_intelligence_service_grpc_lro_client():
    """The sync transport exposes a cached api-core OperationsClient."""
    client = VideoIntelligenceServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_video_intelligence_service_grpc_lro_async_client():
    """The asyncio transport exposes a cached api-core OperationsAsyncClient."""
    client = VideoIntelligenceServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_common_billing_account_path():
    """Billing-account helper renders ``billingAccounts/<id>``."""
    account = "squid"
    rendered = VideoIntelligenceServiceClient.common_billing_account_path(account)
    assert rendered == "billingAccounts/{}".format(account)
def test_parse_common_billing_account_path():
    """Parsing a billing-account path recovers the original components."""
    components = {
        "billing_account": "clam",
    }
    rendered = VideoIntelligenceServiceClient.common_billing_account_path(**components)
    # Round-trip: parsing must invert path construction.
    assert (
        VideoIntelligenceServiceClient.parse_common_billing_account_path(rendered)
        == components
    )
def test_common_folder_path():
    """Folder helper renders ``folders/<folder>``."""
    folder_id = "whelk"
    rendered = VideoIntelligenceServiceClient.common_folder_path(folder_id)
    assert rendered == "folders/{}".format(folder_id)
def test_parse_common_folder_path():
    """Parsing a folder path recovers the original components."""
    components = {
        "folder": "octopus",
    }
    rendered = VideoIntelligenceServiceClient.common_folder_path(**components)
    # Round-trip: parsing must invert path construction.
    assert (
        VideoIntelligenceServiceClient.parse_common_folder_path(rendered) == components
    )
def test_common_organization_path():
    """Organization helper renders ``organizations/<org>``."""
    org_id = "oyster"
    rendered = VideoIntelligenceServiceClient.common_organization_path(org_id)
    assert rendered == "organizations/{}".format(org_id)
def test_parse_common_organization_path():
    """Parsing an organization path recovers the original components."""
    components = {
        "organization": "nudibranch",
    }
    rendered = VideoIntelligenceServiceClient.common_organization_path(**components)
    # Round-trip: parsing must invert path construction.
    assert (
        VideoIntelligenceServiceClient.parse_common_organization_path(rendered)
        == components
    )
def test_common_project_path():
    """Project helper renders ``projects/<project>``."""
    project_id = "cuttlefish"
    rendered = VideoIntelligenceServiceClient.common_project_path(project_id)
    assert rendered == "projects/{}".format(project_id)
def test_parse_common_project_path():
    """Parsing a project path recovers the original components."""
    components = {
        "project": "mussel",
    }
    rendered = VideoIntelligenceServiceClient.common_project_path(**components)
    # Round-trip: parsing must invert path construction.
    assert (
        VideoIntelligenceServiceClient.parse_common_project_path(rendered) == components
    )
def test_common_location_path():
    """Location helper renders ``projects/<p>/locations/<l>``."""
    project_id = "winkle"
    location_id = "nautilus"
    rendered = VideoIntelligenceServiceClient.common_location_path(
        project_id, location_id
    )
    assert rendered == "projects/{}/locations/{}".format(project_id, location_id)
def test_parse_common_location_path():
    """Parsing a location path recovers the original components."""
    components = {
        "project": "scallop",
        "location": "abalone",
    }
    rendered = VideoIntelligenceServiceClient.common_location_path(**components)
    # Round-trip: parsing must invert path construction.
    assert (
        VideoIntelligenceServiceClient.parse_common_location_path(rendered)
        == components
    )
def test_client_withDEFAULT_CLIENT_INFO():
    """A custom client_info is passed through to _prep_wrapped_messages."""
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(
        transports.VideoIntelligenceServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        client = VideoIntelligenceServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(
        transports.VideoIntelligenceServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = VideoIntelligenceServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Exiting the async client context closes the gRPC channel exactly once."""
    client = VideoIntelligenceServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Exiting the sync client context closes the underlying gRPC channel once."""
    # Map transport name -> private channel attribute.  Use a local name that
    # does not shadow the module-level ``transports`` import used elsewhere.
    channel_attrs = {
        "grpc": "_grpc_channel",
    }
    for transport_name, close_name in channel_attrs.items():
        client = VideoIntelligenceServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager delegates close() to its transport."""
    # Use a local name that does not shadow the module-level ``transports``
    # import used elsewhere in this file.
    transport_names = [
        "grpc",
    ]
    for transport_name in transport_names:
        client = VideoIntelligenceServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
| true | true |
f7300bb85137e40f00493456c34280037c0a2f36 | 273 | py | Python | src/oscar/apps/offer/config.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | src/oscar/apps/offer/config.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | src/oscar/apps/offer/config.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class OfferConfig(AppConfig):
    """Django AppConfig for the Oscar offers application."""
    # App label, dotted module path, and display name for the app registry.
    label = 'offer'
    name = 'oscar.apps.offer'
    verbose_name = _('Offer')
    def ready(self):
        """Import signal handlers so they are registered at startup."""
        from . import signals  # noqa
| 22.75 | 56 | 0.663004 | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class OfferConfig(AppConfig):
    """Django AppConfig for the Oscar offers application.

    NOTE(review): this duplicates the identical class defined earlier with
    its comments stripped; presumably dataset residue rather than
    intentional code — confirm before keeping both copies.
    """
    label = 'offer'
    name = 'oscar.apps.offer'
    verbose_name = _('Offer')
    def ready(self):
        """Import signal handlers so they are registered at startup."""
        from . import signals
| true | true |
f7300c7f3d0d042ee232156f7fea0be53fb5f268 | 18,769 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/passcriteria_1568efcb71d423db7b9caee1463792cd.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 20 | 2019-05-07T01:59:14.000Z | 2022-02-11T05:24:47.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/passcriteria_1568efcb71d423db7b9caee1463792cd.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 60 | 2019-04-03T18:59:35.000Z | 2022-02-22T12:05:05.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/passcriteria_1568efcb71d423db7b9caee1463792cd.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 13 | 2019-05-20T10:48:31.000Z | 2021-10-06T07:45:44.000Z | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class PassCriteria(Base):
"""This applies the Pass Criteria to each trial in the test and determines whether the trial passed or failed.
The PassCriteria class encapsulates a required passCriteria resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'passCriteria'
_SDM_ATT_MAP = {
'EnableLatencyPassFail': 'enableLatencyPassFail',
'EnablePassFail': 'enablePassFail',
'EnableRatePassFail': 'enableRatePassFail',
'LatencyThresholdMode': 'latencyThresholdMode',
'LatencyThresholdScale': 'latencyThresholdScale',
'LatencyThresholdValue': 'latencyThresholdValue',
'PassCriteriaLoadRateMode': 'passCriteriaLoadRateMode',
'PassCriteriaLoadRateScale': 'passCriteriaLoadRateScale',
'PassCriteriaLoadRateValue': 'passCriteriaLoadRateValue',
'PassFailFrequency': 'passFailFrequency',
}
_SDM_ENUM_MAP = {
'latencyThresholdMode': ['average', 'maximum'],
'latencyThresholdScale': ['ms', 'ns', 'us'],
'passCriteriaLoadRateMode': ['average', 'minimum'],
'passCriteriaLoadRateScale': ['fps', 'gbps', 'kbps', 'mbps', 'percent'],
'passFailFrequency': ['framesizes', 'trials'],
}
def __init__(self, parent, list_op=False):
super(PassCriteria, self).__init__(parent, list_op)
@property
def EnableLatencyPassFail(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, the latency pass fail criteria is set.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableLatencyPassFail'])
@EnableLatencyPassFail.setter
def EnableLatencyPassFail(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableLatencyPassFail'], value)
@property
def EnablePassFail(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, the pass fail criteria is set.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnablePassFail'])
@EnablePassFail.setter
def EnablePassFail(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnablePassFail'], value)
@property
def EnableRatePassFail(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, the rate of pass and fail criteria is set.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableRatePassFail'])
@EnableRatePassFail.setter
def EnableRatePassFail(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableRatePassFail'], value)
@property
def LatencyThresholdMode(self):
# type: () -> str
"""
Returns
-------
- str(average | maximum): The threshold mode for the latency.
"""
return self._get_attribute(self._SDM_ATT_MAP['LatencyThresholdMode'])
@LatencyThresholdMode.setter
def LatencyThresholdMode(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['LatencyThresholdMode'], value)
@property
def LatencyThresholdScale(self):
# type: () -> str
"""
Returns
-------
- str(ms | ns | us): The scale by which the latency threshold is measured.
"""
return self._get_attribute(self._SDM_ATT_MAP['LatencyThresholdScale'])
@LatencyThresholdScale.setter
def LatencyThresholdScale(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['LatencyThresholdScale'], value)
@property
def LatencyThresholdValue(self):
# type: () -> int
"""
Returns
-------
- number: The value by which legacy threshold value is to be measured.
"""
return self._get_attribute(self._SDM_ATT_MAP['LatencyThresholdValue'])
@LatencyThresholdValue.setter
def LatencyThresholdValue(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['LatencyThresholdValue'], value)
@property
def PassCriteriaLoadRateMode(self):
# type: () -> str
"""
Returns
-------
- str(average | minimum): The pass criteria set for the load rate mode.
"""
return self._get_attribute(self._SDM_ATT_MAP['PassCriteriaLoadRateMode'])
@PassCriteriaLoadRateMode.setter
def PassCriteriaLoadRateMode(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['PassCriteriaLoadRateMode'], value)
@property
def PassCriteriaLoadRateScale(self):
# type: () -> str
"""
Returns
-------
- str(fps | gbps | kbps | mbps | percent): The pass criteria scale in which the load rate is to be measured.
"""
return self._get_attribute(self._SDM_ATT_MAP['PassCriteriaLoadRateScale'])
@PassCriteriaLoadRateScale.setter
def PassCriteriaLoadRateScale(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['PassCriteriaLoadRateScale'], value)
@property
def PassCriteriaLoadRateValue(self):
# type: () -> int
"""
Returns
-------
- number: The pass criteria for the Value of the load rate.
"""
return self._get_attribute(self._SDM_ATT_MAP['PassCriteriaLoadRateValue'])
@PassCriteriaLoadRateValue.setter
def PassCriteriaLoadRateValue(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['PassCriteriaLoadRateValue'], value)
@property
def PassFailFrequency(self):
# type: () -> str
"""
Returns
-------
- str(framesizes | trials): NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['PassFailFrequency'])
@PassFailFrequency.setter
def PassFailFrequency(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['PassFailFrequency'], value)
def update(self, EnableLatencyPassFail=None, EnablePassFail=None, EnableRatePassFail=None, LatencyThresholdMode=None, LatencyThresholdScale=None, LatencyThresholdValue=None, PassCriteriaLoadRateMode=None, PassCriteriaLoadRateScale=None, PassCriteriaLoadRateValue=None, PassFailFrequency=None):
# type: (bool, bool, bool, str, str, int, str, str, int, str) -> PassCriteria
"""Updates passCriteria resource on the server.
Args
----
- EnableLatencyPassFail (bool): If true, the latency pass fail criteria is set.
- EnablePassFail (bool): If true, the pass fail criteria is set.
- EnableRatePassFail (bool): If true, the rate of pass and fail criteria is set.
- LatencyThresholdMode (str(average | maximum)): The threshold mode for the latency.
- LatencyThresholdScale (str(ms | ns | us)): The scale by which the latency threshold is measured.
- LatencyThresholdValue (number): The value by which legacy threshold value is to be measured.
- PassCriteriaLoadRateMode (str(average | minimum)): The pass criteria set for the load rate mode.
- PassCriteriaLoadRateScale (str(fps | gbps | kbps | mbps | percent)): The pass criteria scale in which the load rate is to be measured.
- PassCriteriaLoadRateValue (number): The pass criteria for the Value of the load rate.
- PassFailFrequency (str(framesizes | trials)): NOT DEFINED
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def Apply(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the apply operation on the server.
Applies the specified Quick Test.
apply(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('apply', payload=payload, response_object=None)
def ApplyAsync(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the applyAsync operation on the server.
applyAsync(async_operation=bool)
--------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyAsync', payload=payload, response_object=None)
def ApplyAsyncResult(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[bool, None]
"""Executes the applyAsyncResult operation on the server.
applyAsyncResult(async_operation=bool)bool
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns bool:
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyAsyncResult', payload=payload, response_object=None)
def ApplyITWizardConfiguration(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the applyITWizardConfiguration operation on the server.
Applies the specified Quick Test.
applyITWizardConfiguration(async_operation=bool)
------------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)
def GenerateReport(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the generateReport operation on the server.
Generate a PDF report for the last succesfull test run.
generateReport(async_operation=bool)string
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: This method is asynchronous and has no return value.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('generateReport', payload=payload, response_object=None)
def Run(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the run operation on the server.
Starts the specified Quick Test and waits for its execution to finish.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
run(async_operation=bool)list
-----------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
run(InputParameters=string, async_operation=bool)list
-----------------------------------------------------
- InputParameters (str): The input arguments of the test.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('run', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
Starts the specified Quick Test.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(InputParameters=string, async_operation=bool)
---------------------------------------------------
- InputParameters (str): The input arguments of the test.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
Stops the currently running Quick Test.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def WaitForTest(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the waitForTest operation on the server.
Waits for the execution of the specified Quick Test to be completed.
waitForTest(async_operation=bool)list
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('waitForTest', payload=payload, response_object=None)
| 45.335749 | 297 | 0.644414 |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class PassCriteria(Base):
__slots__ = ()
_SDM_NAME = 'passCriteria'
_SDM_ATT_MAP = {
'EnableLatencyPassFail': 'enableLatencyPassFail',
'EnablePassFail': 'enablePassFail',
'EnableRatePassFail': 'enableRatePassFail',
'LatencyThresholdMode': 'latencyThresholdMode',
'LatencyThresholdScale': 'latencyThresholdScale',
'LatencyThresholdValue': 'latencyThresholdValue',
'PassCriteriaLoadRateMode': 'passCriteriaLoadRateMode',
'PassCriteriaLoadRateScale': 'passCriteriaLoadRateScale',
'PassCriteriaLoadRateValue': 'passCriteriaLoadRateValue',
'PassFailFrequency': 'passFailFrequency',
}
_SDM_ENUM_MAP = {
'latencyThresholdMode': ['average', 'maximum'],
'latencyThresholdScale': ['ms', 'ns', 'us'],
'passCriteriaLoadRateMode': ['average', 'minimum'],
'passCriteriaLoadRateScale': ['fps', 'gbps', 'kbps', 'mbps', 'percent'],
'passFailFrequency': ['framesizes', 'trials'],
}
def __init__(self, parent, list_op=False):
super(PassCriteria, self).__init__(parent, list_op)
@property
def EnableLatencyPassFail(self):
return self._get_attribute(self._SDM_ATT_MAP['EnableLatencyPassFail'])
@EnableLatencyPassFail.setter
def EnableLatencyPassFail(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableLatencyPassFail'], value)
@property
def EnablePassFail(self):
return self._get_attribute(self._SDM_ATT_MAP['EnablePassFail'])
@EnablePassFail.setter
def EnablePassFail(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnablePassFail'], value)
@property
def EnableRatePassFail(self):
return self._get_attribute(self._SDM_ATT_MAP['EnableRatePassFail'])
@EnableRatePassFail.setter
def EnableRatePassFail(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableRatePassFail'], value)
@property
def LatencyThresholdMode(self):
return self._get_attribute(self._SDM_ATT_MAP['LatencyThresholdMode'])
@LatencyThresholdMode.setter
def LatencyThresholdMode(self, value):
self._set_attribute(self._SDM_ATT_MAP['LatencyThresholdMode'], value)
@property
def LatencyThresholdScale(self):
return self._get_attribute(self._SDM_ATT_MAP['LatencyThresholdScale'])
@LatencyThresholdScale.setter
def LatencyThresholdScale(self, value):
self._set_attribute(self._SDM_ATT_MAP['LatencyThresholdScale'], value)
@property
def LatencyThresholdValue(self):
return self._get_attribute(self._SDM_ATT_MAP['LatencyThresholdValue'])
@LatencyThresholdValue.setter
def LatencyThresholdValue(self, value):
self._set_attribute(self._SDM_ATT_MAP['LatencyThresholdValue'], value)
@property
def PassCriteriaLoadRateMode(self):
return self._get_attribute(self._SDM_ATT_MAP['PassCriteriaLoadRateMode'])
@PassCriteriaLoadRateMode.setter
def PassCriteriaLoadRateMode(self, value):
self._set_attribute(self._SDM_ATT_MAP['PassCriteriaLoadRateMode'], value)
@property
def PassCriteriaLoadRateScale(self):
return self._get_attribute(self._SDM_ATT_MAP['PassCriteriaLoadRateScale'])
@PassCriteriaLoadRateScale.setter
def PassCriteriaLoadRateScale(self, value):
self._set_attribute(self._SDM_ATT_MAP['PassCriteriaLoadRateScale'], value)
@property
def PassCriteriaLoadRateValue(self):
return self._get_attribute(self._SDM_ATT_MAP['PassCriteriaLoadRateValue'])
@PassCriteriaLoadRateValue.setter
def PassCriteriaLoadRateValue(self, value):
self._set_attribute(self._SDM_ATT_MAP['PassCriteriaLoadRateValue'], value)
@property
def PassFailFrequency(self):
return self._get_attribute(self._SDM_ATT_MAP['PassFailFrequency'])
@PassFailFrequency.setter
def PassFailFrequency(self, value):
self._set_attribute(self._SDM_ATT_MAP['PassFailFrequency'], value)
def update(self, EnableLatencyPassFail=None, EnablePassFail=None, EnableRatePassFail=None, LatencyThresholdMode=None, LatencyThresholdScale=None, LatencyThresholdValue=None, PassCriteriaLoadRateMode=None, PassCriteriaLoadRateScale=None, PassCriteriaLoadRateValue=None, PassFailFrequency=None):
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def Apply(self, *args, **kwargs):
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('apply', payload=payload, response_object=None)
def ApplyAsync(self, *args, **kwargs):
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyAsync', payload=payload, response_object=None)
def ApplyAsyncResult(self, *args, **kwargs):
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyAsyncResult', payload=payload, response_object=None)
def ApplyITWizardConfiguration(self, *args, **kwargs):
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)
def GenerateReport(self, *args, **kwargs):
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('generateReport', payload=payload, response_object=None)
def Run(self, *args, **kwargs):
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('run', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def WaitForTest(self, *args, **kwargs):
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('waitForTest', payload=payload, response_object=None)
| true | true |
f7300d0ecc7c55bf1cf645207acdfe80dd0197b9 | 40 | py | Python | GAN-man/__init__.py | shauray8/GameGAN-stuff | 54eec30e839279b697166d7ad1bbcf0d342f62b3 | [
"MIT"
] | null | null | null | GAN-man/__init__.py | shauray8/GameGAN-stuff | 54eec30e839279b697166d7ad1bbcf0d342f62b3 | [
"MIT"
] | null | null | null | GAN-man/__init__.py | shauray8/GameGAN-stuff | 54eec30e839279b697166d7ad1bbcf0d342f62b3 | [
"MIT"
] | null | null | null | ## right now i know nothing about this
| 20 | 39 | 0.725 | true | true | |
f7300e0e1a30b53883b40f11ba8186a2c62c128e | 2,827 | py | Python | gw2/gw2_authserver.py | Mixaill/galaxy-integration-gw2 | 3727c90e340763e61738f8edd97025242ff34946 | [
"MIT"
] | 20 | 2019-07-26T10:38:26.000Z | 2021-01-31T17:16:45.000Z | gw2/gw2_authserver.py | FriendsOfGalaxy/galaxy-integration-gw2 | dbb5cd082f4ebeef502e2185773e1ab36ead7c74 | [
"MIT"
] | 18 | 2019-08-01T10:18:00.000Z | 2022-03-01T08:10:56.000Z | gw2/gw2_authserver.py | Mixaill/galaxy-integration-gw2 | 3727c90e340763e61738f8edd97025242ff34946 | [
"MIT"
] | 4 | 2019-08-08T16:39:53.000Z | 2020-10-17T09:01:47.000Z | # (c) 2019-2020 Mikhail Paulyshka
# SPDX-License-Identifier: MIT
import os.path
import aiohttp
import common.mglx_webserver
from .gw2_constants import GW2AuthorizationResult
class Gw2AuthServer(common.mglx_webserver.MglxWebserver):
def __init__(self, gw2api = None):
super(Gw2AuthServer, self).__init__()
self.__gw2api = gw2api
self.add_route('GET', '/', self.handle_login_get)
self.add_route('GET', '/login', self.handle_login_get)
self.add_route('GET', '/login_baddata', self.handle_login_baddata_get)
self.add_route('GET', '/login_failed', self.handle_login_baddata_get)
self.add_route('GET', '/login_noaccount', self.handle_login_noaccount_get)
self.add_route('GET', '/finished', self.handle_finished_get)
self.add_route('POST', '/', self.handle_login_post)
self.add_route('POST', '/login', self.handle_login_post)
#
# Handlers
#
async def handle_login_get(self, request):
return aiohttp.web.FileResponse(os.path.join(os.path.dirname(os.path.realpath(__file__)),'html/login.html'))
async def handle_login_baddata_get(self, request):
return aiohttp.web.FileResponse(os.path.join(os.path.dirname(os.path.realpath(__file__)),'html/login_baddata.html'))
async def handle_login_failed_get(self, request):
return aiohttp.web.FileResponse(os.path.join(os.path.dirname(os.path.realpath(__file__)),'html/login_failed.html'))
async def handle_login_noaccount_get(self, request):
return aiohttp.web.FileResponse(os.path.join(os.path.dirname(os.path.realpath(__file__)),'html/login_noaccount.html'))
async def handle_finished_get(self, request):
return aiohttp.web.FileResponse(os.path.join(os.path.dirname(os.path.realpath(__file__)),'html/login_noaccount.html'))
async def handle_login_post(self, request):
data = await request.post()
#check for apikey field
if 'apikey' not in data:
raise aiohttp.web.HTTPFound('/login_baddata')
#process authentication
auth_result = None
try:
auth_result = await self.__gw2api.do_auth_apikey(data['apikey'])
except Exception:
self._logger.exception("exception on doing auth:")
raise aiohttp.web.HTTPFound('/login_baddata')
if auth_result == GW2AuthorizationResult.FINISHED:
raise aiohttp.web.HTTPFound('/finished')
elif auth_result == GW2AuthorizationResult.FAILED_NO_ACCOUNT:
raise aiohttp.web.HTTPFound('/login_noaccount')
elif auth_result == GW2AuthorizationResult.FAILED_BAD_DATA:
raise aiohttp.web.HTTPFound('/login_baddata')
else:
raise aiohttp.web.HTTPFound('/login_failed')
raise aiohttp.web.HTTPFound('/login_failed')
| 39.263889 | 126 | 0.696498 |
import os.path
import aiohttp
import common.mglx_webserver
from .gw2_constants import GW2AuthorizationResult
class Gw2AuthServer(common.mglx_webserver.MglxWebserver):
def __init__(self, gw2api = None):
super(Gw2AuthServer, self).__init__()
self.__gw2api = gw2api
self.add_route('GET', '/', self.handle_login_get)
self.add_route('GET', '/login', self.handle_login_get)
self.add_route('GET', '/login_baddata', self.handle_login_baddata_get)
self.add_route('GET', '/login_failed', self.handle_login_baddata_get)
self.add_route('GET', '/login_noaccount', self.handle_login_noaccount_get)
self.add_route('GET', '/finished', self.handle_finished_get)
self.add_route('POST', '/', self.handle_login_post)
self.add_route('POST', '/login', self.handle_login_post)
async def handle_login_get(self, request):
return aiohttp.web.FileResponse(os.path.join(os.path.dirname(os.path.realpath(__file__)),'html/login.html'))
async def handle_login_baddata_get(self, request):
return aiohttp.web.FileResponse(os.path.join(os.path.dirname(os.path.realpath(__file__)),'html/login_baddata.html'))
async def handle_login_failed_get(self, request):
return aiohttp.web.FileResponse(os.path.join(os.path.dirname(os.path.realpath(__file__)),'html/login_failed.html'))
async def handle_login_noaccount_get(self, request):
return aiohttp.web.FileResponse(os.path.join(os.path.dirname(os.path.realpath(__file__)),'html/login_noaccount.html'))
async def handle_finished_get(self, request):
return aiohttp.web.FileResponse(os.path.join(os.path.dirname(os.path.realpath(__file__)),'html/login_noaccount.html'))
async def handle_login_post(self, request):
data = await request.post()
if 'apikey' not in data:
raise aiohttp.web.HTTPFound('/login_baddata')
auth_result = None
try:
auth_result = await self.__gw2api.do_auth_apikey(data['apikey'])
except Exception:
self._logger.exception("exception on doing auth:")
raise aiohttp.web.HTTPFound('/login_baddata')
if auth_result == GW2AuthorizationResult.FINISHED:
raise aiohttp.web.HTTPFound('/finished')
elif auth_result == GW2AuthorizationResult.FAILED_NO_ACCOUNT:
raise aiohttp.web.HTTPFound('/login_noaccount')
elif auth_result == GW2AuthorizationResult.FAILED_BAD_DATA:
raise aiohttp.web.HTTPFound('/login_baddata')
else:
raise aiohttp.web.HTTPFound('/login_failed')
raise aiohttp.web.HTTPFound('/login_failed')
| true | true |
f7300e6ef235606ad3583d99a0c55a99bd82137c | 2,108 | py | Python | toontown/coghq/FactoryEntityCreator.py | philicheese2003/ToontownProjectAltisServer | cfa225d1bdddacdbd29b621382347fce17e1dc66 | [
"Apache-2.0"
] | 3 | 2020-01-02T08:43:36.000Z | 2020-07-05T08:59:02.000Z | toontown/coghq/FactoryEntityCreator.py | AnythingTechPro/Project-Altis | 7ead614abdb5072ca06323982de461f4e775d1b3 | [
"Apache-2.0"
] | null | null | null | toontown/coghq/FactoryEntityCreator.py | AnythingTechPro/Project-Altis | 7ead614abdb5072ca06323982de461f4e775d1b3 | [
"Apache-2.0"
] | 2 | 2021-02-25T06:02:05.000Z | 2021-06-19T03:11:22.000Z | from otp.level import EntityCreator
from toontown.coghq import FactoryLevelMgr
from toontown.coghq import PlatformEntity
from toontown.coghq import ConveyorBelt
from toontown.coghq import GearEntity
from toontown.coghq import PaintMixer
from toontown.coghq import GoonClipPlane
from toontown.coghq import MintProduct
from toontown.coghq import MintProductPallet
from toontown.coghq import MintShelf
from toontown.coghq import PathMasterEntity
from toontown.coghq import RenderingEntity
class FactoryEntityCreator(EntityCreator.EntityCreator):
def __init__(self, level):
EntityCreator.EntityCreator.__init__(self, level)
nothing = EntityCreator.nothing
nonlocal = EntityCreator.nonlocal
self.privRegisterTypes({'activeCell': nonlocal,
'crusherCell': nonlocal,
'battleBlocker': nonlocal,
'beanBarrel': nonlocal,
'button': nonlocal,
'conveyorBelt': ConveyorBelt.ConveyorBelt,
'crate': nonlocal,
'door': nonlocal,
'directionalCell': nonlocal,
'gagBarrel': nonlocal,
'gear': GearEntity.GearEntity,
'goon': nonlocal,
'gridGoon': nonlocal,
'golfGreenGame': nonlocal,
'goonClipPlane': GoonClipPlane.GoonClipPlane,
'grid': nonlocal,
'healBarrel': nonlocal,
'levelMgr': FactoryLevelMgr.FactoryLevelMgr,
'lift': nonlocal,
'mintProduct': MintProduct.MintProduct,
'mintProductPallet': MintProductPallet.MintProductPallet,
'mintShelf': MintShelf.MintShelf,
'mover': nonlocal,
'paintMixer': PaintMixer.PaintMixer,
'pathMaster': PathMasterEntity.PathMasterEntity,
'rendering': RenderingEntity.RenderingEntity,
'platform': PlatformEntity.PlatformEntity,
'sinkingPlatform': nonlocal,
'stomper': nonlocal,
'stomperPair': nonlocal,
'laserField': nonlocal,
'securityCamera': nonlocal,
'elevatorMarker': nonlocal,
'trigger': nonlocal,
'moleField': nonlocal,
'maze': nonlocal})
| 37.642857 | 66 | 0.680266 | from otp.level import EntityCreator
from toontown.coghq import FactoryLevelMgr
from toontown.coghq import PlatformEntity
from toontown.coghq import ConveyorBelt
from toontown.coghq import GearEntity
from toontown.coghq import PaintMixer
from toontown.coghq import GoonClipPlane
from toontown.coghq import MintProduct
from toontown.coghq import MintProductPallet
from toontown.coghq import MintShelf
from toontown.coghq import PathMasterEntity
from toontown.coghq import RenderingEntity
class FactoryEntityCreator(EntityCreator.EntityCreator):
def __init__(self, level):
EntityCreator.EntityCreator.__init__(self, level)
nothing = EntityCreator.nothing
nonlocal = EntityCreator.nonlocal
self.privRegisterTypes({'activeCell': nonlocal,
'crusherCell': nonlocal,
'battleBlocker': nonlocal,
'beanBarrel': nonlocal,
'button': nonlocal,
'conveyorBelt': ConveyorBelt.ConveyorBelt,
'crate': nonlocal,
'door': nonlocal,
'directionalCell': nonlocal,
'gagBarrel': nonlocal,
'gear': GearEntity.GearEntity,
'goon': nonlocal,
'gridGoon': nonlocal,
'golfGreenGame': nonlocal,
'goonClipPlane': GoonClipPlane.GoonClipPlane,
'grid': nonlocal,
'healBarrel': nonlocal,
'levelMgr': FactoryLevelMgr.FactoryLevelMgr,
'lift': nonlocal,
'mintProduct': MintProduct.MintProduct,
'mintProductPallet': MintProductPallet.MintProductPallet,
'mintShelf': MintShelf.MintShelf,
'mover': nonlocal,
'paintMixer': PaintMixer.PaintMixer,
'pathMaster': PathMasterEntity.PathMasterEntity,
'rendering': RenderingEntity.RenderingEntity,
'platform': PlatformEntity.PlatformEntity,
'sinkingPlatform': nonlocal,
'stomper': nonlocal,
'stomperPair': nonlocal,
'laserField': nonlocal,
'securityCamera': nonlocal,
'elevatorMarker': nonlocal,
'trigger': nonlocal,
'moleField': nonlocal,
'maze': nonlocal})
| false | true |
f7301020113d711cc94ea89e1f61b1588ee24669 | 30,958 | py | Python | func.py | jhe8281/openSW | 6f3bc5bb34996616a6e862b48e5d164da12344a7 | [
"BSD-3-Clause"
] | 2 | 2019-01-16T02:03:41.000Z | 2019-03-07T04:43:08.000Z | func.py | jhe8281/openSW | 6f3bc5bb34996616a6e862b48e5d164da12344a7 | [
"BSD-3-Clause"
] | null | null | null | func.py | jhe8281/openSW | 6f3bc5bb34996616a6e862b48e5d164da12344a7 | [
"BSD-3-Clause"
] | null | null | null | import email.mime.text
import urllib.request
import sqlite3
import hashlib
import smtplib
import bcrypt
import flask
import json
import html
import sys
import re
import os
try:
import css_html_js_minify
except:
pass
if sys.version_info < (3, 6):
import sha3
from set_mark.tool import *
from mark import *
def load_conn(data):
global conn
global curs
conn = data
curs = conn.cursor()
load_conn2(data)
def send_email(who, title, data):
smtp = smtplib.SMTP_SSL('smtp.gmail.com', 465)
try:
curs.execute('select name, data from other where name = "g_email" or name = "g_pass"')
rep_data = curs.fetchall()
if rep_data:
g_email = ''
g_pass = ''
for i in rep_data:
if i[0] == 'g_email':
g_email = i[1]
else:
g_pass = i[1]
smtp.login(g_email, g_pass)
msg = email.mime.text.MIMEText(data)
msg['Subject'] = title
smtp.sendmail(g_email, who, msg.as_string())
smtp.quit()
except:
print('error : email login error')
def easy_minify(data, tool = None):
try:
if not tool:
data = css_html_js_minify.html_minify(data)
else:
if tool == 'css':
data = css_html_js_minify.css_minify(data)
elif tool == 'js':
data = css_html_js_minify.js_minify(data)
except:
data = re.sub('\n +<', '\n<', data)
data = re.sub('>(\n| )+<', '> <', data)
return data
def render_set(title = '', data = '', num = 0):
if acl_check(title, 'render') == 1:
return 'http request 401.3'
else:
return namumark(title, data, num)
def captcha_get():
data = ''
if custom()[2] == 0:
curs.execute('select data from other where name = "recaptcha"')
recaptcha = curs.fetchall()
if recaptcha and recaptcha[0][0] != '':
curs.execute('select data from other where name = "sec_re"')
sec_re = curs.fetchall()
if sec_re and sec_re[0][0] != '':
data += recaptcha[0][0] + '<hr class=\"main_hr\">'
return data
def update():
# v3.0.5 사용자 문서, 파일 문서, 분류 문서 영어화
try:
all_rep = [['사용자:', 'user:'], ['파일:', 'file:'], ['분류:', 'category:']]
all_rep2 = ['data', 'history', 'acl', 'topic', 'back']
test = 0
for i in range(3):
for j in range(6):
if not j == 5:
curs.execute('select title from ' + all_rep2[j] + ' where title like ?', [all_rep[i][0] + '%'])
else:
curs.execute('select link from back where link like ?', [all_rep[i][0] + '%'])
user_rep = curs.fetchall()
if user_rep:
for user_rep2 in user_rep:
test = 1
first = re.sub('^' + all_rep[i][0], all_rep[i][1], user_rep2[0])
if j == 0:
curs.execute("update data set title = ? where title = ?", [first, user_rep2[0]])
elif j == 1:
curs.execute("update history set title = ? where title = ?", [first, user_rep2[0]])
elif j == 2:
curs.execute("update acl set title = ? where title = ?", [first, user_rep2[0]])
elif j == 3:
curs.execute("update topic set title = ? where title = ?", [first, user_rep2[0]])
elif j == 4:
curs.execute("update back set title = ? where title = ?", [first, user_rep2[0]])
elif j == 5:
curs.execute("update back set link = ? where link = ?", [first, user_rep2[0]])
if test == 1:
print('사용자 to user, 파일 to file, 분류 to category')
except:
pass
# v3.0.8 rd, agreedis, stop 테이블 통합
try:
curs.execute("select title, sub, close from stop")
for i in curs.fetchall():
if i[2] == '':
curs.execute("update rd set stop = 'S' where title = ? and sub = ?", [i[0], i[1]])
else:
curs.execute("update rd set stop = 'O' where title = ? and sub = ?", [i[0], i[1]])
except:
pass
try:
curs.execute("select title, sub from agreedis")
for i in curs.fetchall():
curs.execute("update rd set agree = 'O' where title = ? and sub = ?", [i[0], i[1]])
except:
pass
try:
curs.execute("drop table if exists stop")
curs.execute("drop table if exists agreedis")
except:
pass
def pw_encode(data, data2 = '', type_d = ''):
if type_d == '':
curs.execute('select data from other where name = "encode"')
set_data = curs.fetchall()
type_d = set_data[0][0]
if type_d == 'sha256':
return hashlib.sha256(bytes(data, 'utf-8')).hexdigest()
elif type_d == 'sha3':
if sys.version_info < (3, 6):
return sha3.sha3_256(bytes(data, 'utf-8')).hexdigest()
else:
return hashlib.sha3_256(bytes(data, 'utf-8')).hexdigest()
else:
if data2 != '':
salt_data = bytes(data2, 'utf-8')
else:
salt_data = bcrypt.gensalt(11)
return bcrypt.hashpw(bytes(data, 'utf-8'), salt_data).decode()
def pw_check(data, data2, type_d = 'no', id_d = ''):
curs.execute('select data from other where name = "encode"')
db_data = curs.fetchall()
if type_d != 'no':
if type_d == '':
set_data = 'bcrypt'
else:
set_data = type_d
else:
set_data = db_data[0][0]
while 1:
if set_data in ['sha256', 'sha3']:
data3 = pw_encode(data = data, type_d = set_data)
if data3 == data2:
re_data = 1
else:
re_data = 0
break
else:
try:
if pw_encode(data, data2, 'bcrypt') == data2:
re_data = 1
else:
re_data = 0
break
except:
set_data = db_data[0][0]
if db_data[0][0] != set_data and re_data == 1 and id_d != '':
curs.execute("update user set pw = ?, encode = ? where id = ?", [pw_encode(data), db_data[0][0], id_d])
return re_data
def captcha_post(re_data, num = 1):
if num == 1:
if custom()[2] == 0 and captcha_get() != '':
curs.execute('select data from other where name = "sec_re"')
sec_re = curs.fetchall()
if sec_re and sec_re[0][0] != '':
data = urllib.request.urlopen('https://www.google.com/recaptcha/api/siteverify?secret=' + sec_re[0][0] + '&response=' + re_data)
if not data:
return 0
else:
json_data = data.read().decode(data.headers.get_content_charset())
json_data = json.loads(json_data)
if data.getcode() == 200 and json_data['success'] == True:
return 0
else:
return 1
else:
return 0
else:
return 0
else:
pass
def load_lang(data, num = 2):
if num == 1:
curs.execute("select data from other where name = 'language'")
rep_data = curs.fetchall()
json_data = open(os.path.join('language', rep_data[0][0] + '.json'), 'rt', encoding='utf-8').read()
lang = json.loads(json_data)
if data in lang:
return lang[data]
else:
return data + ' (missing)'
else:
curs.execute('select data from user_set where name = "lang" and id = ?', [ip_check()])
rep_data = curs.fetchall()
if rep_data:
try:
json_data = open(os.path.join('language', rep_data[0][0] + '.json'), 'rt', encoding='utf-8').read()
lang = json.loads(json_data)
except:
return load_lang(data, 1)
if data in lang:
return lang[data]
else:
return load_lang(data, 1)
else:
return load_lang(data, 1)
def load_oauth(provider):
oauth = json.loads(open('oauthsettings.json', encoding='utf-8').read())
return oauth[provider]
def update_oauth(provider, target, content):
oauth = json.loads(open('oauthsettings.json', encoding='utf-8').read())
oauth[provider][target] = content
with open('oauthsettings.json', 'w', encoding='utf-8') as f:
json.dump(oauth, f)
return 'Done'
def ip_or_user(data):
if re.search('(\.|:)', data):
return 1
else:
return 0
def edit_help_button():
# https://stackoverflow.com/questions/11076975/insert-text-into-textarea-at-cursor-position-javascript
js_data = '''
<script>
function insert_data(name, data) {
if(document.selection) {
document.getElementById(name).focus();
sel = document.selection.createRange();
sel.text = data;
} else if(document.getElementById(name).selectionStart || document.getElementById(name).selectionStart == '0') {
var startPos = document.getElementById(name).selectionStart;
var endPos = document.getElementById(name).selectionEnd;
document.getElementById(name).value = document.getElementById(name).value.substring(0, startPos) + data + document.getElementById(name).value.substring(endPos, document.getElementById(name).value.length);
} else {
document.getElementById(name).value += data;
}
}
</script>
'''
insert_list = [['[[|]]', '[[|]]'], ['[*()]', '[*()]'], ['{{{#!}}}', '{{{#!}}}'], ['||<>||', '||<>||'], ["\\'\\'\\'", "\'\'\'"]]
data = ''
for insert_data in insert_list:
data += '<a href="javascript:void(0);" onclick="insert_data(\'content\', \'' + insert_data[0] + '\');">(' + insert_data[1] + ')</a> '
return [js_data, data + '<hr class=\"main_hr\">']
def ip_warring():
if custom()[2] == 0:
curs.execute('select data from other where name = "no_login_warring"')
data = curs.fetchall()
if data and data[0][0] != '':
text_data = '<span>' + data[0][0] + '</span><hr class=\"main_hr\">'
else:
text_data = '<span>' + load_lang('no_login_warring') + '</span><hr class=\"main_hr\">'
else:
text_data = ''
return text_data
def skin_check():
skin = './views/neo_yousoro/'
curs.execute('select data from other where name = "skin"')
skin_exist = curs.fetchall()
if skin_exist and skin_exist[0][0] != '':
if os.path.exists(os.path.abspath('./views/' + skin_exist[0][0] + '/index.html')) == 1:
skin = './views/' + skin_exist[0][0] + '/'
curs.execute('select data from user_set where name = "skin" and id = ?', [ip_check()])
skin_exist = curs.fetchall()
if skin_exist and skin_exist[0][0] != '':
if os.path.exists(os.path.abspath('./views/' + skin_exist[0][0] + '/index.html')) == 1:
skin = './views/' + skin_exist[0][0] + '/'
return skin + 'index.html'
def next_fix(link, num, page, end = 50):
list_data = ''
if num == 1:
if len(page) == end:
list_data += '<hr class=\"main_hr\"><a href="' + link + str(num + 1) + '">(' + load_lang('next') + ')</a>'
elif len(page) != end:
list_data += '<hr class=\"main_hr\"><a href="' + link + str(num - 1) + '">(' + load_lang('previous') + ')</a>'
else:
list_data += '<hr class=\"main_hr\"><a href="' + link + str(num - 1) + '">(' + load_lang('previous') + ')</a> <a href="' + link + str(num + 1) + '">(' + load_lang('next') + ')</a>'
return list_data
def other2(data):
return data + ['']
def wiki_set(num = 1):
if num == 1:
data_list = []
curs.execute('select data from other where name = ?', ['name'])
db_data = curs.fetchall()
if db_data and db_data[0][0] != '':
data_list += [db_data[0][0]]
else:
data_list += ['wiki']
curs.execute('select data from other where name = "license"')
db_data = curs.fetchall()
if db_data and db_data[0][0] != '':
data_list += [db_data[0][0]]
else:
data_list += ['CC 0']
data_list += ['', '']
curs.execute('select data from other where name = "logo"')
db_data = curs.fetchall()
if db_data and db_data[0][0] != '':
data_list += [db_data[0][0]]
else:
data_list += [data_list[0]]
curs.execute("select data from other where name = 'head'")
db_data = curs.fetchall()
if db_data and db_data[0][0] != '':
data_list += [db_data[0][0]]
else:
data_list += ['']
return data_list
if num == 2:
var_data = 'FrontPage'
curs.execute('select data from other where name = "frontpage"')
elif num == 3:
var_data = '2'
curs.execute('select data from other where name = "upload"')
db_data = curs.fetchall()
if db_data and db_data[0][0] != '':
return db_data[0][0]
else:
return var_data
def diff(seqm):
output = []
for opcode, a0, a1, b0, b1 in seqm.get_opcodes():
if opcode == 'equal':
output += [seqm.a[a0:a1]]
elif opcode == 'insert':
output += ["<span style='background:#CFC;'>" + seqm.b[b0:b1] + "</span>"]
elif opcode == 'delete':
output += ["<span style='background:#FDD;'>" + seqm.a[a0:a1] + "</span>"]
elif opcode == 'replace':
output += ["<span style='background:#FDD;'>" + seqm.a[a0:a1] + "</span>"]
output += ["<span style='background:#CFC;'>" + seqm.b[b0:b1] + "</span>"]
end = ''.join(output)
end = end.replace('\r\n', '\n')
sub = ''
if not re.search('\n', end):
end += '\n'
num = 0
left = 1
while 1:
data = re.search('((?:(?!\n).)*)\n', end)
if data:
data = data.groups()[0]
left += 1
if re.search('<span style=\'(?:(?:(?!\').)+)\'>', data):
num += 1
if re.search('<\/span>', data):
num -= 1
sub += str(left) + ' : ' + re.sub('(?P<in>(?:(?!\n).)*)\n', '\g<in>', data, 1) + '<br>'
else:
if re.search('<\/span>', data):
num -= 1
sub += str(left) + ' : ' + re.sub('(?P<in>(?:(?!\n).)*)\n', '\g<in>', data, 1) + '<br>'
else:
if num > 0:
sub += str(left) + ' : ' + re.sub('(?P<in>.*)\n', '\g<in>', data, 1) + '<br>'
end = re.sub('((?:(?!\n).)*)\n', '', end, 1)
else:
break
return sub
def admin_check(num = None, what = None):
ip = ip_check()
curs.execute("select acl from user where id = ?", [ip])
user = curs.fetchall()
if user:
reset = 0
while 1:
if num == 1 and reset == 0:
check = 'ban'
elif num == 3 and reset == 0:
check = 'toron'
elif num == 4 and reset == 0:
check = 'check'
elif num == 5 and reset == 0:
check = 'acl'
elif num == 6 and reset == 0:
check = 'hidel'
elif num == 7 and reset == 0:
check = 'give'
else:
check = 'owner'
curs.execute('select name from alist where name = ? and acl = ?', [user[0][0], check])
if curs.fetchall():
if what:
curs.execute("insert into re_admin (who, what, time) values (?, ?, ?)", [ip, what, get_time()])
conn.commit()
return 1
else:
if reset == 0:
reset = 1
else:
break
return 0
def ip_pas(raw_ip):
hide = 0
if re.search("(\.|:)", raw_ip):
if not re.search("^" + load_lang('tool', 1) + ":", raw_ip):
curs.execute("select data from other where name = 'ip_view'")
data = curs.fetchall()
if data and data[0][0] != '':
ip = '<span style="font-size: 75%;">' + hashlib.md5(bytes(raw_ip, 'utf-8')).hexdigest() + '</span>'
if not admin_check('ban', None):
hide = 1
else:
ip = raw_ip
else:
ip = raw_ip
hide = 1
else:
curs.execute("select title from data where title = ?", ['user:' + raw_ip])
if curs.fetchall():
ip = '<a href="/w/' + url_pas('user:' + raw_ip) + '">' + raw_ip + '</a>'
else:
ip = '<a id="not_thing" href="/w/' + url_pas('user:' + raw_ip) + '">' + raw_ip + '</a>'
if hide == 0:
ip += ' <a href="/tool/' + url_pas(raw_ip) + '">(' + load_lang('tool') + ')</a>'
return ip
def custom():
if 'head' in flask.session:
user_head = flask.session['head']
else:
user_head = ''
if 'state' in flask.session and flask.session['state'] == 1:
curs.execute('select name from alarm where name = ? limit 1', [ip_check()])
if curs.fetchall():
user_icon = 2
else:
user_icon = 1
else:
user_icon = 0
if user_icon != 0:
curs.execute('select data from user_set where name = "email" and id = ?', [ip_check()])
data = curs.fetchall()
if data:
email = data[0][0]
else:
email = ''
else:
email = ''
if user_icon != 0:
user_name = ip_check()
else:
user_name = load_lang('user')
return ['', '', user_icon, user_head, email, user_name, load_lang(data = '', num = 2)]
def load_skin(data = ''):
div2 = ''
system_file = ['main_css', 'easter_egg.html']
if data == '':
ip = ip_check()
curs.execute('select data from user_set where name = "skin" and id = ?', [ip])
data = curs.fetchall()
for skin_data in os.listdir(os.path.abspath('views')):
if not skin_data in system_file:
if not data:
curs.execute('select data from other where name = "skin"')
sql_data = curs.fetchall()
if sql_data and sql_data[0][0] == skin_data:
div2 = '<option value="' + skin_data + '">' + skin_data + '</option>' + div2
else:
div2 += '<option value="' + skin_data + '">' + skin_data + '</option>'
elif data[0][0] == skin_data:
div2 = '<option value="' + skin_data + '">' + skin_data + '</option>' + div2
else:
div2 += '<option value="' + skin_data + '">' + skin_data + '</option>'
else:
for skin_data in os.listdir(os.path.abspath('views')):
if not skin_data in system_file:
if data == skin_data:
div2 = '<option value="' + skin_data + '">' + skin_data + '</option>' + div2
else:
div2 += '<option value="' + skin_data + '">' + skin_data + '</option>'
return div2
def acl_check(name, tool = ''):
ip = ip_check()
if tool == 'render':
curs.execute("select view from acl where title = ?", [name])
acl_data = curs.fetchall()
if acl_data:
if acl_data[0][0] == 'user':
if not user_data:
return 1
if acl_data[0][0] == 'admin':
if not user_data:
return 1
if not admin_check(5, 'view (' + name + ')') == 1:
return 1
return 0
else:
if ban_check() == 1:
return 1
acl_c = re.search("^user:([^/]*)", name)
if acl_c:
acl_n = acl_c.groups()
if admin_check(5, None) == 1:
return 0
curs.execute("select dec from acl where title = ?", ['user:' + acl_n[0]])
acl_data = curs.fetchall()
if acl_data:
if acl_data[0][0] == 'all':
return 0
if acl_data[0][0] == 'user' and not re.search("(\.|:)", ip):
return 0
if ip != acl_n[0] or re.search("(\.|:)", ip):
return 1
if ip == acl_n[0] and not re.search("(\.|:)", ip) and not re.search("(\.|:)", acl_n[0]):
return 0
else:
return 1
file_c = re.search("^file:(.*)", name)
if file_c and admin_check(5, 'edit (' + name + ')') != 1:
return 1
curs.execute("select acl from user where id = ?", [ip])
user_data = curs.fetchall()
curs.execute("select dec from acl where title = ?", [name])
acl_data = curs.fetchall()
if acl_data:
if acl_data[0][0] == 'user':
if not user_data:
return 1
if acl_data[0][0] == 'admin':
if not user_data:
return 1
if not admin_check(5, 'edit (' + name + ')') == 1:
return 1
curs.execute('select data from other where name = "edit"')
set_data = curs.fetchall()
if set_data:
if set_data[0][0] == 'login':
if not user_data:
return 1
if set_data[0][0] == 'admin':
if not user_data:
return 1
if not admin_check(5, None) == 1:
return 1
return 0
def ban_check(ip = None, tool = None):
if not ip:
ip = ip_check()
band = re.search("^([0-9]{1,3}\.[0-9]{1,3})", ip)
if band:
band_it = band.groups()[0]
else:
band_it = '-'
curs.execute("select end, login from ban where block = ?", [band_it])
band_d = curs.fetchall()
curs.execute("select end, login from ban where block = ?", [ip])
ban_d = curs.fetchall()
data = band_d or ban_d
if data and (data[0][0] == '' or data[0][0] > get_time()):
if tool and tool == 'login':
if data[0][1] == 'O':
return 0
return 1
return 0
def topic_check(name, sub):
ip = ip_check()
if ban_check() == 1:
return 1
curs.execute("select acl from user where id = ?", [ip])
user_data = curs.fetchall()
curs.execute('select data from other where name = "discussion"')
acl_data = curs.fetchall()
if acl_data:
if acl_data[0][0] == 'login':
if not user_data:
return 1
if acl_data[0][0] == 'admin':
if not user_data:
return 1
if not admin_check(3, 'topic (' + name + ')') == 1:
return 1
curs.execute("select dis from acl where title = ?", [name])
acl_data = curs.fetchall()
if acl_data:
if acl_data[0][0] == 'user':
if not user_data:
return 1
if acl_data[0][0] == 'admin':
if not user_data:
return 1
if not admin_check(3, 'topic (' + name + ')') == 1:
return 1
curs.execute("select title from rd where title = ? and sub = ? and not stop = ''", [name, sub])
if curs.fetchall():
if not admin_check(3, 'topic (' + name + ')') == 1:
return 1
return 0
def ban_insert(name, end, why, login, blocker):
now_time = get_time()
if re.search("^([0-9]{1,3}\.[0-9]{1,3})$", name):
band = 'O'
else:
band = ''
curs.execute("select block from ban where block = ?", [name])
if curs.fetchall():
curs.execute("insert into rb (block, end, today, blocker, why, band) values (?, ?, ?, ?, ?, ?)", [name, load_lang('release', 1), now_time, blocker, '', band])
curs.execute("delete from ban where block = ?", [name])
else:
if login != '':
login = 'O'
else:
login = ''
if end != '0':
time = datetime.datetime.now()
plus = datetime.timedelta(seconds = int(end))
r_time = (time + plus).strftime("%Y-%m-%d %H:%M:%S")
else:
r_time = ''
curs.execute("insert into rb (block, end, today, blocker, why, band) values (?, ?, ?, ?, ?, ?)", [name, r_time, now_time, blocker, why, band])
curs.execute("insert into ban (block, end, why, band, login) values (?, ?, ?, ?, ?)", [name, r_time, why, band, login])
conn.commit()
def rd_plus(title, sub, date):
curs.execute("select title from rd where title = ? and sub = ?", [title, sub])
if curs.fetchall():
curs.execute("update rd set date = ? where title = ? and sub = ?", [date, title, sub])
else:
curs.execute("insert into rd (title, sub, date) values (?, ?, ?)", [title, sub, date])
def history_plus(title, data, date, ip, send, leng):
curs.execute("select id from history where title = ? order by id + 0 desc limit 1", [title])
id_data = curs.fetchall()
curs.execute("insert into history (id, title, data, date, ip, send, leng, hide) values (?, ?, ?, ?, ?, ?, ?, '')", [str(int(id_data[0][0]) + 1) if id_data else '1', title, data, date, ip, send, leng])
def leng_check(first, second):
if first < second:
all_plus = '+' + str(second - first)
elif second < first:
all_plus = '-' + str(first - second)
else:
all_plus = '0'
return all_plus
def edit_filter_do(data):
if admin_check(1, 'edit_filter pass') != 1:
curs.execute("select regex, sub from filter")
for data_list in curs.fetchall():
match = re.compile(data_list[0], re.I)
if match.search(data):
ban_insert(
ip_check(),
'0' if data_list[1] == 'X' else data_list[1],
load_lang('edit', 1) + ' ' + load_lang('filter', 1),
None,
load_lang('tool', 1) + ':' + load_lang('edit', 1) + ' ' + load_lang('filter', 1)
)
return 1
return 0
def redirect(data):
return flask.redirect(data)
def re_error(data):
conn.commit()
if data == '/ban':
ip = ip_check()
end = '<li>' + load_lang('why') + ' : ' + load_lang('authority_error') + '</li>'
if ban_check() == 1:
curs.execute("select end, why from ban where block = ?", [ip])
end_data = curs.fetchall()
if not end_data:
match = re.search("^([0-9]{1,3}\.[0-9]{1,3})", ip)
if match:
curs.execute("select end, why from ban where block = ?", [match.groups()[0]])
end_data = curs.fetchall()
if end_data:
end = '<li>' + load_lang('state') + ' : ' + load_lang('ban') + '</li><li>'
if end_data[0][0]:
now = int(re.sub('(\-| |:)', '', get_time()))
day = int(re.sub('(\-| |:)', '', end_data[0][0]))
if now >= day:
curs.execute("delete from ban where block = ?", [ip])
conn.commit()
end += '<script>location.reload();</script>'
else:
end += 'end : ' + end_data[0][0]
else:
end += load_lang('limitless')
end += '</li>'
if end_data[0][1] != '':
end += '<li>' + load_lang('why') + ' : ' + end_data[0][1] + '</li>'
return easy_minify(flask.render_template(skin_check(),
imp = ['error', wiki_set(1), custom(), other2([0, 0])],
data = '<h2>error</h2><ul>' + end + '</ul>',
menu = 0
))
else:
error_data = re.search('\/error\/([0-9]+)', data)
if error_data:
num = int(error_data.groups()[0])
if num == 1:
data = load_lang('no_login_error')
elif num == 2:
data = load_lang('no_exist_user_error')
elif num == 3:
data = load_lang('authority_error')
elif num == 4:
data = load_lang('no_admin_block_error')
elif num == 5:
data = load_lang('skin_error')
elif num == 6:
data = load_lang('same_id_exist_error')
elif num == 7:
data = load_lang('long_id_error')
elif num == 8:
data = load_lang('id_char_error') + ' <a href="/name_filter">(' + load_lang('id') + ' ' + load_lang('filter') + ')</a>'
elif num == 9:
data = load_lang('file_exist_error')
elif num == 10:
data = load_lang('password_error')
elif num == 13:
data = load_lang('recaptcha_error')
elif num == 14:
data = load_lang('file_extension_error')
elif num == 15:
data = load_lang('edit_record_error')
elif num == 16:
data = load_lang('same_file_error')
elif num == 17:
data = load_lang('file_capacity_error') + ' ' + wiki_set(3)
elif num == 19:
data = load_lang('decument_exist_error')
elif num == 20:
data = load_lang('password_diffrent_error')
elif num == 21:
data = load_lang('edit_filter_error')
elif num == 22:
data = load_lang('file_name_error')
else:
data = '???'
return easy_minify(flask.render_template(skin_check(),
imp = ['error', wiki_set(1), custom(), other2([0, 0])],
data = '<h2>error</h2><ul><li>' + data + '</li></ul>',
menu = 0
))
else:
return redirect('/') | 33.686616 | 233 | 0.473093 | import email.mime.text
import urllib.request
import sqlite3
import hashlib
import smtplib
import bcrypt
import flask
import json
import html
import sys
import re
import os
try:
import css_html_js_minify
except:
pass
if sys.version_info < (3, 6):
import sha3
from set_mark.tool import *
from mark import *
def load_conn(data):
global conn
global curs
conn = data
curs = conn.cursor()
load_conn2(data)
def send_email(who, title, data):
smtp = smtplib.SMTP_SSL('smtp.gmail.com', 465)
try:
curs.execute('select name, data from other where name = "g_email" or name = "g_pass"')
rep_data = curs.fetchall()
if rep_data:
g_email = ''
g_pass = ''
for i in rep_data:
if i[0] == 'g_email':
g_email = i[1]
else:
g_pass = i[1]
smtp.login(g_email, g_pass)
msg = email.mime.text.MIMEText(data)
msg['Subject'] = title
smtp.sendmail(g_email, who, msg.as_string())
smtp.quit()
except:
print('error : email login error')
def easy_minify(data, tool = None):
try:
if not tool:
data = css_html_js_minify.html_minify(data)
else:
if tool == 'css':
data = css_html_js_minify.css_minify(data)
elif tool == 'js':
data = css_html_js_minify.js_minify(data)
except:
data = re.sub('\n +<', '\n<', data)
data = re.sub('>(\n| )+<', '> <', data)
return data
def render_set(title = '', data = '', num = 0):
if acl_check(title, 'render') == 1:
return 'http request 401.3'
else:
return namumark(title, data, num)
def captcha_get():
data = ''
if custom()[2] == 0:
curs.execute('select data from other where name = "recaptcha"')
recaptcha = curs.fetchall()
if recaptcha and recaptcha[0][0] != '':
curs.execute('select data from other where name = "sec_re"')
sec_re = curs.fetchall()
if sec_re and sec_re[0][0] != '':
data += recaptcha[0][0] + '<hr class=\"main_hr\">'
return data
def update():
try:
all_rep = [['사용자:', 'user:'], ['파일:', 'file:'], ['분류:', 'category:']]
all_rep2 = ['data', 'history', 'acl', 'topic', 'back']
test = 0
for i in range(3):
for j in range(6):
if not j == 5:
curs.execute('select title from ' + all_rep2[j] + ' where title like ?', [all_rep[i][0] + '%'])
else:
curs.execute('select link from back where link like ?', [all_rep[i][0] + '%'])
user_rep = curs.fetchall()
if user_rep:
for user_rep2 in user_rep:
test = 1
first = re.sub('^' + all_rep[i][0], all_rep[i][1], user_rep2[0])
if j == 0:
curs.execute("update data set title = ? where title = ?", [first, user_rep2[0]])
elif j == 1:
curs.execute("update history set title = ? where title = ?", [first, user_rep2[0]])
elif j == 2:
curs.execute("update acl set title = ? where title = ?", [first, user_rep2[0]])
elif j == 3:
curs.execute("update topic set title = ? where title = ?", [first, user_rep2[0]])
elif j == 4:
curs.execute("update back set title = ? where title = ?", [first, user_rep2[0]])
elif j == 5:
curs.execute("update back set link = ? where link = ?", [first, user_rep2[0]])
if test == 1:
print('사용자 to user, 파일 to file, 분류 to category')
except:
pass
try:
curs.execute("select title, sub, close from stop")
for i in curs.fetchall():
if i[2] == '':
curs.execute("update rd set stop = 'S' where title = ? and sub = ?", [i[0], i[1]])
else:
curs.execute("update rd set stop = 'O' where title = ? and sub = ?", [i[0], i[1]])
except:
pass
try:
curs.execute("select title, sub from agreedis")
for i in curs.fetchall():
curs.execute("update rd set agree = 'O' where title = ? and sub = ?", [i[0], i[1]])
except:
pass
try:
curs.execute("drop table if exists stop")
curs.execute("drop table if exists agreedis")
except:
pass
def pw_encode(data, data2 = '', type_d = ''):
if type_d == '':
curs.execute('select data from other where name = "encode"')
set_data = curs.fetchall()
type_d = set_data[0][0]
if type_d == 'sha256':
return hashlib.sha256(bytes(data, 'utf-8')).hexdigest()
elif type_d == 'sha3':
if sys.version_info < (3, 6):
return sha3.sha3_256(bytes(data, 'utf-8')).hexdigest()
else:
return hashlib.sha3_256(bytes(data, 'utf-8')).hexdigest()
else:
if data2 != '':
salt_data = bytes(data2, 'utf-8')
else:
salt_data = bcrypt.gensalt(11)
return bcrypt.hashpw(bytes(data, 'utf-8'), salt_data).decode()
def pw_check(data, data2, type_d = 'no', id_d = ''):
curs.execute('select data from other where name = "encode"')
db_data = curs.fetchall()
if type_d != 'no':
if type_d == '':
set_data = 'bcrypt'
else:
set_data = type_d
else:
set_data = db_data[0][0]
while 1:
if set_data in ['sha256', 'sha3']:
data3 = pw_encode(data = data, type_d = set_data)
if data3 == data2:
re_data = 1
else:
re_data = 0
break
else:
try:
if pw_encode(data, data2, 'bcrypt') == data2:
re_data = 1
else:
re_data = 0
break
except:
set_data = db_data[0][0]
if db_data[0][0] != set_data and re_data == 1 and id_d != '':
curs.execute("update user set pw = ?, encode = ? where id = ?", [pw_encode(data), db_data[0][0], id_d])
return re_data
def captcha_post(re_data, num = 1):
if num == 1:
if custom()[2] == 0 and captcha_get() != '':
curs.execute('select data from other where name = "sec_re"')
sec_re = curs.fetchall()
if sec_re and sec_re[0][0] != '':
data = urllib.request.urlopen('https://www.google.com/recaptcha/api/siteverify?secret=' + sec_re[0][0] + '&response=' + re_data)
if not data:
return 0
else:
json_data = data.read().decode(data.headers.get_content_charset())
json_data = json.loads(json_data)
if data.getcode() == 200 and json_data['success'] == True:
return 0
else:
return 1
else:
return 0
else:
return 0
else:
pass
def load_lang(data, num = 2):
if num == 1:
curs.execute("select data from other where name = 'language'")
rep_data = curs.fetchall()
json_data = open(os.path.join('language', rep_data[0][0] + '.json'), 'rt', encoding='utf-8').read()
lang = json.loads(json_data)
if data in lang:
return lang[data]
else:
return data + ' (missing)'
else:
curs.execute('select data from user_set where name = "lang" and id = ?', [ip_check()])
rep_data = curs.fetchall()
if rep_data:
try:
json_data = open(os.path.join('language', rep_data[0][0] + '.json'), 'rt', encoding='utf-8').read()
lang = json.loads(json_data)
except:
return load_lang(data, 1)
if data in lang:
return lang[data]
else:
return load_lang(data, 1)
else:
return load_lang(data, 1)
def load_oauth(provider):
oauth = json.loads(open('oauthsettings.json', encoding='utf-8').read())
return oauth[provider]
def update_oauth(provider, target, content):
oauth = json.loads(open('oauthsettings.json', encoding='utf-8').read())
oauth[provider][target] = content
with open('oauthsettings.json', 'w', encoding='utf-8') as f:
json.dump(oauth, f)
return 'Done'
def ip_or_user(data):
if re.search('(\.|:)', data):
return 1
else:
return 0
def edit_help_button():
    """Build the editor helper toolbar.

    Returns a two-element list:
    [0] a <script> block defining the JS ``insert_data`` helper that inserts
        text at the caret of a textarea,
    [1] HTML links that call it with common wiki-markup snippets, followed
        by a separator <hr>.
    """
    js_data = '''
    <script>
    function insert_data(name, data) {
        if(document.selection) {
            document.getElementById(name).focus();
            sel = document.selection.createRange();
            sel.text = data;
        } else if(document.getElementById(name).selectionStart || document.getElementById(name).selectionStart == '0') {
            var startPos = document.getElementById(name).selectionStart;
            var endPos = document.getElementById(name).selectionEnd;
            document.getElementById(name).value = document.getElementById(name).value.substring(0, startPos) + data + document.getElementById(name).value.substring(endPos, document.getElementById(name).value.length);
        } else {
            document.getElementById(name).value += data;
        }
    }
    </script>
    '''
    # (inserted snippet, displayed label) pairs for the toolbar.
    shortcuts = [['[[|]]', '[[|]]'], ['[*()]', '[*()]'], ['{{{#!}}}', '{{{#!}}}'], ['||<>||', '||<>||'], ["\\'\\'\\'", "\'\'\'"]]
    links = ''.join(
        '<a href="javascript:void(0);" onclick="insert_data(\'content\', \'' + snippet + '\');">(' + label + ')</a> '
        for snippet, label in shortcuts
    )
    return [js_data, links + '<hr class="main_hr">']
def ip_warring():
    """Return an HTML warning banner for anonymous (not-logged-in) visitors.

    Uses the admin-configured message from the ``other.no_login_warring`` row
    when set, otherwise the translated default. Logged-in users get ''.
    """
    # custom()[2] is the user-icon state; 0 means the visitor is anonymous.
    if custom()[2] == 0:
        curs.execute('select data from other where name = "no_login_warring"')
        data = curs.fetchall()
        if data and data[0][0] != '':
            text_data = '<span>' + data[0][0] + '</span><hr class=\"main_hr\">'
        else:
            text_data = '<span>' + load_lang('no_login_warring') + '</span><hr class=\"main_hr\">'
    else:
        text_data = ''
    return text_data
def skin_check():
    """Resolve the template path to render: ``./views/<skin>/index.html``.

    Precedence (later wins): built-in default 'neo_yousoro' → site-wide skin
    (``other.skin``) → the requesting user's skin (``user_set.skin``). A
    configured skin is honoured only if its index.html actually exists.
    """
    skin = './views/neo_yousoro/'
    curs.execute('select data from other where name = "skin"')
    skin_exist = curs.fetchall()
    if skin_exist and skin_exist[0][0] != '':
        if os.path.exists(os.path.abspath('./views/' + skin_exist[0][0] + '/index.html')) == 1:
            skin = './views/' + skin_exist[0][0] + '/'
    curs.execute('select data from user_set where name = "skin" and id = ?', [ip_check()])
    skin_exist = curs.fetchall()
    if skin_exist and skin_exist[0][0] != '':
        if os.path.exists(os.path.abspath('./views/' + skin_exist[0][0] + '/index.html')) == 1:
            skin = './views/' + skin_exist[0][0] + '/'
    return skin + 'index.html'
def next_fix(link, num, page, end = 50):
    """Build pagination links for a listing.

    link -- URL prefix the page number is appended to
    num  -- current page number (1-based)
    page -- the rows fetched for this page; a full page (len == end)
            implies a next page exists
    end  -- page size (default 50)
    """
    prev_link = '<a href="' + link + str(num - 1) + '">(' + load_lang('previous') + ')</a>'
    next_link = '<a href="' + link + str(num + 1) + '">(' + load_lang('next') + ')</a>'
    if num == 1:
        # First page: only a "next" link, and only when the page is full.
        if len(page) == end:
            return '<hr class="main_hr">' + next_link
        return ''
    if len(page) != end:
        # Short page => this is the last page: "previous" only.
        return '<hr class="main_hr">' + prev_link
    return '<hr class="main_hr">' + prev_link + ' ' + next_link
def other2(data):
    """Return a new list: *data* with one trailing empty string appended
    (used to pad the template's ``imp`` argument)."""
    return [*data, '']
def wiki_set(num = 1):
    """Fetch site-wide settings from the ``other`` table.

    num == 1: return the template header list
              [name, license, '', '', logo, head-HTML] with defaults
              ('wiki', 'CC 0', …) for unset rows.
    num == 2: return the front-page document name (default 'FrontPage').
    num == 3: return the upload size limit (default '2').
    """
    if num == 1:
        data_list = []
        curs.execute('select data from other where name = ?', ['name'])
        db_data = curs.fetchall()
        if db_data and db_data[0][0] != '':
            data_list += [db_data[0][0]]
        else:
            data_list += ['wiki']
        curs.execute('select data from other where name = "license"')
        db_data = curs.fetchall()
        if db_data and db_data[0][0] != '':
            data_list += [db_data[0][0]]
        else:
            data_list += ['CC 0']
        data_list += ['', '']
        curs.execute('select data from other where name = "logo"')
        db_data = curs.fetchall()
        if db_data and db_data[0][0] != '':
            data_list += [db_data[0][0]]
        else:
            # No logo configured: fall back to the wiki name.
            data_list += [data_list[0]]
        curs.execute("select data from other where name = 'head'")
        db_data = curs.fetchall()
        if db_data and db_data[0][0] != '':
            data_list += [db_data[0][0]]
        else:
            data_list += ['']
        return data_list
    # For num 2/3 the branch only selects the query and the default;
    # the fetch and fallback below are shared.
    if num == 2:
        var_data = 'FrontPage'
        curs.execute('select data from other where name = "frontpage"')
    elif num == 3:
        var_data = '2'
        curs.execute('select data from other where name = "upload"')
    db_data = curs.fetchall()
    if db_data and db_data[0][0] != '':
        return db_data[0][0]
    else:
        return var_data
def diff(seqm):
    """Render a line-numbered HTML diff from a difflib.SequenceMatcher.

    Inserted text is wrapped in a green span, deleted text in a red span
    (a 'replace' opcode emits both). Lines that contain no highlight and
    are not inside a multi-line span are omitted from the output.
    """
    output = []
    for opcode, a0, a1, b0, b1 in seqm.get_opcodes():
        if opcode == 'equal':
            output += [seqm.a[a0:a1]]
        elif opcode == 'insert':
            output += ["<span style='background:#CFC;'>" + seqm.b[b0:b1] + "</span>"]
        elif opcode == 'delete':
            output += ["<span style='background:#FDD;'>" + seqm.a[a0:a1] + "</span>"]
        elif opcode == 'replace':
            output += ["<span style='background:#FDD;'>" + seqm.a[a0:a1] + "</span>"]
            output += ["<span style='background:#CFC;'>" + seqm.b[b0:b1] + "</span>"]
    end = ''.join(output)
    end = end.replace('\r\n', '\n')
    sub = ''
    # Guarantee at least one newline so the line-consuming loop below runs.
    if not re.search('\n', end):
        end += '\n'
    # num tracks span nesting across lines (a span may open on one line and
    # close on a later one); left is the 1-based line counter.
    num = 0
    left = 1
    while 1:
        data = re.search('((?:(?!\n).)*)\n', end)
        if data:
            data = data.groups()[0]
            left += 1
            if re.search('<span style=\'(?:(?:(?!\').)+)\'>', data):
                num += 1
                if re.search('<\/span>', data):
                    num -= 1
                sub += str(left) + ' : ' + re.sub('(?P<in>(?:(?!\n).)*)\n', '\g<in>', data, 1) + '<br>'
            else:
                if re.search('<\/span>', data):
                    num -= 1
                    sub += str(left) + ' : ' + re.sub('(?P<in>(?:(?!\n).)*)\n', '\g<in>', data, 1) + '<br>'
                else:
                    # Unchanged line: only shown while inside an open span.
                    if num > 0:
                        sub += str(left) + ' : ' + re.sub('(?P<in>.*)\n', '\g<in>', data, 1) + '<br>'
            # Consume the processed line and continue with the remainder.
            end = re.sub('((?:(?!\n).)*)\n', '', end, 1)
        else:
            break
    return sub
def admin_check(num = None, what = None):
    """Check whether the requester holds the admin permission for action *num*.

    num maps to an ACL name (1=ban, 3=toron, 4=check, 5=acl, 6=hidel,
    7=give); any other value checks 'owner'. If the specific permission is
    absent the loop retries once with 'owner' (which implies everything).
    When *what* is given and the check passes, the action is recorded in
    the ``re_admin`` audit table. Returns 1 on success, 0 otherwise.

    NOTE(review): some call sites pass a string ('ban') as *num*; those fall
    through to the 'owner' check — confirm that is intended.
    """
    ip = ip_check()
    curs.execute("select acl from user where id = ?", [ip])
    user = curs.fetchall()
    if user:
        reset = 0
        while 1:
            if num == 1 and reset == 0:
                check = 'ban'
            elif num == 3 and reset == 0:
                check = 'toron'
            elif num == 4 and reset == 0:
                check = 'check'
            elif num == 5 and reset == 0:
                check = 'acl'
            elif num == 6 and reset == 0:
                check = 'hidel'
            elif num == 7 and reset == 0:
                check = 'give'
            else:
                check = 'owner'
            curs.execute('select name from alist where name = ? and acl = ?', [user[0][0], check])
            if curs.fetchall():
                if what:
                    # Audit log: who did what, and when.
                    curs.execute("insert into re_admin (who, what, time) values (?, ?, ?)", [ip, what, get_time()])
                    conn.commit()
                return 1
            else:
                if reset == 0:
                    reset = 1
                else:
                    break
    return 0
def ip_pas(raw_ip):
    """Render an IP address or username as display HTML.

    IPs are optionally hashed (MD5) when the site's ``ip_view`` setting is on
    and the viewer lacks the ban permission; usernames become links to their
    user page (greyed when the page does not exist). A "(tool)" link is
    appended unless the identity is hidden or internal.
    """
    hide = 0
    if re.search("(\.|:)", raw_ip):
        # Internal tool entries (e.g. "tool:...") are shown verbatim, no link.
        if not re.search("^" + load_lang('tool', 1) + ":", raw_ip):
            curs.execute("select data from other where name = 'ip_view'")
            data = curs.fetchall()
            if data and data[0][0] != '':
                ip = '<span style="font-size: 75%;">' + hashlib.md5(bytes(raw_ip, 'utf-8')).hexdigest() + '</span>'
                if not admin_check('ban', None):
                    hide = 1
            else:
                ip = raw_ip
        else:
            ip = raw_ip
            hide = 1
    else:
        curs.execute("select title from data where title = ?", ['user:' + raw_ip])
        if curs.fetchall():
            ip = '<a href="/w/' + url_pas('user:' + raw_ip) + '">' + raw_ip + '</a>'
        else:
            ip = '<a id="not_thing" href="/w/' + url_pas('user:' + raw_ip) + '">' + raw_ip + '</a>'
    if hide == 0:
        ip += ' <a href="/tool/' + url_pas(raw_ip) + '">(' + load_lang('tool') + ')</a>'
    return ip
def custom():
    """Collect the per-request user context for templates.

    Returns ['', '', user_icon, user_head, email, user_name, user_lang]
    where user_icon is 0 (anonymous), 1 (logged in) or 2 (logged in with
    pending alarms).
    """
    if 'head' in flask.session:
        user_head = flask.session['head']
    else:
        user_head = ''
    if 'state' in flask.session and flask.session['state'] == 1:
        # Logged in; check whether the user has unread alarms.
        curs.execute('select name from alarm where name = ? limit 1', [ip_check()])
        if curs.fetchall():
            user_icon = 2
        else:
            user_icon = 1
    else:
        user_icon = 0
    if user_icon != 0:
        curs.execute('select data from user_set where name = "email" and id = ?', [ip_check()])
        data = curs.fetchall()
        if data:
            email = data[0][0]
        else:
            email = ''
    else:
        email = ''
    if user_icon != 0:
        user_name = ip_check()
    else:
        user_name = load_lang('user')
    return ['', '', user_icon, user_head, email, user_name, load_lang(data = '', num = 2)]
def load_skin(data = ''):
    """Build <option> HTML for the skin selector.

    The currently selected skin (explicit *data*, else the user's setting,
    else the site default) is prepended so it renders first/selected.
    Entries in *system_file* are not skins and are skipped.
    """
    div2 = ''
    system_file = ['main_css', 'easter_egg.html']
    if data == '':
        ip = ip_check()
        curs.execute('select data from user_set where name = "skin" and id = ?', [ip])
        data = curs.fetchall()
        for skin_data in os.listdir(os.path.abspath('views')):
            if not skin_data in system_file:
                if not data:
                    # No per-user setting: mark the site-wide skin as current.
                    curs.execute('select data from other where name = "skin"')
                    sql_data = curs.fetchall()
                    if sql_data and sql_data[0][0] == skin_data:
                        div2 = '<option value="' + skin_data + '">' + skin_data + '</option>' + div2
                    else:
                        div2 += '<option value="' + skin_data + '">' + skin_data + '</option>'
                elif data[0][0] == skin_data:
                    div2 = '<option value="' + skin_data + '">' + skin_data + '</option>' + div2
                else:
                    div2 += '<option value="' + skin_data + '">' + skin_data + '</option>'
    else:
        for skin_data in os.listdir(os.path.abspath('views')):
            if not skin_data in system_file:
                if data == skin_data:
                    div2 = '<option value="' + skin_data + '">' + skin_data + '</option>' + div2
                else:
                    div2 += '<option value="' + skin_data + '">' + skin_data + '</option>'
    return div2
def acl_check(name, tool = ''):
    """Check access control for document *name*. Returns 1 = denied, 0 = allowed.

    tool == 'render': check the per-document *view* ACL only.
    otherwise:        check edit permission (ban status, user-page ownership,
                      file namespace, per-document ACL, site-wide edit policy).

    Fix: in the 'render' branch the original referenced ``user_data`` before
    it was ever assigned (it was only fetched in the edit branch), raising
    NameError whenever a view ACL of 'user'/'admin' was configured. The
    lookup is now performed in that branch as well.
    """
    ip = ip_check()
    if tool == 'render':
        # Needed for the 'user'/'admin' checks below (was missing: NameError).
        curs.execute("select acl from user where id = ?", [ip])
        user_data = curs.fetchall()
        curs.execute("select view from acl where title = ?", [name])
        acl_data = curs.fetchall()
        if acl_data:
            if acl_data[0][0] == 'user':
                if not user_data:
                    return 1
            if acl_data[0][0] == 'admin':
                if not user_data:
                    return 1
                if not admin_check(5, 'view (' + name + ')') == 1:
                    return 1
        return 0
    else:
        if ban_check() == 1:
            return 1
        # user:<name> pages: only the owner (a logged-in user) or an admin
        # may edit, subject to the page's own 'dec' ACL.
        acl_c = re.search("^user:([^/]*)", name)
        if acl_c:
            acl_n = acl_c.groups()
            if admin_check(5, None) == 1:
                return 0
            curs.execute("select dec from acl where title = ?", ['user:' + acl_n[0]])
            acl_data = curs.fetchall()
            if acl_data:
                if acl_data[0][0] == 'all':
                    return 0
                if acl_data[0][0] == 'user' and not re.search("(\.|:)", ip):
                    return 0
            if ip != acl_n[0] or re.search("(\.|:)", ip):
                return 1
            if ip == acl_n[0] and not re.search("(\.|:)", ip) and not re.search("(\.|:)", acl_n[0]):
                return 0
            else:
                return 1
        # file: pages are admin-only.
        file_c = re.search("^file:(.*)", name)
        if file_c and admin_check(5, 'edit (' + name + ')') != 1:
            return 1
        curs.execute("select acl from user where id = ?", [ip])
        user_data = curs.fetchall()
        curs.execute("select dec from acl where title = ?", [name])
        acl_data = curs.fetchall()
        if acl_data:
            if acl_data[0][0] == 'user':
                if not user_data:
                    return 1
            if acl_data[0][0] == 'admin':
                if not user_data:
                    return 1
                if not admin_check(5, 'edit (' + name + ')') == 1:
                    return 1
        # Site-wide edit policy ('login' or 'admin') from the settings table.
        curs.execute('select data from other where name = "edit"')
        set_data = curs.fetchall()
        if set_data:
            if set_data[0][0] == 'login':
                if not user_data:
                    return 1
            if set_data[0][0] == 'admin':
                if not user_data:
                    return 1
                if not admin_check(5, None) == 1:
                    return 1
        return 0
def ban_check(ip = None, tool = None):
    """Return 1 when *ip* (default: the requester) is banned, else 0.

    Both the exact address and its /16-style band ("a.b" prefix) are
    checked. An empty ``end`` means a permanent ban; otherwise the ban
    applies only until the stored timestamp. With tool='login', a ban row
    whose ``login`` flag is 'O' still allows logging in.
    """
    if not ip:
        ip = ip_check()
    band = re.search("^([0-9]{1,3}\.[0-9]{1,3})", ip)
    if band:
        band_it = band.groups()[0]
    else:
        band_it = '-'
    curs.execute("select end, login from ban where block = ?", [band_it])
    band_d = curs.fetchall()
    curs.execute("select end, login from ban where block = ?", [ip])
    ban_d = curs.fetchall()
    # Band-level ban takes precedence over the exact-IP ban.
    data = band_d or ban_d
    if data and (data[0][0] == '' or data[0][0] > get_time()):
        if tool and tool == 'login':
            if data[0][1] == 'O':
                return 0
        return 1
    return 0
def topic_check(name, sub):
    """Check whether the requester may post to discussion *sub* of document
    *name*. Returns 1 = denied, 0 = allowed.

    Denial reasons: the requester is banned; the site-wide discussion policy
    ('login'/'admin') excludes them; the document's 'dis' ACL excludes them;
    or the specific thread is stopped (then only admins may post).
    """
    ip = ip_check()
    if ban_check() == 1:
        return 1
    curs.execute("select acl from user where id = ?", [ip])
    user_data = curs.fetchall()
    curs.execute('select data from other where name = "discussion"')
    acl_data = curs.fetchall()
    if acl_data:
        if acl_data[0][0] == 'login':
            if not user_data:
                return 1
        if acl_data[0][0] == 'admin':
            if not user_data:
                return 1
            if not admin_check(3, 'topic (' + name + ')') == 1:
                return 1
    curs.execute("select dis from acl where title = ?", [name])
    acl_data = curs.fetchall()
    if acl_data:
        if acl_data[0][0] == 'user':
            if not user_data:
                return 1
        if acl_data[0][0] == 'admin':
            if not user_data:
                return 1
            if not admin_check(3, 'topic (' + name + ')') == 1:
                return 1
    # A non-empty 'stop' flag closes the thread to non-admins.
    curs.execute("select title from rd where title = ? and sub = ? and not stop = ''", [name, sub])
    if curs.fetchall():
        if not admin_check(3, 'topic (' + name + ')') == 1:
            return 1
    return 0
def ban_insert(name, end, why, login, blocker):
    """Toggle a ban for *name* (an IP, an "a.b" band, or a username).

    If a ban already exists it is lifted (and the release is logged to
    ``rb``); otherwise a new ban is created with duration *end* in seconds
    ('0' = permanent), reason *why*, and the *login*-still-allowed flag.
    *blocker* is recorded as the acting admin. Commits the transaction.
    """
    now_time = get_time()
    # Two-octet prefix means a band (range) block.
    if re.search("^([0-9]{1,3}\.[0-9]{1,3})$", name):
        band = 'O'
    else:
        band = ''
    curs.execute("select block from ban where block = ?", [name])
    if curs.fetchall():
        # Already banned: record the release and remove the ban.
        curs.execute("insert into rb (block, end, today, blocker, why, band) values (?, ?, ?, ?, ?, ?)", [name, load_lang('release', 1), now_time, blocker, '', band])
        curs.execute("delete from ban where block = ?", [name])
    else:
        if login != '':
            login = 'O'
        else:
            login = ''
        if end != '0':
            time = datetime.datetime.now()
            plus = datetime.timedelta(seconds = int(end))
            r_time = (time + plus).strftime("%Y-%m-%d %H:%M:%S")
        else:
            # Empty end timestamp == permanent ban.
            r_time = ''
        curs.execute("insert into rb (block, end, today, blocker, why, band) values (?, ?, ?, ?, ?, ?)", [name, r_time, now_time, blocker, why, band])
        curs.execute("insert into ban (block, end, why, band, login) values (?, ?, ?, ?, ?)", [name, r_time, why, band, login])
    conn.commit()
def rd_plus(title, sub, date):
    """Upsert the last-activity *date* of discussion (*title*, *sub*) in ``rd``.

    Does not commit; the caller is responsible for conn.commit().
    """
    curs.execute("select title from rd where title = ? and sub = ?", [title, sub])
    if curs.fetchall():
        curs.execute("update rd set date = ? where title = ? and sub = ?", [date, title, sub])
    else:
        curs.execute("insert into rd (title, sub, date) values (?, ?, ?)", [title, sub, date])
def history_plus(title, data, date, ip, send, leng):
    """Append a new revision of *title* to the ``history`` table.

    The revision id is the previous highest id + 1 ('1' for a new
    document); ids are stored as strings, hence the ``id + 0`` numeric sort.
    Does not commit.
    """
    curs.execute("select id from history where title = ? order by id + 0 desc limit 1", [title])
    id_data = curs.fetchall()
    curs.execute("insert into history (id, title, data, date, ip, send, leng, hide) values (?, ?, ?, ?, ?, ?, ?, '')", [str(int(id_data[0][0]) + 1) if id_data else '1', title, data, date, ip, send, leng])
def leng_check(first, second):
    """Return the signed size difference between two revision lengths as a
    display string: '+n' for growth, '-n' for shrinkage, '0' for no change."""
    delta = second - first
    if delta > 0:
        return '+' + str(delta)
    if delta < 0:
        return '-' + str(-delta)
    return '0'
def edit_filter_do(data):
    """Run the edit text *data* through the regex edit filters.

    Admins with filter-bypass are exempt. On a match the requester is
    auto-banned (filter sub 'X' means permanent) and 1 is returned;
    otherwise 0.
    """
    if admin_check(1, 'edit_filter pass') != 1:
        curs.execute("select regex, sub from filter")
        for data_list in curs.fetchall():
            # Filters are matched case-insensitively.
            match = re.compile(data_list[0], re.I)
            if match.search(data):
                ban_insert(
                    ip_check(),
                    '0' if data_list[1] == 'X' else data_list[1],
                    load_lang('edit', 1) + ' ' + load_lang('filter', 1),
                    None,
                    load_lang('tool', 1) + ':' + load_lang('edit', 1) + ' ' + load_lang('filter', 1)
                )
                return 1
    return 0
def redirect(data):
    """Thin wrapper around flask.redirect (kept as a local seam)."""
    return flask.redirect(data)
def re_error(data):
    """Render an error page.

    data == '/ban': show the requester's ban status (state, expiry, reason),
    auto-lifting bans whose end time has passed.
    data == '/error/<n>': show the translated message for error code n.
    Anything else redirects to the front page.
    """
    conn.commit()
    if data == '/ban':
        ip = ip_check()
        end = '<li>' + load_lang('why') + ' : ' + load_lang('authority_error') + '</li>'
        if ban_check() == 1:
            curs.execute("select end, why from ban where block = ?", [ip])
            end_data = curs.fetchall()
            if not end_data:
                # No exact-IP row: the ban must be on the "a.b" band.
                match = re.search("^([0-9]{1,3}\.[0-9]{1,3})", ip)
                if match:
                    curs.execute("select end, why from ban where block = ?", [match.groups()[0]])
                    end_data = curs.fetchall()
            if end_data:
                end = '<li>' + load_lang('state') + ' : ' + load_lang('ban') + '</li><li>'
                if end_data[0][0]:
                    # Compare timestamps numerically by stripping separators.
                    now = int(re.sub('(\-| |:)', '', get_time()))
                    day = int(re.sub('(\-| |:)', '', end_data[0][0]))
                    if now >= day:
                        # Ban expired: delete it and reload the page.
                        curs.execute("delete from ban where block = ?", [ip])
                        conn.commit()
                        end += '<script>location.reload();</script>'
                    else:
                        end += 'end : ' + end_data[0][0]
                else:
                    end += load_lang('limitless')
                end += '</li>'
                if end_data[0][1] != '':
                    end += '<li>' + load_lang('why') + ' : ' + end_data[0][1] + '</li>'
        return easy_minify(flask.render_template(skin_check(),
            imp = ['error', wiki_set(1), custom(), other2([0, 0])],
            data = '<h2>error</h2><ul>' + end + '</ul>',
            menu = 0
        ))
    else:
        error_data = re.search('\/error\/([0-9]+)', data)
        if error_data:
            # Map numeric error codes to translated messages.
            num = int(error_data.groups()[0])
            if num == 1:
                data = load_lang('no_login_error')
            elif num == 2:
                data = load_lang('no_exist_user_error')
            elif num == 3:
                data = load_lang('authority_error')
            elif num == 4:
                data = load_lang('no_admin_block_error')
            elif num == 5:
                data = load_lang('skin_error')
            elif num == 6:
                data = load_lang('same_id_exist_error')
            elif num == 7:
                data = load_lang('long_id_error')
            elif num == 8:
                data = load_lang('id_char_error') + ' <a href="/name_filter">(' + load_lang('id') + ' ' + load_lang('filter') + ')</a>'
            elif num == 9:
                data = load_lang('file_exist_error')
            elif num == 10:
                data = load_lang('password_error')
            elif num == 13:
                data = load_lang('recaptcha_error')
            elif num == 14:
                data = load_lang('file_extension_error')
            elif num == 15:
                data = load_lang('edit_record_error')
            elif num == 16:
                data = load_lang('same_file_error')
            elif num == 17:
                data = load_lang('file_capacity_error') + ' ' + wiki_set(3)
            elif num == 19:
                data = load_lang('decument_exist_error')
            elif num == 20:
                data = load_lang('password_diffrent_error')
            elif num == 21:
                data = load_lang('edit_filter_error')
            elif num == 22:
                data = load_lang('file_name_error')
            else:
                data = '???'
            return easy_minify(flask.render_template(skin_check(),
                imp = ['error', wiki_set(1), custom(), other2([0, 0])],
                data = '<h2>error</h2><ul><li>' + data + '</li></ul>',
                menu = 0
            ))
        else:
            return redirect('/')
f73010c48c4a643298d574ef9fdbe70bed0b28c8 | 20,916 | py | Python | fitapp/tests/test_integration.py | thesignalcenter/django-fitbit | aa17ee5dacbbf4ad1edea85f480829185e6f39f9 | [
"Apache-2.0"
] | null | null | null | fitapp/tests/test_integration.py | thesignalcenter/django-fitbit | aa17ee5dacbbf4ad1edea85f480829185e6f39f9 | [
"Apache-2.0"
] | null | null | null | fitapp/tests/test_integration.py | thesignalcenter/django-fitbit | aa17ee5dacbbf4ad1edea85f480829185e6f39f9 | [
"Apache-2.0"
] | 2 | 2018-06-21T20:12:01.000Z | 2019-06-11T23:32:07.000Z | import json
import time
from collections import OrderedDict
from datetime import datetime
import requests_mock
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import AnonymousUser
from django.http import HttpRequest
from django.test.utils import override_settings
from django.urls import reverse
from fitbit.exceptions import HTTPConflict
from freezegun import freeze_time
from mock import patch
from requests.auth import _basic_auth_str
from fitapp import utils
from fitapp.decorators import fitbit_integration_warning
from fitapp.models import TimeSeriesDataType, UserFitbit
from fitapp.tasks import subscribe, unsubscribe
from .base import FitappTestBase
class TestIntegrationUtility(FitappTestBase):
    """Tests for utils.is_integrated (whether a user has stored OAuth data)."""

    def test_is_integrated(self):
        """Users with stored OAuth information are integrated."""
        self.assertTrue(utils.is_integrated(self.user))

    def test_is_not_integrated(self):
        """User is not integrated if we have no OAuth data for them"""
        UserFitbit.objects.all().delete()
        self.assertFalse(utils.is_integrated(self.user))

    def test_unauthenticated(self):
        """User is not integrated if they aren't logged in."""
        user = AnonymousUser()
        self.assertFalse(utils.is_integrated(user))
class TestIntegrationDecorator(FitappTestBase):
    """Tests for the fitbit_integration_warning view decorator.

    The decorator should leave the view's return value untouched and add a
    Django messages warning only when the user is not integrated.
    """

    def setUp(self):
        super(TestIntegrationDecorator, self).setUp()
        self.fake_request = HttpRequest()
        self.fake_request.user = self.user
        self.fake_view = lambda request: "hello"
        self.messages = []

    def _mock_decorator(self, msg=None):
        """Invoke the decorated fake view while capturing messages.error calls."""
        def mock_error(request, message, *args, **kwargs):
            self.messages.append(message)

        with patch.object(messages, 'error', mock_error) as error:
            return fitbit_integration_warning(msg=msg)(self.fake_view)(
                self.fake_request)

    def test_unauthenticated(self):
        """Message should be added if user is not logged in."""
        self.fake_request.user = AnonymousUser()
        results = self._mock_decorator()

        self.assertEqual(results, "hello")
        self.assertEqual(len(self.messages), 1)
        self.assertEqual(
            self.messages[0], utils.get_setting('FITAPP_DECORATOR_MESSAGE'))

    def test_is_integrated(self):
        """Decorator should have no effect if user is integrated."""
        results = self._mock_decorator()

        self.assertEqual(results, "hello")
        self.assertEqual(len(self.messages), 0)

    def test_is_not_integrated(self):
        """Message should be added if user is not integrated."""
        UserFitbit.objects.all().delete()
        results = self._mock_decorator()

        self.assertEqual(results, "hello")
        self.assertEqual(len(self.messages), 1)
        self.assertEqual(
            self.messages[0], utils.get_setting('FITAPP_DECORATOR_MESSAGE'))

    def test_custom_msg(self):
        """Decorator should support a custom message string."""
        UserFitbit.objects.all().delete()
        msg = "customized"
        results = self._mock_decorator(msg)

        self.assertEqual(results, "hello")
        self.assertEqual(len(self.messages), 1)
        self.assertEqual(self.messages[0], "customized")

    def test_custom_msg_func(self):
        """Decorator should support a custom message function."""
        UserFitbit.objects.all().delete()
        msg = lambda request: "message to {0}".format(request.user)
        results = self._mock_decorator(msg)

        self.assertEqual(results, "hello")
        self.assertEqual(len(self.messages), 1)
        self.assertEqual(self.messages[0], msg(self.fake_request))
class TestLoginView(FitappTestBase):
    """Tests for the fitbit-login view (start of the OAuth dance)."""

    url_name = 'fitbit-login'

    def test_get(self):
        """
        Login view should generate a token_url and then
        redirect to an authorization URL.
        """
        response = self._mock_client()
        self.assertRedirectsNoFollow(response, '/complete/')
        self.assertEqual(response.status_code, 302)
        self.assertEqual(UserFitbit.objects.count(), 1)

    def test_unauthenticated(self):
        """User must be logged in to access Login view."""
        self.client.logout()
        response = self._get()
        self.assertEqual(response.status_code, 302)
        self.assertEqual(UserFitbit.objects.count(), 1)

    def test_unintegrated(self):
        """Fitbit credentials not required to access Login view."""
        self.fbuser.delete()
        response = self._mock_client()
        self.assertRedirectsNoFollow(response, '/complete/')
        self.assertEqual(UserFitbit.objects.count(), 0)

    def test_next(self):
        # A ?next= parameter should be stashed in the session for use after
        # the OAuth flow completes.
        response = self._mock_client(get_kwargs={'next': '/next'})
        self.assertRedirectsNoFollow(response, '/complete/')
        self.assertEqual(self.client.session.get('fitbit_next', None), '/next')
        self.assertEqual(UserFitbit.objects.count(), 1)
class TestCompleteView(FitappTestBase):
    """Tests for the fitbit-complete view (OAuth callback).

    On success the view stores the user's tokens, schedules a subscription
    task, and kicks off historical time-series imports per the
    FITAPP_SUBSCRIPTIONS setting.
    """

    url_name = 'fitbit-complete'
    user_id = 'userid'
    # Canned token payload returned by the mocked OAuth client.
    token = {
        'access_token': 'AccessToken123',
        'refresh_token': 'RefreshToken123',
        'expires_at': time.time() + 300,
        'user_id': user_id
    }
    code = 'Code123'

    def setUp(self):
        super(TestCompleteView, self).setUp()
        # Start each test without an existing integration.
        self.fbuser.delete()

    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_complete(self, tsd_apply_async, sub_apply_async):
        """Complete view should fetch & store user's access credentials."""
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        self.assertRedirectsNoFollow(
            response, utils.get_setting('FITAPP_LOGIN_REDIRECT'))
        fbuser = UserFitbit.objects.get()
        sub_apply_async.assert_called_once_with(
            (fbuser.fitbit_user, settings.FITAPP_SUBSCRIBER_ID), countdown=5)
        tsdts = TimeSeriesDataType.objects.all()
        self.assertEqual(tsd_apply_async.call_count, tsdts.count())
        # Imports are staggered: 10s initial delay, +5s between resources.
        for i, _type in enumerate(tsdts):
            tsd_apply_async.assert_any_call(
                (fbuser.fitbit_user, _type.category, _type.resource,),
                countdown=10 + (i * 5))
        self.assertEqual(fbuser.user, self.user)
        self.assertEqual(fbuser.access_token, self.token['access_token'])
        self.assertEqual(fbuser.refresh_token, self.token['refresh_token'])
        self.assertEqual(fbuser.fitbit_user, self.user_id)

    @override_settings(FITAPP_HISTORICAL_INIT_DELAY=11)
    @override_settings(FITAPP_BETWEEN_DELAY=6)
    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_complete_different_delays(self, tsd_apply_async, sub_apply_async):
        """Complete view should use configured delays"""
        tsdts = TimeSeriesDataType.objects.all()
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        fbuser = UserFitbit.objects.get()
        self.assertRedirectsNoFollow(
            response, utils.get_setting('FITAPP_LOGIN_REDIRECT'))
        for i, _type in enumerate(tsdts):
            tsd_apply_async.assert_any_call(
                (fbuser.fitbit_user, _type.category, _type.resource,),
                countdown=11 + (i * 6))

    @override_settings(FITAPP_SUBSCRIPTIONS=OrderedDict([]))
    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_complete_empty_subs(self, tsd_apply_async, sub_apply_async):
        """Complete view should not import data if subs dict is empty"""
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        self.assertRedirectsNoFollow(
            response, utils.get_setting('FITAPP_LOGIN_REDIRECT'))
        self.assertEqual(tsd_apply_async.call_count, 0)

    @override_settings(FITAPP_SUBSCRIPTIONS=OrderedDict([('foods', [])]))
    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_complete_no_res(self, tsd_apply_async, sub_apply_async):
        """Complete view shouldn't import data if subs dict has no resources"""
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        self.assertRedirectsNoFollow(
            response, utils.get_setting('FITAPP_LOGIN_REDIRECT'))
        self.assertEqual(tsd_apply_async.call_count, 0)

    @override_settings(FITAPP_SUBSCRIPTIONS=OrderedDict([
        ('foods', ['steps'])
    ]))
    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_complete_bad_resources(self, tsd_apply_async, sub_apply_async):
        """
        Complete view shouldn't import data if subs dict has invalid resources
        """
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        self.assertContains(
            response,
            "['steps'] resources are invalid for the foods category",
            status_code=500
        )
        self.assertEqual(tsd_apply_async.call_count, 0)

    @override_settings(FITAPP_SUBSCRIPTIONS=OrderedDict([
        ('activities', ['steps', 'calories', 'distance', 'activityCalories']),
        ('foods', ['log/water']),
    ]))
    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_complete_sub_list(self, tsd_apply_async, sub_apply_async):
        """
        Complete view should only import the listed subscriptions, in the right
        order
        """
        activities = TimeSeriesDataType.activities
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        fbuser = UserFitbit.objects.get()
        self.assertRedirectsNoFollow(
            response, utils.get_setting('FITAPP_LOGIN_REDIRECT'))
        tsd_apply_async.assert_any_call(
            (fbuser.fitbit_user, activities, 'steps',), countdown=10)
        tsd_apply_async.assert_any_call(
            (fbuser.fitbit_user, activities, 'calories',), countdown=15)
        tsd_apply_async.assert_any_call(
            (fbuser.fitbit_user, activities, 'distance',), countdown=20)
        tsd_apply_async.assert_any_call(
            (fbuser.fitbit_user, activities, 'activityCalories'), countdown=25)
        tsd_apply_async.assert_any_call(
            (fbuser.fitbit_user, TimeSeriesDataType.foods, 'log/water',),
            countdown=30)

    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_complete_already_integrated(self, tsd_apply_async, sub_apply_async):
        """
        Complete view redirect to the error view if a user attempts to connect
        an already integrated fitbit user to a second user.
        """
        self.create_userfitbit(user=self.user, fitbit_user=self.user_id)
        username = '{0}2'.format(self.username)
        self.create_user(username=username, password=self.password)
        self.client.logout()
        self.client.login(username=username, password=self.password)
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        self.assertRedirectsNoFollow(response, reverse('fitbit-error'))
        self.assertEqual(UserFitbit.objects.all().count(), 1)
        self.assertEqual(sub_apply_async.call_count, 0)
        self.assertEqual(tsd_apply_async.call_count, 0)

    def test_unauthenticated(self):
        """User must be logged in to access Complete view."""
        self.client.logout()
        response = self._mock_client()
        self.assertEqual(response.status_code, 302)
        self.assertEqual(UserFitbit.objects.count(), 0)

    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_next(self, tsd_apply_async, sub_apply_async):
        """
        Complete view should redirect to session['fitbit_next'] if available.
        """
        self._set_session_vars(fitbit_next='/test')
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        self.assertRedirectsNoFollow(response, '/test')
        fbuser = UserFitbit.objects.get()
        sub_apply_async.assert_called_once_with(
            (fbuser.fitbit_user, settings.FITAPP_SUBSCRIBER_ID), countdown=5)
        self.assertEqual(
            tsd_apply_async.call_count, TimeSeriesDataType.objects.count())
        self.assertEqual(fbuser.user, self.user)
        self.assertEqual(fbuser.access_token, self.token['access_token'])
        self.assertEqual(fbuser.refresh_token, self.token['refresh_token'])
        self.assertEqual(fbuser.expires_at, self.token['expires_at'])
        self.assertEqual(fbuser.fitbit_user, self.user_id)

    def test_access_error(self):
        """
        Complete view should redirect to error if access token is
        inaccessible.
        """
        response = self._mock_client(client_kwargs={'error': Exception})
        self.assertRedirectsNoFollow(response, reverse('fitbit-error'))
        self.assertEqual(UserFitbit.objects.count(), 0)

    def test_no_code(self):
        """
        Complete view should redirect to error if `code` param is not
        present.
        """
        response = self._mock_client()
        self.assertRedirectsNoFollow(response, reverse('fitbit-error'))
        self.assertEqual(UserFitbit.objects.count(), 0)

    def test_no_access_token(self):
        """
        Complete view should redirect to error if there isn't an access_token.
        """
        token = self.token.copy()
        token.pop('access_token')
        response = self._mock_client(
            client_kwargs=token, get_kwargs={'code': self.code})
        self.assertRedirectsNoFollow(response, reverse('fitbit-error'))
        self.assertEqual(UserFitbit.objects.count(), 0)

    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_integrated(self, tsd_apply_async, sub_apply_async):
        """Complete view should overwrite existing credentials for this user.
        """
        self.fbuser = self.create_userfitbit(user=self.user)
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        fbuser = UserFitbit.objects.get()
        sub_apply_async.assert_called_with(
            (fbuser.fitbit_user, settings.FITAPP_SUBSCRIBER_ID), countdown=5)
        self.assertEqual(tsd_apply_async.call_count,
                         TimeSeriesDataType.objects.count())
        self.assertEqual(fbuser.user, self.user)
        self.assertEqual(fbuser.access_token, self.token['access_token'])
        self.assertEqual(fbuser.refresh_token, self.token['refresh_token'])
        self.assertEqual(fbuser.fitbit_user, self.user_id)
        self.assertRedirectsNoFollow(
            response, utils.get_setting('FITAPP_LOGIN_REDIRECT'))
class TestErrorView(FitappTestBase):
    """Tests for the fitbit-error view (OAuth failure landing page)."""

    url_name = 'fitbit-error'

    def test_get(self):
        """Should be able to retrieve Error page."""
        response = self._get()
        self.assertEqual(response.status_code, 200)

    def test_unauthenticated(self):
        """User must be logged in to access Error view."""
        self.client.logout()
        response = self._get()
        self.assertEqual(response.status_code, 302)

    def test_unintegrated(self):
        """No Fitbit credentials required to access Error view."""
        self.fbuser.delete()
        response = self._get()
        self.assertEqual(response.status_code, 200)
class TestLogoutView(FitappTestBase):
    """Tests for the fitbit-logout view: it should unsubscribe (async),
    delete the stored UserFitbit, and redirect."""

    url_name = 'fitbit-logout'

    @patch('fitapp.tasks.unsubscribe.apply_async')
    def test_get(self, apply_async):
        """Logout view should remove associated UserFitbit and redirect."""
        response = self._get()
        kwargs = self.fbuser.get_user_data()
        # refresh_cb is a callable and can't be serialized for the task.
        del kwargs['refresh_cb']
        apply_async.assert_called_once_with(kwargs=kwargs, countdown=5)
        self.assertRedirectsNoFollow(
            response, utils.get_setting('FITAPP_LOGIN_REDIRECT'))
        self.assertEqual(UserFitbit.objects.count(), 0)

    @freeze_time(datetime.fromtimestamp(1483500000))
    @patch('fitbit.Fitbit.subscription')
    def test_get_token_expired(self, subscription):
        # With an expired access token, logout must first refresh the token
        # (POST /oauth2/token) and then list/delete subscriptions with the
        # new bearer token.
        subs_url = 'https://api.fitbit.com/1/user/-/apiSubscriptions.json'
        self.fbuser.expires_at = 1483400000
        self.fbuser.save()
        sub = {
            'ownerId': self.fbuser.fitbit_user,
            'subscriberId': '1',
            'subscriptionId': str(self.user.id),
            'collectionType': 'user',
            'ownerType': 'user'
        }
        subs = {'apiSubscriptions': [sub]}
        tok = {
            'access_token': 'fake_return_access_token',
            'refresh_token': 'fake_return_refresh_token',
            'expires_at': 1483600000,
        }
        with requests_mock.mock() as m:
            m.get(subs_url, text=json.dumps(subs), status_code=200)
            m.post('https://api.fitbit.com/oauth2/token', text=json.dumps(tok))
            response = self._get()
        mock_requests = m.request_history
        assert mock_requests[0].path == '/oauth2/token'
        assert mock_requests[0].headers['Authorization'] == _basic_auth_str(
            settings.FITAPP_CONSUMER_KEY,
            settings.FITAPP_CONSUMER_SECRET
        )
        assert mock_requests[1].path == '/1/user/-/apisubscriptions.json'
        assert mock_requests[1].headers['Authorization'] == 'Bearer {}'.format(
            tok['access_token']
        )
        subscription.assert_called_once_with(
            sub['subscriptionId'], sub['subscriberId'], method="DELETE")

    def test_unauthenticated(self):
        """User must be logged in to access Logout view."""
        self.client.logout()
        response = self._get()
        self.assertEqual(response.status_code, 302)
        self.assertEqual(UserFitbit.objects.count(), 1)

    def test_unintegrated(self):
        """No Fitbit credentials required to access Logout view."""
        self.fbuser.delete()
        response = self._get()
        self.assertRedirectsNoFollow(
            response, utils.get_setting('FITAPP_LOGIN_REDIRECT'))
        self.assertEqual(UserFitbit.objects.count(), 0)

    @patch('fitapp.tasks.unsubscribe.apply_async')
    def test_next(self, apply_async):
        """Logout view should redirect to GET['next'] if available."""
        response = self._get(get_kwargs={'next': '/test'})
        kwargs = self.fbuser.get_user_data()
        del kwargs['refresh_cb']
        apply_async.assert_called_with(kwargs=kwargs, countdown=5)
        self.assertRedirectsNoFollow(response, '/test')
        self.assertEqual(UserFitbit.objects.count(), 0)
class TestSubscription(FitappTestBase):
    """Tests for the celery subscribe/unsubscribe tasks."""
    @patch('fitbit.Fitbit.subscription')
    def test_subscribe(self, subscription):
        """Subscribe task calls the fitbit subscription API once."""
        subscribe.apply_async((self.fbuser.fitbit_user, 1,))
        subscription.assert_called_once_with(self.user.id, 1, )
    @patch('fitbit.Fitbit.subscription')
    def test_subscribe_error(self, subscription):
        """An HTTPConflict from fitbit rejects the subscribe task."""
        subscription.side_effect = HTTPConflict
        apply_result = subscribe.apply_async((self.fbuser.fitbit_user, 1,))
        self.assertEqual(apply_result.status, 'REJECTED')
        subscription.assert_called_once_with(self.user.id, 1, )
    @patch('fitbit.Fitbit.subscription')
    @patch('fitbit.Fitbit.list_subscriptions')
    def test_unsubscribe(self, list_subscriptions, subscription):
        """Unsubscribe task deletes each subscription fitbit reports."""
        sub = {
            'ownerId': self.fbuser.fitbit_user,
            'subscriberId': '1',
            'subscriptionId': str(self.user.id).encode('utf8'),
            'collectionType': 'user',
            'ownerType': 'user'
        }
        list_subscriptions.return_value = {'apiSubscriptions': [sub]}
        # refresh_cb is not serializable, so it never reaches the task.
        kwargs = self.fbuser.get_user_data()
        del kwargs['refresh_cb']
        unsubscribe.apply_async(kwargs=kwargs)
        list_subscriptions.assert_called_once_with()
        subscription.assert_called_once_with(
            sub['subscriptionId'], sub['subscriberId'], method="DELETE")
    @patch('fitbit.Fitbit.subscription')
    @patch('fitbit.Fitbit.list_subscriptions')
    def test_unsubscribe_error(self, list_subscriptions, subscription):
        """A failing subscription listing rejects the task without deletes."""
        list_subscriptions.side_effect = HTTPConflict
        kwargs = self.fbuser.get_user_data()
        del kwargs['refresh_cb']
        result = unsubscribe.apply_async(kwargs=kwargs)
        self.assertEqual(result.status, 'REJECTED')
        list_subscriptions.assert_called_once_with()
        self.assertEqual(subscription.call_count, 0)
| 41.335968 | 81 | 0.672117 | import json
import time
from collections import OrderedDict
from datetime import datetime
import requests_mock
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import AnonymousUser
from django.http import HttpRequest
from django.test.utils import override_settings
from django.urls import reverse
from fitbit.exceptions import HTTPConflict
from freezegun import freeze_time
from mock import patch
from requests.auth import _basic_auth_str
from fitapp import utils
from fitapp.decorators import fitbit_integration_warning
from fitapp.models import TimeSeriesDataType, UserFitbit
from fitapp.tasks import subscribe, unsubscribe
from .base import FitappTestBase
class TestIntegrationUtility(FitappTestBase):
    """Tests for utils.is_integrated."""
    def test_is_integrated(self):
        """A user with stored Fitbit credentials is integrated."""
        self.assertTrue(utils.is_integrated(self.user))
    def test_is_not_integrated(self):
        """A user without stored Fitbit credentials is not integrated."""
        UserFitbit.objects.all().delete()
        self.assertFalse(utils.is_integrated(self.user))
    def test_unauthenticated(self):
        """Anonymous users are never considered integrated."""
        user = AnonymousUser()
        self.assertFalse(utils.is_integrated(user))
class TestIntegrationDecorator(FitappTestBase):
    """Tests for the fitbit_integration_warning view decorator.

    The decorator should always run the wrapped view, adding a warning
    message (via django.contrib.messages) when the requesting user is not
    integrated with Fitbit.
    """
    def setUp(self):
        super(TestIntegrationDecorator, self).setUp()
        self.fake_request = HttpRequest()
        self.fake_request.user = self.user
        self.fake_view = lambda request: "hello"
        self.messages = []
    def _mock_decorator(self, msg=None):
        """Invoke the decorated fake view, capturing any warning messages."""
        def mock_error(request, message, *args, **kwargs):
            self.messages.append(message)
        # Fix: the context manager's return value was previously bound to an
        # unused local ("as error"); the binding has been removed.
        with patch.object(messages, 'error', mock_error):
            return fitbit_integration_warning(msg=msg)(self.fake_view)(
                self.fake_request)
    def test_unauthenticated(self):
        """Anonymous users see the default warning message."""
        self.fake_request.user = AnonymousUser()
        results = self._mock_decorator()
        self.assertEqual(results, "hello")
        self.assertEqual(len(self.messages), 1)
        self.assertEqual(
            self.messages[0], utils.get_setting('FITAPP_DECORATOR_MESSAGE'))
    def test_is_integrated(self):
        """Integrated users get no warning."""
        results = self._mock_decorator()
        self.assertEqual(results, "hello")
        self.assertEqual(len(self.messages), 0)
    def test_is_not_integrated(self):
        """Unintegrated users see the default warning message."""
        UserFitbit.objects.all().delete()
        results = self._mock_decorator()
        self.assertEqual(results, "hello")
        self.assertEqual(len(self.messages), 1)
        self.assertEqual(
            self.messages[0], utils.get_setting('FITAPP_DECORATOR_MESSAGE'))
    def test_custom_msg(self):
        """A literal custom message overrides the default."""
        UserFitbit.objects.all().delete()
        msg = "customized"
        results = self._mock_decorator(msg)
        self.assertEqual(results, "hello")
        self.assertEqual(len(self.messages), 1)
        self.assertEqual(self.messages[0], "customized")
    def test_custom_msg_func(self):
        """A callable custom message is invoked with the request."""
        UserFitbit.objects.all().delete()
        msg = lambda request: "message to {0}".format(request.user)
        results = self._mock_decorator(msg)
        self.assertEqual(results, "hello")
        self.assertEqual(len(self.messages), 1)
        self.assertEqual(self.messages[0], msg(self.fake_request))
class TestLoginView(FitappTestBase):
    """Tests for the fitbit-login view (start of the OAuth2 flow)."""
    url_name = 'fitbit-login'
    def test_get(self):
        """Login redirects to the completion URL without touching credentials."""
        response = self._mock_client()
        self.assertRedirectsNoFollow(response, '/complete/')
        self.assertEqual(response.status_code, 302)
        self.assertEqual(UserFitbit.objects.count(), 1)
    def test_unauthenticated(self):
        """Anonymous users are redirected away from the login view."""
        self.client.logout()
        response = self._get()
        self.assertEqual(response.status_code, 302)
        self.assertEqual(UserFitbit.objects.count(), 1)
    def test_unintegrated(self):
        """Users without existing credentials can still start the flow."""
        self.fbuser.delete()
        response = self._mock_client()
        self.assertRedirectsNoFollow(response, '/complete/')
        self.assertEqual(UserFitbit.objects.count(), 0)
    def test_next(self):
        """A 'next' parameter is stashed in the session for later redirect."""
        response = self._mock_client(get_kwargs={'next': '/next'})
        self.assertRedirectsNoFollow(response, '/complete/')
        self.assertEqual(self.client.session.get('fitbit_next', None), '/next')
        self.assertEqual(UserFitbit.objects.count(), 1)
class TestCompleteView(FitappTestBase):
    """Tests for the fitbit-complete view (end of the OAuth2 flow).

    On success the view stores the returned token as a UserFitbit record,
    queues a subscribe task, and queues one historical time-series fetch
    task per configured resource, with staggered countdowns.
    """
    url_name = 'fitbit-complete'
    user_id = 'userid'
    # NOTE: evaluated once at class-definition time, so expires_at is fixed
    # relative to test-module import — fine here since only equality is checked.
    token = {
        'access_token': 'AccessToken123',
        'refresh_token': 'RefreshToken123',
        'expires_at': time.time() + 300,
        'user_id': user_id
    }
    code = 'Code123'
    def setUp(self):
        # Each test starts without stored credentials.
        super(TestCompleteView, self).setUp()
        self.fbuser.delete()
    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_complete(self, tsd_apply_async, sub_apply_async):
        """Successful completion stores credentials and queues all tasks."""
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        self.assertRedirectsNoFollow(
            response, utils.get_setting('FITAPP_LOGIN_REDIRECT'))
        fbuser = UserFitbit.objects.get()
        sub_apply_async.assert_called_once_with(
            (fbuser.fitbit_user, settings.FITAPP_SUBSCRIBER_ID), countdown=5)
        tsdts = TimeSeriesDataType.objects.all()
        self.assertEqual(tsd_apply_async.call_count, tsdts.count())
        # Default stagger: first fetch at 10s, then every 5s.
        for i, _type in enumerate(tsdts):
            tsd_apply_async.assert_any_call(
                (fbuser.fitbit_user, _type.category, _type.resource,),
                countdown=10 + (i * 5))
        self.assertEqual(fbuser.user, self.user)
        self.assertEqual(fbuser.access_token, self.token['access_token'])
        self.assertEqual(fbuser.refresh_token, self.token['refresh_token'])
        self.assertEqual(fbuser.fitbit_user, self.user_id)
    @override_settings(FITAPP_HISTORICAL_INIT_DELAY=11)
    @override_settings(FITAPP_BETWEEN_DELAY=6)
    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_complete_different_delays(self, tsd_apply_async, sub_apply_async):
        """Countdown stagger honors the configured delay settings."""
        tsdts = TimeSeriesDataType.objects.all()
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        fbuser = UserFitbit.objects.get()
        self.assertRedirectsNoFollow(
            response, utils.get_setting('FITAPP_LOGIN_REDIRECT'))
        for i, _type in enumerate(tsdts):
            tsd_apply_async.assert_any_call(
                (fbuser.fitbit_user, _type.category, _type.resource,),
                countdown=11 + (i * 6))
    @override_settings(FITAPP_SUBSCRIPTIONS=OrderedDict([]))
    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_complete_empty_subs(self, tsd_apply_async, sub_apply_async):
        """An empty subscription config queues no fetch tasks."""
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        self.assertRedirectsNoFollow(
            response, utils.get_setting('FITAPP_LOGIN_REDIRECT'))
        self.assertEqual(tsd_apply_async.call_count, 0)
    @override_settings(FITAPP_SUBSCRIPTIONS=OrderedDict([('foods', [])]))
    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_complete_no_res(self, tsd_apply_async, sub_apply_async):
        """A category with no resources queues no fetch tasks."""
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        self.assertRedirectsNoFollow(
            response, utils.get_setting('FITAPP_LOGIN_REDIRECT'))
        self.assertEqual(tsd_apply_async.call_count, 0)
    @override_settings(FITAPP_SUBSCRIPTIONS=OrderedDict([
        ('foods', ['steps'])
    ]))
    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_complete_bad_resources(self, tsd_apply_async, sub_apply_async):
        """Invalid resources for a category produce a 500 with an explanation."""
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        self.assertContains(
            response,
            "['steps'] resources are invalid for the foods category",
            status_code=500
        )
        self.assertEqual(tsd_apply_async.call_count, 0)
    @override_settings(FITAPP_SUBSCRIPTIONS=OrderedDict([
        ('activities', ['steps', 'calories', 'distance', 'activityCalories']),
        ('foods', ['log/water']),
    ]))
    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_complete_sub_list(self, tsd_apply_async, sub_apply_async):
        """Fetch tasks are queued in config order with 5s spacing."""
        activities = TimeSeriesDataType.activities
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        fbuser = UserFitbit.objects.get()
        self.assertRedirectsNoFollow(
            response, utils.get_setting('FITAPP_LOGIN_REDIRECT'))
        tsd_apply_async.assert_any_call(
            (fbuser.fitbit_user, activities, 'steps',), countdown=10)
        tsd_apply_async.assert_any_call(
            (fbuser.fitbit_user, activities, 'calories',), countdown=15)
        tsd_apply_async.assert_any_call(
            (fbuser.fitbit_user, activities, 'distance',), countdown=20)
        tsd_apply_async.assert_any_call(
            (fbuser.fitbit_user, activities, 'activityCalories'), countdown=25)
        tsd_apply_async.assert_any_call(
            (fbuser.fitbit_user, TimeSeriesDataType.foods, 'log/water',),
            countdown=30)
    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_complete_already_integrated(self, tsd_apply_async, sub_apply_async):
        """A Fitbit account already linked to another user is rejected."""
        self.create_userfitbit(user=self.user, fitbit_user=self.user_id)
        username = '{0}2'.format(self.username)
        self.create_user(username=username, password=self.password)
        self.client.logout()
        self.client.login(username=username, password=self.password)
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        self.assertRedirectsNoFollow(response, reverse('fitbit-error'))
        self.assertEqual(UserFitbit.objects.all().count(), 1)
        self.assertEqual(sub_apply_async.call_count, 0)
        self.assertEqual(tsd_apply_async.call_count, 0)
    def test_unauthenticated(self):
        """Anonymous users are redirected; nothing is stored."""
        self.client.logout()
        response = self._mock_client()
        self.assertEqual(response.status_code, 302)
        self.assertEqual(UserFitbit.objects.count(), 0)
    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_next(self, tsd_apply_async, sub_apply_async):
        """A stashed 'fitbit_next' session value overrides the redirect."""
        self._set_session_vars(fitbit_next='/test')
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        self.assertRedirectsNoFollow(response, '/test')
        fbuser = UserFitbit.objects.get()
        sub_apply_async.assert_called_once_with(
            (fbuser.fitbit_user, settings.FITAPP_SUBSCRIBER_ID), countdown=5)
        self.assertEqual(
            tsd_apply_async.call_count, TimeSeriesDataType.objects.count())
        self.assertEqual(fbuser.user, self.user)
        self.assertEqual(fbuser.access_token, self.token['access_token'])
        self.assertEqual(fbuser.refresh_token, self.token['refresh_token'])
        self.assertEqual(fbuser.expires_at, self.token['expires_at'])
        self.assertEqual(fbuser.fitbit_user, self.user_id)
    def test_access_error(self):
        """An OAuth error redirects to the error view; nothing is stored."""
        response = self._mock_client(client_kwargs={'error': Exception})
        self.assertRedirectsNoFollow(response, reverse('fitbit-error'))
        self.assertEqual(UserFitbit.objects.count(), 0)
    def test_no_code(self):
        """Missing authorization code redirects to the error view."""
        response = self._mock_client()
        self.assertRedirectsNoFollow(response, reverse('fitbit-error'))
        self.assertEqual(UserFitbit.objects.count(), 0)
    def test_no_access_token(self):
        """A token payload without access_token redirects to the error view."""
        token = self.token.copy()
        token.pop('access_token')
        response = self._mock_client(
            client_kwargs=token, get_kwargs={'code': self.code})
        self.assertRedirectsNoFollow(response, reverse('fitbit-error'))
        self.assertEqual(UserFitbit.objects.count(), 0)
    @patch('fitapp.tasks.subscribe.apply_async')
    @patch('fitapp.tasks.get_time_series_data.apply_async')
    def test_integrated(self, tsd_apply_async, sub_apply_async):
        """Re-completing the flow for an integrated user refreshes the record."""
        self.fbuser = self.create_userfitbit(user=self.user)
        response = self._mock_client(
            client_kwargs=self.token, get_kwargs={'code': self.code})
        fbuser = UserFitbit.objects.get()
        sub_apply_async.assert_called_with(
            (fbuser.fitbit_user, settings.FITAPP_SUBSCRIBER_ID), countdown=5)
        self.assertEqual(tsd_apply_async.call_count,
                         TimeSeriesDataType.objects.count())
        self.assertEqual(fbuser.user, self.user)
        self.assertEqual(fbuser.access_token, self.token['access_token'])
        self.assertEqual(fbuser.refresh_token, self.token['refresh_token'])
        self.assertEqual(fbuser.fitbit_user, self.user_id)
        self.assertRedirectsNoFollow(
            response, utils.get_setting('FITAPP_LOGIN_REDIRECT'))
class TestErrorView(FitappTestBase):
    """Tests for the fitbit-error view."""
    url_name = 'fitbit-error'
    def test_get(self):
        """An integrated, logged-in user can view the error page."""
        resp = self._get()
        self.assertEqual(resp.status_code, 200)
    def test_unauthenticated(self):
        """Anonymous users are redirected away from the error page."""
        self.client.logout()
        resp = self._get()
        self.assertEqual(resp.status_code, 302)
    def test_unintegrated(self):
        """The error page does not require Fitbit credentials."""
        self.fbuser.delete()
        resp = self._get()
        self.assertEqual(resp.status_code, 200)
class TestLogoutView(FitappTestBase):
    """Tests for the fitbit-logout view (credential removal + unsubscribe)."""
    url_name = 'fitbit-logout'
    @patch('fitapp.tasks.unsubscribe.apply_async')
    def test_get(self, apply_async):
        """Logout deletes credentials and queues the unsubscribe task."""
        response = self._get()
        # refresh_cb is not serializable, so it never reaches the task.
        kwargs = self.fbuser.get_user_data()
        del kwargs['refresh_cb']
        apply_async.assert_called_once_with(kwargs=kwargs, countdown=5)
        self.assertRedirectsNoFollow(
            response, utils.get_setting('FITAPP_LOGIN_REDIRECT'))
        self.assertEqual(UserFitbit.objects.count(), 0)
    @freeze_time(datetime.fromtimestamp(1483500000))
    @patch('fitbit.Fitbit.subscription')
    def test_get_token_expired(self, subscription):
        """An expired token is refreshed before the subscriptions are removed.

        Time is frozen after the stored ``expires_at``, so the client must
        hit the OAuth2 token endpoint first and then use the refreshed
        bearer token against the subscriptions API.
        """
        subs_url = 'https://api.fitbit.com/1/user/-/apiSubscriptions.json'
        self.fbuser.expires_at = 1483400000
        self.fbuser.save()
        sub = {
            'ownerId': self.fbuser.fitbit_user,
            'subscriberId': '1',
            'subscriptionId': str(self.user.id),
            'collectionType': 'user',
            'ownerType': 'user'
        }
        subs = {'apiSubscriptions': [sub]}
        tok = {
            'access_token': 'fake_return_access_token',
            'refresh_token': 'fake_return_refresh_token',
            'expires_at': 1483600000,
        }
        with requests_mock.mock() as m:
            m.get(subs_url, text=json.dumps(subs), status_code=200)
            m.post('https://api.fitbit.com/oauth2/token', text=json.dumps(tok))
            response = self._get()
        # Token refresh must happen before the subscriptions listing.
        mock_requests = m.request_history
        assert mock_requests[0].path == '/oauth2/token'
        assert mock_requests[0].headers['Authorization'] == _basic_auth_str(
            settings.FITAPP_CONSUMER_KEY,
            settings.FITAPP_CONSUMER_SECRET
        )
        assert mock_requests[1].path == '/1/user/-/apisubscriptions.json'
        assert mock_requests[1].headers['Authorization'] == 'Bearer {}'.format(
            tok['access_token']
        )
        subscription.assert_called_once_with(
            sub['subscriptionId'], sub['subscriberId'], method="DELETE")
    def test_unauthenticated(self):
        """Anonymous users are redirected; credentials stay untouched."""
        self.client.logout()
        response = self._get()
        self.assertEqual(response.status_code, 302)
        self.assertEqual(UserFitbit.objects.count(), 1)
    def test_unintegrated(self):
        """Logout works even without stored Fitbit credentials."""
        self.fbuser.delete()
        response = self._get()
        self.assertRedirectsNoFollow(
            response, utils.get_setting('FITAPP_LOGIN_REDIRECT'))
        self.assertEqual(UserFitbit.objects.count(), 0)
    @patch('fitapp.tasks.unsubscribe.apply_async')
    def test_next(self, apply_async):
        """Logout redirects to GET['next'] when provided."""
        response = self._get(get_kwargs={'next': '/test'})
        kwargs = self.fbuser.get_user_data()
        del kwargs['refresh_cb']
        apply_async.assert_called_with(kwargs=kwargs, countdown=5)
        self.assertRedirectsNoFollow(response, '/test')
        self.assertEqual(UserFitbit.objects.count(), 0)
class TestSubscription(FitappTestBase):
    """Tests for the celery subscribe/unsubscribe tasks."""
    @patch('fitbit.Fitbit.subscription')
    def test_subscribe(self, subscription):
        """Subscribe task calls the fitbit subscription API once."""
        subscribe.apply_async((self.fbuser.fitbit_user, 1,))
        subscription.assert_called_once_with(self.user.id, 1, )
    @patch('fitbit.Fitbit.subscription')
    def test_subscribe_error(self, subscription):
        """An HTTPConflict from fitbit rejects the subscribe task."""
        subscription.side_effect = HTTPConflict
        apply_result = subscribe.apply_async((self.fbuser.fitbit_user, 1,))
        self.assertEqual(apply_result.status, 'REJECTED')
        subscription.assert_called_once_with(self.user.id, 1, )
    @patch('fitbit.Fitbit.subscription')
    @patch('fitbit.Fitbit.list_subscriptions')
    def test_unsubscribe(self, list_subscriptions, subscription):
        """Unsubscribe task deletes each subscription fitbit reports."""
        sub = {
            'ownerId': self.fbuser.fitbit_user,
            'subscriberId': '1',
            'subscriptionId': str(self.user.id).encode('utf8'),
            'collectionType': 'user',
            'ownerType': 'user'
        }
        list_subscriptions.return_value = {'apiSubscriptions': [sub]}
        # refresh_cb is not serializable, so it never reaches the task.
        kwargs = self.fbuser.get_user_data()
        del kwargs['refresh_cb']
        unsubscribe.apply_async(kwargs=kwargs)
        list_subscriptions.assert_called_once_with()
        subscription.assert_called_once_with(
            sub['subscriptionId'], sub['subscriberId'], method="DELETE")
    @patch('fitbit.Fitbit.subscription')
    @patch('fitbit.Fitbit.list_subscriptions')
    def test_unsubscribe_error(self, list_subscriptions, subscription):
        """A failing subscription listing rejects the task without deletes."""
        list_subscriptions.side_effect = HTTPConflict
        kwargs = self.fbuser.get_user_data()
        del kwargs['refresh_cb']
        result = unsubscribe.apply_async(kwargs=kwargs)
        self.assertEqual(result.status, 'REJECTED')
        list_subscriptions.assert_called_once_with()
        self.assertEqual(subscription.call_count, 0)
| true | true |
f730117870c82072f11be34d8f41060542937d2d | 2,060 | py | Python | otcextensions/tests/functional/sdk/vpc/v1/test_vpc.py | artem-lifshits/python-otcextensions | 2021da124f393e0429dd5913a3bc635e6143ba1e | [
"Apache-2.0"
] | 10 | 2018-03-03T17:59:59.000Z | 2020-01-08T10:03:00.000Z | otcextensions/tests/functional/sdk/vpc/v1/test_vpc.py | artem-lifshits/python-otcextensions | 2021da124f393e0429dd5913a3bc635e6143ba1e | [
"Apache-2.0"
] | 39 | 2018-03-26T14:43:23.000Z | 2020-02-07T16:42:53.000Z | otcextensions/tests/functional/sdk/vpc/v1/test_vpc.py | artem-lifshits/python-otcextensions | 2021da124f393e0429dd5913a3bc635e6143ba1e | [
"Apache-2.0"
] | 9 | 2018-03-27T09:17:40.000Z | 2019-08-07T12:53:49.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack import _log
from otcextensions.sdk.vpc.v1 import vpc
from otcextensions.tests.functional import base
_logger = _log.setup_logging('openstack')
class TestService(base.BaseFunctionalTest):
    """Functional tests for the VPC service: create, find, get, list, update."""
    ID = None
    # NOTE: intentionally shadows the uuid module at class scope; the RHS is
    # evaluated once against the module before the class attribute is bound.
    uuid = uuid.uuid4().hex[:8]
    def setUp(self):
        """Create a uniquely named VPC and register its deletion as cleanup."""
        super(TestService, self).setUp()
        attrs = {
            'name': "test-vpc-" + self.uuid,
            'cidr': '192.168.0.0/24'
        }
        self.NAME = "test-vpc-" + self.uuid
        self.UPDATE_NAME = "test-vpc-upd-" + self.uuid
        self.vpc = self.conn.vpc.create_vpc(**attrs)
        assert isinstance(self.vpc, vpc.Vpc)
        self.assertEqual(self.NAME, self.vpc.name)
        self.ID = self.vpc.id
        self.addCleanup(self.conn.vpc.delete_vpc, self.vpc)
    def test_find_vpc(self):
        """A VPC can be looked up by name."""
        found = self.conn.vpc.find_vpc(self.NAME)
        self.assertEqual(found.id, self.ID)
    def test_get_vpc(self):
        """Fetching by id returns the expected name and id."""
        found = self.conn.vpc.get_vpc(self.ID)
        self.assertEqual(found.name, self.NAME)
        self.assertEqual(found.id, self.ID)
    def test_list_vpcs(self):
        """The created VPC appears in the listing."""
        vpcs = [o.name for o in self.conn.vpc.vpcs()]
        self.assertIn(self.NAME, vpcs)
    def test_update_vpc(self):
        """Updating name and CIDR is reflected in the returned object."""
        new_attrs = {
            'name': self.UPDATE_NAME,
            'cidr': '192.168.0.0/16'
        }
        updated = self.conn.vpc.update_vpc(self.ID, **new_attrs)
        self.assertEqual(updated.name, new_attrs['name'])
        self.assertEqual(updated.cidr, new_attrs['cidr'])
| 32.1875 | 75 | 0.655825 |
import uuid
from openstack import _log
from otcextensions.sdk.vpc.v1 import vpc
from otcextensions.tests.functional import base
_logger = _log.setup_logging('openstack')
class TestService(base.BaseFunctionalTest):
    """Functional tests for the VPC service: create, find, get, list, update."""
    ID = None
    uuid = uuid.uuid4().hex[:8]
    def setUp(self):
        """Create a uniquely named VPC and register its deletion as cleanup."""
        super(TestService, self).setUp()
        self.NAME = "test-vpc-" + self.uuid
        self.UPDATE_NAME = "test-vpc-upd-" + self.uuid
        create_attrs = {
            'name': "test-vpc-" + self.uuid,
            'cidr': '192.168.0.0/24'
        }
        self.vpc = self.conn.vpc.create_vpc(**create_attrs)
        assert isinstance(self.vpc, vpc.Vpc)
        self.assertEqual(self.NAME, self.vpc.name)
        self.ID = self.vpc.id
        self.addCleanup(self.conn.vpc.delete_vpc, self.vpc)
    def test_find_vpc(self):
        """A VPC can be looked up by name."""
        match = self.conn.vpc.find_vpc(self.NAME)
        self.assertEqual(match.id, self.ID)
    def test_get_vpc(self):
        """Fetching by id returns the expected name and id."""
        fetched = self.conn.vpc.get_vpc(self.ID)
        self.assertEqual(fetched.name, self.NAME)
        self.assertEqual(fetched.id, self.ID)
    def test_list_vpcs(self):
        """The created VPC appears in the listing."""
        names = [item.name for item in self.conn.vpc.vpcs()]
        self.assertIn(self.NAME, names)
    def test_update_vpc(self):
        """Updating name and CIDR is reflected in the returned object."""
        update_attrs = {
            'name': self.UPDATE_NAME,
            'cidr': '192.168.0.0/16'
        }
        result = self.conn.vpc.update_vpc(self.ID, **update_attrs)
        self.assertEqual(result.name, update_attrs['name'])
        self.assertEqual(result.cidr, update_attrs['cidr'])
| true | true |
f73012160ab63e97ce0cba976dae618df8d31d23 | 15,096 | py | Python | src/python/org/cassandra/geo_maps/geo_maps.py | cassandra/geo_maps | 0257bd73456f9312070e3f7627effee30b73fdea | [
"MIT"
] | null | null | null | src/python/org/cassandra/geo_maps/geo_maps.py | cassandra/geo_maps | 0257bd73456f9312070e3f7627effee30b73fdea | [
"MIT"
] | null | null | null | src/python/org/cassandra/geo_maps/geo_maps.py | cassandra/geo_maps | 0257bd73456f9312070e3f7627effee30b73fdea | [
"MIT"
] | null | null | null | from dataclasses import dataclass
import math
from typing import List
from .display_bounds import DisplayBounds
from .geo_bounds import GeoBounds
from .view_box import ViewBox
from . import utils
@dataclass
class AlbersMapProjection:
    """Albers equal-area conic projection between (longitude, latitude) in
    degrees and planar (x, y) coordinates in miles.

    Uses the spherical form of the projection with the sphere radius taken
    from ``utils.EARTH_RADIUS_AT_EQUATOR_MILES``.
    """
    # Ref: https://en.wikipedia.org/wiki/Albers_projection
    # The center of the displayed map
    #
    reference_longitude_deg : float
    reference_latitude_deg : float
    # The two latitudes along which the projection has no distortion.
    standard_parallel_1_deg : float
    standard_parallel_2_deg : float
    # Ref: https://spatialreference.org/ref/esri/usa-contiguous-albers-equal-area-conic/prettywkt/
    #
    # SPHEROID["GRS_1980",6378137,298.257222101]] -> 6378137 meters = 3963.190592 miles
    #
    # NOTE: deliberately left un-annotated so these stay plain class
    # attributes rather than becoming dataclass fields (which would change
    # the generated __init__ signature).
    radius_miles = utils.EARTH_RADIUS_AT_EQUATOR_MILES
    # For zero comparisons
    EPSILON = 0.000000001
    @property
    def reference_longitude_radians(self):
        return math.radians( self.reference_longitude_deg )
    @property
    def reference_latitude_radians(self):
        return math.radians( self.reference_latitude_deg )
    @property
    def standard_parallel_1_radians(self):
        return math.radians( self.standard_parallel_1_deg )
    @property
    def standard_parallel_2_radians(self):
        return math.radians( self.standard_parallel_2_deg )
    def __post_init__(self):
        """Precompute the projection constants n, C and rho_0."""
        # Common for all projections
        self.n = 0.5 * ( math.sin( self.standard_parallel_1_radians )
                         + math.sin( self.standard_parallel_2_radians ) )
        self.C = ( math.cos( self.standard_parallel_1_radians ) ** 2 ) \
            + 2 * self.n * math.sin( self.standard_parallel_1_radians )
        self.rho_0 = ( self.radius_miles / self.n ) \
            * math.sqrt( self.C - ( 2 * self.n * math.sin( self.reference_latitude_radians ) ))
        return
    def x_y_from_deg( self, longitude_deg : float, latitude_deg : float ):
        """Forward projection: (longitude, latitude) degrees -> (x, y) miles.

        Returns (0, 0) for latitudes where the radicand goes negative
        (outside the projectable range for these parallels).
        """
        # Ref: https://en.wikipedia.org/wiki/Albers_projection#Formulas
        longitude = math.radians( longitude_deg )
        latitude = math.radians( latitude_deg )
        theta = self.n * ( longitude - self.reference_longitude_radians )
        rho_basis = self.C - ( 2 * self.n * math.sin( latitude ))
        if rho_basis < 0.0:
            return ( 0, 0 )
        rho = ( self.radius_miles / self.n ) * math.sqrt( rho_basis )
        x = rho * math.sin( theta )
        y = self.rho_0 - ( rho * math.cos( theta ))
        return ( x, y )
    def deg_from_x_y( self, x : float, y : float ):
        """Inverse projection: (x, y) miles -> (longitude, latitude) degrees.

        Handles the degenerate rho == 0 case (the projection apex) and
        clamps the asin operand, mapping out-of-range values to the poles.
        """
        # Ref: https://mathworld.wolfram.com/AlbersEqual-AreaConicProjection.html
        rho_0_minus_y = self.rho_0 - y
        rho = math.sqrt( x**2 + rho_0_minus_y**2 )
        if abs(rho) > self.EPSILON:
            # For a southern-hemisphere cone (n < 0) flip the signs so the
            # same formulas apply.
            if self.n < 0.0:
                rho *= -1.0
                x *= -1.0
                rho_0_minus_y *= -1.0
            rho_adjusted = rho * self.n / self.radius_miles
            latitude_operand = ( self.C - ( rho_adjusted * rho_adjusted ) ) / ( 2 * self.n )
            if abs(latitude_operand) <= 1.0:
                latitude_radians = math.asin( latitude_operand )
            elif latitude_operand < 0.0:
                latitude_radians = -1.0 * math.pi / 2.0
            else:
                latitude_radians = math.pi / 2.0
            theta = math.atan2( x, rho_0_minus_y )
        else:
            # At the apex the longitude offset is undefined; use the pole.
            theta = 0.0
            if self.n > 0:
                latitude_radians = math.pi / 2.0
            else:
                latitude_radians = -1.0 * math.pi / 2.0
        longitude_radians = self.reference_longitude_radians + ( theta / self.n )
        longitude_deg = math.degrees( longitude_radians )
        latitude_deg = math.degrees( latitude_radians )
        return ( longitude_deg, latitude_deg )
@dataclass
class GeoMap:
    """ Defines how a map projection lines up with an SVG file of a map with that projection. """
    projection : AlbersMapProjection
    geo_bounds : GeoBounds
    svg_template_name : str
    view_box : ViewBox
    # To adjust for the placement of the image (in SVG view box scale units)
    display_x_offset : float = None
    display_y_offset : float = None
    display_x_scale : float = None
    display_y_scale : float = None
    # Optional extra rotation applied between projection and display space.
    rotation_angle_deg : float = None
    calibration_points : List = None
    def __post_init__(self):
        # Precompute the rotation terms once; None means "no rotation".
        self._rotation_angle_radians = None
        self._sine_angle = None
        self._cosine_angle = None
        if self.rotation_angle_deg:
            self._rotation_angle_radians = math.radians( self.rotation_angle_deg )
            self._sine_angle = math.sin( self._rotation_angle_radians )
            self._cosine_angle = math.cos( self._rotation_angle_radians )
        return
    @property
    def aspect_ratio(self):
        # Width over height of the SVG view box.
        return self.view_box.width / self.view_box.height
    def long_lat_deg_to_coords( self, longitude_deg, latitude_deg ):
        """(longitude, latitude) degrees -> SVG display coordinates.

        Pipeline: project, optionally rotate, scale, then offset.  Note the
        y axis is flipped (SVG y grows downward).
        """
        projected_x, projected_y = self.projection.x_y_from_deg( longitude_deg = longitude_deg,
                                                                 latitude_deg = latitude_deg )
        if self._rotation_angle_radians:
            rotated_x = ( projected_x * self._cosine_angle ) - ( projected_y * self._sine_angle )
            rotated_y = ( projected_x * self._sine_angle ) + ( projected_y * self._cosine_angle )
            scaled_x = rotated_x * self.display_x_scale
            scaled_y = rotated_y * self.display_y_scale
        else:
            scaled_x = projected_x * self.display_x_scale
            scaled_y = projected_y * self.display_y_scale
        offset_x = scaled_x + self.display_x_offset
        offset_y = self.display_y_offset - scaled_y
        return ( offset_x , offset_y )
    def coords_to_long_lat_deg( self, x, y ):
        """SVG display coordinates -> (longitude, latitude) degrees.

        Exact inverse of long_lat_deg_to_coords: un-offset, un-scale,
        rotate back (inverse rotation), then inverse-project.
        """
        offset_x = x - self.display_x_offset
        offset_y = self.display_y_offset - y
        scaled_x = offset_x / self.display_x_scale
        scaled_y = offset_y / self.display_y_scale
        if self._rotation_angle_radians:
            rotated_x = ( scaled_x * self._cosine_angle ) + ( scaled_y * self._sine_angle )
            rotated_y = ( -1.0 * scaled_x * self._sine_angle ) + ( scaled_y * self._cosine_angle )
            longitude, latitude = self.projection.deg_from_x_y( x = rotated_x, y = rotated_y )
        else:
            longitude, latitude = self.projection.deg_from_x_y( x = scaled_x, y = scaled_y )
        return ( longitude, latitude )
# ---------------------------------------------------------------------------
# Concrete projections and maps for the USA (continental, Alaska, Hawaii).
# The display_* calibration numbers were derived empirically; see notes below.
# ---------------------------------------------------------------------------
USA_CONTINENTAL_PROJECTION = AlbersMapProjection(
    # References:
    #
    # https://gis.stackexchange.com/questions/141580/which-projection-is-best-for-mapping-the-contiguous-united-states
    # https://spatialreference.org/ref/esri/usa-contiguous-albers-equal-area-conic/html/
    #
    # From: https://pubs.usgs.gov/bul/1532/report.pdf, p. 94
    #
    # Albers Equal-Area Conic projection, with standard parallels 20° and 60° N.
    # This illustration includes all of North America to show the change in spacing of the
    # parallels. When used for maps of the 48 conterminous States standard parallels
    # are 29.5° and 45.5° N.
    #
    # For maps of Alaska, the chosen standard parallels are lats. 55° and
    # 65° N., and for Hawaii, lats. 8° and 18° N. In the latter case,
    # both parallels are south of the islands, but they were chosen to
    # include maps of the more southerly Canal Zone and especially the
    # Philippine Islands.
    reference_longitude_deg = -96.0,
    reference_latitude_deg = 37.5,
    standard_parallel_1_deg = 29.5,
    standard_parallel_2_deg = 45.5,
)
ALASKA_PROJECTION = AlbersMapProjection(
    # References:
    # https://epsg.io/3338
    reference_longitude_deg = -154.0,
    reference_latitude_deg = 50.0,
    standard_parallel_1_deg = 55.0,
    standard_parallel_2_deg = 65.0,
)
HAWAII_PROJECTION = AlbersMapProjection(
    # References:
    # https://epsg.io/102007
    reference_longitude_deg = -157.0,
    reference_latitude_deg = 13.0,
    standard_parallel_1_deg = 8.0,
    standard_parallel_2_deg = 18.0,
)
USA_CONTINENTAL_GEO_MAP = GeoMap(
    # Values arrived at by trial and error via map calibration testing page.
    projection = USA_CONTINENTAL_PROJECTION,
    geo_bounds = GeoBounds(
        longitude_min = -124.8679,
        longitude_max = -66.8628,
        latitude_min = 24.3959,
        latitude_max = 49.3877,
    ),
    svg_template_name = "usa_continental.svg",
    view_box = ViewBox(
        x = 0.0,
        y = 0.0,
        width = 958.0,
        height = 602.0,
    ),
    display_x_scale = 0.3332,
    display_y_scale = 0.3318,
    display_x_offset = 491.0249,
    display_y_offset = 323.6935,
)
# Alaska is drawn as an inset of the same SVG, hence the smaller scale and
# the rotation that matches the conventional inset orientation.
ALASKA_CONTINENTAL_GEO_MAP = GeoMap(
    # Values arrived at by trial and error via map calibration testing page.
    projection = ALASKA_PROJECTION,
    geo_bounds = GeoBounds(
        longitude_min = -180.0,
        longitude_max = -129.993,
        latitude_min = 50.5,
        latitude_max = 71.5232,
    ),
    svg_template_name = "usa_continental.svg",
    view_box = ViewBox(
        x = 0.0,
        y = 0.0,
        width = 958.0,
        height = 602.0,
    ),
    display_x_scale = 0.1301,
    display_y_scale = 0.1311,
    display_x_offset = 132.4555,
    display_y_offset = 638.5017,
    rotation_angle_deg = -11.0,
)
# Hawaii is the second inset of the shared SVG.
HAWAII_CONTINENTAL_GEO_MAP = GeoMap(
    # Values arrived at by trial and error via map calibration testing page.
    projection = HAWAII_PROJECTION,
    geo_bounds = GeoBounds(
        longitude_min = -160.3922,
        longitude_max = -154.6271,
        latitude_min = 18.71,
        latitude_max = 22.3386,
    ),
    svg_template_name = "usa_continental.svg",
    view_box = ViewBox(
        x = 0.0,
        y = 0.0,
        width = 958.0,
        height = 602.0,
    ),
    display_x_scale = 0.3279,
    display_y_scale = 0.3371,
    display_x_offset = 325.5313,
    display_y_offset = 729.5,
    rotation_angle_deg = -0.5,
)
class CompositeGeoMap:
    """Combines multiple GeoMaps that share the same SVG file. i.e.,
    multiple maps rendered together.

    To common example of this is the US map showing Alaska and Hawaii in
    the lower right hand corner below the Continetal US. These are not in
    the same geo coordinate space as the 48 continuous states *AND* they
    use different projection parameters (same Albers projection type, but
    different reference points on the globe.)

    Note that this means that the GeoBounds for the map can be a list of
    bounds, not just one bounding box, since different areas of the map can
    represent different geographic areas.
    """

    def __init__( self, map_id : int, geo_map_list : List[GeoMap] ):
        """ First one in list is considered default. List cannot be empty.

        Raises:
            ValueError: if geo_map_list is empty.
        """
        # Explicit exception rather than "assert": assertions are stripped
        # under "python -O", which would otherwise turn an empty list into
        # an obscure IndexError below.
        if not geo_map_list:
            raise ValueError( 'geo_map_list must contain at least one GeoMap' )
        self._map_id = map_id
        self._geo_map_list = geo_map_list
        self._default_geo_map = self._geo_map_list[0]
        self._geo_bounds = GeoBounds()
        self._geo_bounds_list = list()
        # GeoMap often share the same SVG; a dict (insertion-ordered) is used
        # instead of a set so the deduplicated list is deterministic.
        svg_template_names = dict()
        for geo_map in self._geo_map_list:
            # Make sure each view box reflects this composite map's id
            geo_map.view_box.map_id = self._map_id
            self._geo_bounds.add_bounds( geo_map.geo_bounds )
            self._geo_bounds_list.append( geo_map.geo_bounds )
            svg_template_names[ geo_map.svg_template_name ] = None
            continue
        self._svg_template_name_list = list( svg_template_names )
        return

    @property
    def map_id(self):
        return self._map_id

    @property
    def geo_bounds(self):
        """ A single view of the union of bounds for all contained GeoMap """
        return self._geo_bounds

    @property
    def geo_bounds_list(self):
        """ A list of the individual bounds for each contained GeoMap """
        return self._geo_bounds_list

    @property
    def default_view_box(self):
        """ View box of the default (first) GeoMap. """
        return self._default_geo_map.view_box

    @property
    def default_reference_longitude_deg(self):
        return self._default_geo_map.projection.reference_longitude_deg

    @property
    def default_reference_latitude_deg(self):
        return self._default_geo_map.projection.reference_latitude_deg

    @property
    def default_aspect_ratio(self):
        return self._default_geo_map.aspect_ratio

    @property
    def svg_template_name_list(self):
        """ Unique SVG template names, in first-seen order. """
        return self._svg_template_name_list

    def contains_bounds( self, geo_bounds : GeoBounds ):
        """ True if any contained GeoMap's bounds fully contain geo_bounds. """
        for geo_map_bounds in self._geo_bounds_list:
            if geo_map_bounds.contains_bounds( other_geo_bounds = geo_bounds ):
                return True
            continue
        return False

    def get_geo_map_for_point( self,
                               longitude_deg : float,
                               latitude_deg : float ):
        """ Return the first GeoMap whose bounds contain the point, else the default. """
        for geo_map in self._geo_map_list:
            if geo_map.geo_bounds.contains_point( longitude_deg = longitude_deg,
                                                  latitude_deg = latitude_deg ):
                return geo_map
            continue
        return self._default_geo_map

    def geo_bounds_to_display_bounds( self, geo_bounds : GeoBounds ):
        """ Project geo_bounds onto the display, combining every GeoMap it overlaps. """
        display_bounds = DisplayBounds()
        for geo_map in self._geo_map_list:
            intersection_geo_bounds = geo_map.geo_bounds.intersect( geo_bounds )
            if not intersection_geo_bounds:
                continue
            # The projected corner points bound the displayed region.
            for longitude, latitude in intersection_geo_bounds.corner_points():
                x, y = geo_map.long_lat_deg_to_coords( longitude_deg = longitude,
                                                       latitude_deg = latitude )
                display_bounds.add_point( x = x, y = y )
                continue
            continue
        return display_bounds

    def view_box_to_geo_bounds_list( self, view_box : ViewBox ):
        """ Invert a display view box into geographic bounds, one per relevant GeoMap. """
        geo_bounds_list = list()
        for geo_map in self._geo_map_list:
            geo_bounds = GeoBounds()
            for x, y in view_box.corner_points():
                longitude, latitude = geo_map.coords_to_long_lat_deg( x = x, y = y )
                geo_bounds.add_point( longitude = longitude, latitude = latitude )
                continue
            # If the long/lat form the projections do not fall inside the
            # known bounds, then we can ignore it.
            if not geo_map.geo_bounds.intersects( geo_bounds ):
                continue
            geo_bounds_list.append( geo_bounds )
            continue
        return geo_bounds_list
# Composite map id 1: continental USA plus Alaska and Hawaii insets, all
# rendered on the shared "usa_continental.svg" template.  The continental
# map (first entry) is the default for points outside every member's bounds.
UsaContinentalCompositeGeoMap = CompositeGeoMap(
    map_id = 1,
    geo_map_list = [
        USA_CONTINENTAL_GEO_MAP,
        ALASKA_CONTINENTAL_GEO_MAP,
        HAWAII_CONTINENTAL_GEO_MAP,
    ]
)
| 31.58159 | 118 | 0.624338 | from dataclasses import dataclass
import math
from typing import List
from .display_bounds import DisplayBounds
from .geo_bounds import GeoBounds
from .view_box import ViewBox
from . import utils
@dataclass
class AlbersMapProjection:
    """Albers equal-area conic projection between geographic degrees
    (longitude, latitude) and planar x/y coordinates in miles.

    The constants n, C and rho_0 follow the standard Albers formulation
    and are derived once in __post_init__ from the two standard parallels
    and the reference latitude.
    """
    reference_longitude_deg : float
    reference_latitude_deg : float
    standard_parallel_1_deg : float
    standard_parallel_2_deg : float
    # Class-level constants (unannotated on purpose: not dataclass fields).
    radius_miles = utils.EARTH_RADIUS_AT_EQUATOR_MILES
    EPSILON = 0.000000001
    @property
    def reference_longitude_radians(self):
        return math.radians( self.reference_longitude_deg )
    @property
    def reference_latitude_radians(self):
        return math.radians( self.reference_latitude_deg )
    @property
    def standard_parallel_1_radians(self):
        return math.radians( self.standard_parallel_1_deg )
    @property
    def standard_parallel_2_radians(self):
        return math.radians( self.standard_parallel_2_deg )
    def __post_init__(self):
        # Pre-compute the projection constants n, C and rho_0.
        self.n = 0.5 * ( math.sin( self.standard_parallel_1_radians )
                         + math.sin( self.standard_parallel_2_radians ) )
        self.C = ( math.cos( self.standard_parallel_1_radians ) ** 2 ) \
            + 2 * self.n * math.sin( self.standard_parallel_1_radians )
        self.rho_0 = ( self.radius_miles / self.n ) \
            * math.sqrt( self.C - ( 2 * self.n * math.sin( self.reference_latitude_radians ) ))
        return
    def x_y_from_deg( self, longitude_deg : float, latitude_deg : float ):
        """Project (longitude, latitude) degrees to planar (x, y) miles.

        Returns (0, 0) when the latitude is outside the domain where the
        radial term is defined (negative value under the square root).
        """
        longitude = math.radians( longitude_deg )
        latitude = math.radians( latitude_deg )
        theta = self.n * ( longitude - self.reference_longitude_radians )
        rho_basis = self.C - ( 2 * self.n * math.sin( latitude ))
        if rho_basis < 0.0:
            return ( 0, 0 )
        rho = ( self.radius_miles / self.n ) * math.sqrt( rho_basis )
        x = rho * math.sin( theta )
        y = self.rho_0 - ( rho * math.cos( theta ))
        return ( x, y )
    def deg_from_x_y( self, x : float, y : float ):
        """Inverse projection: planar (x, y) miles back to degrees.

        Handles the degenerate rho ~ 0 case (the projection pole) and
        clamps the asin operand so out-of-domain points map to +/-90 deg.
        """
        rho_0_minus_y = self.rho_0 - y
        rho = math.sqrt( x**2 + rho_0_minus_y**2 )
        if abs(rho) > self.EPSILON:
            # For a southern-hemisphere cone (n < 0) flip the signs so the
            # atan2 quadrant logic below stays valid.
            if self.n < 0.0:
                rho *= -1.0
                x *= -1.0
                rho_0_minus_y *= -1.0
            rho_adjusted = rho * self.n / self.radius_miles
            latitude_operand = ( self.C - ( rho_adjusted * rho_adjusted ) ) / ( 2 * self.n )
            if abs(latitude_operand) <= 1.0:
                latitude_radians = math.asin( latitude_operand )
            elif latitude_operand < 0.0:
                latitude_radians = -1.0 * math.pi / 2.0
            else:
                latitude_radians = math.pi / 2.0
            theta = math.atan2( x, rho_0_minus_y )
        else:
            # At the pole the angle is undefined; latitude is +/-90 deg.
            theta = 0.0
            if self.n > 0:
                latitude_radians = math.pi / 2.0
            else:
                latitude_radians = -1.0 * math.pi / 2.0
        longitude_radians = self.reference_longitude_radians + ( theta / self.n )
        longitude_deg = math.degrees( longitude_radians )
        latitude_deg = math.degrees( latitude_radians )
        return ( longitude_deg, latitude_deg )
@dataclass
class GeoMap:
    """One projected map area: an Albers projection plus the calibrated
    scale/offset (and optional rotation) that maps projected miles onto
    SVG display coordinates inside view_box."""
    projection : AlbersMapProjection
    geo_bounds : GeoBounds
    svg_template_name : str
    view_box : ViewBox
    # Display calibration values; None until the map has been calibrated.
    display_x_offset : float = None
    display_y_offset : float = None
    display_x_scale : float = None
    display_y_scale : float = None
    rotation_angle_deg : float = None
    calibration_points : List = None
    def __post_init__(self):
        # Cache the rotation trig terms once; they stay None when no
        # (non-zero) rotation angle was supplied.
        self._rotation_angle_radians = None
        self._sine_angle = None
        self._cosine_angle = None
        if self.rotation_angle_deg:
            self._rotation_angle_radians = math.radians( self.rotation_angle_deg )
            self._sine_angle = math.sin( self._rotation_angle_radians )
            self._cosine_angle = math.cos( self._rotation_angle_radians )
        return
    @property
    def aspect_ratio(self):
        """Width/height ratio of the display view box."""
        return self.view_box.width / self.view_box.height
    def long_lat_deg_to_coords( self, longitude_deg, latitude_deg ):
        """Geographic degrees -> display coordinates: project, optionally
        rotate, scale, then offset (y is flipped for screen space)."""
        projected_x, projected_y = self.projection.x_y_from_deg( longitude_deg = longitude_deg,
                                                                 latitude_deg = latitude_deg )
        if self._rotation_angle_radians:
            rotated_x = ( projected_x * self._cosine_angle ) - ( projected_y * self._sine_angle )
            rotated_y = ( projected_x * self._sine_angle ) + ( projected_y * self._cosine_angle )
            scaled_x = rotated_x * self.display_x_scale
            scaled_y = rotated_y * self.display_y_scale
        else:
            scaled_x = projected_x * self.display_x_scale
            scaled_y = projected_y * self.display_y_scale
        offset_x = scaled_x + self.display_x_offset
        offset_y = self.display_y_offset - scaled_y
        return ( offset_x , offset_y )
    def coords_to_long_lat_deg( self, x, y ):
        """Display coordinates -> geographic degrees: the inverse of
        long_lat_deg_to_coords (un-offset, un-scale, rotate back, then
        unproject)."""
        offset_x = x - self.display_x_offset
        offset_y = self.display_y_offset - y
        scaled_x = offset_x / self.display_x_scale
        scaled_y = offset_y / self.display_y_scale
        if self._rotation_angle_radians:
            # Inverse rotation (transpose of the forward rotation matrix).
            rotated_x = ( scaled_x * self._cosine_angle ) + ( scaled_y * self._sine_angle )
            rotated_y = ( -1.0 * scaled_x * self._sine_angle ) + ( scaled_y * self._cosine_angle )
            longitude, latitude = self.projection.deg_from_x_y( x = rotated_x, y = rotated_y )
        else:
            longitude, latitude = self.projection.deg_from_x_y( x = scaled_x, y = scaled_y )
        return ( longitude, latitude )
# Albers projection parameters for each of the three map areas.
USA_CONTINENTAL_PROJECTION = AlbersMapProjection(
    reference_longitude_deg = -96.0,
    reference_latitude_deg = 37.5,
    standard_parallel_1_deg = 29.5,
    standard_parallel_2_deg = 45.5,
)
ALASKA_PROJECTION = AlbersMapProjection(
    reference_longitude_deg = -154.0,
    reference_latitude_deg = 50.0,
    standard_parallel_1_deg = 55.0,
    standard_parallel_2_deg = 65.0,
)
HAWAII_PROJECTION = AlbersMapProjection(
    reference_longitude_deg = -157.0,
    reference_latitude_deg = 13.0,
    standard_parallel_1_deg = 8.0,
    standard_parallel_2_deg = 18.0,
)
# Calibrated GeoMaps.  All three render onto the same SVG template and
# view box; Alaska and Hawaii are scaled (and slightly rotated) into their
# inset positions via the display_* calibration values.
USA_CONTINENTAL_GEO_MAP = GeoMap(
    projection = USA_CONTINENTAL_PROJECTION,
    geo_bounds = GeoBounds(
        longitude_min = -124.8679,
        longitude_max = -66.8628,
        latitude_min = 24.3959,
        latitude_max = 49.3877,
    ),
    svg_template_name = "usa_continental.svg",
    view_box = ViewBox(
        x = 0.0,
        y = 0.0,
        width = 958.0,
        height = 602.0,
    ),
    display_x_scale = 0.3332,
    display_y_scale = 0.3318,
    display_x_offset = 491.0249,
    display_y_offset = 323.6935,
)
ALASKA_CONTINENTAL_GEO_MAP = GeoMap(
    projection = ALASKA_PROJECTION,
    geo_bounds = GeoBounds(
        longitude_min = -180.0,
        longitude_max = -129.993,
        latitude_min = 50.5,
        latitude_max = 71.5232,
    ),
    svg_template_name = "usa_continental.svg",
    view_box = ViewBox(
        x = 0.0,
        y = 0.0,
        width = 958.0,
        height = 602.0,
    ),
    display_x_scale = 0.1301,
    display_y_scale = 0.1311,
    display_x_offset = 132.4555,
    display_y_offset = 638.5017,
    rotation_angle_deg = -11.0,
)
HAWAII_CONTINENTAL_GEO_MAP = GeoMap(
    projection = HAWAII_PROJECTION,
    geo_bounds = GeoBounds(
        longitude_min = -160.3922,
        longitude_max = -154.6271,
        latitude_min = 18.71,
        latitude_max = 22.3386,
    ),
    svg_template_name = "usa_continental.svg",
    view_box = ViewBox(
        x = 0.0,
        y = 0.0,
        width = 958.0,
        height = 602.0,
    ),
    display_x_scale = 0.3279,
    display_y_scale = 0.3371,
    display_x_offset = 325.5313,
    display_y_offset = 729.5,
    rotation_angle_deg = -0.5,
)
class CompositeGeoMap:
    """Combines multiple GeoMaps that share the same SVG file and are
    rendered together (e.g. the US map with Alaska/Hawaii insets).
    Because the member maps use different projection parameters, the
    composite's geographic coverage is a list of bounds, not one box."""
    def __init__( self, map_id : int, geo_map_list : List[GeoMap] ):
        """First one in the (non-empty) list is considered the default."""
        assert( geo_map_list )
        self._map_id = map_id
        self._geo_map_list = geo_map_list
        self._default_geo_map = self._geo_map_list[0]
        self._geo_bounds = GeoBounds()
        self._geo_bounds_list = list()
        svg_template_name_set = set()  # GeoMaps often share the same SVG
        for geo_map in self._geo_map_list:
            # Each member's view box must carry this composite's map id.
            geo_map.view_box.map_id = self._map_id
            self._geo_bounds.add_bounds( geo_map.geo_bounds )
            self._geo_bounds_list.append( geo_map.geo_bounds )
            svg_template_name_set.add( geo_map.svg_template_name )
            continue
        self._svg_template_name_list = list(svg_template_name_set)
        return
    @property
    def map_id(self):
        return self._map_id
    @property
    def geo_bounds(self):
        """Union of the bounds of all contained GeoMaps."""
        return self._geo_bounds
    @property
    def geo_bounds_list(self):
        """Individual bounds of each contained GeoMap."""
        return self._geo_bounds_list
    @property
    def default_view_box(self):
        return self._default_geo_map.view_box
    @property
    def default_reference_longitude_deg(self):
        return self._default_geo_map.projection.reference_longitude_deg
    @property
    def default_reference_latitude_deg(self):
        return self._default_geo_map.projection.reference_latitude_deg
    @property
    def default_aspect_ratio(self):
        return self._default_geo_map.aspect_ratio
    @property
    def svg_template_name_list(self):
        return self._svg_template_name_list
    def contains_bounds( self, geo_bounds : GeoBounds ):
        """True when any member map's bounds contain geo_bounds."""
        for geo_map_bounds in self._geo_bounds_list:
            if geo_map_bounds.contains_bounds( other_geo_bounds = geo_bounds ):
                return True
            continue
        return False
    def get_geo_map_for_point( self,
                               longitude_deg : float,
                               latitude_deg : float ):
        """Member map whose bounds contain the point, else the default."""
        for geo_map in self._geo_map_list:
            if geo_map.geo_bounds.contains_point( longitude_deg = longitude_deg,
                                                  latitude_deg = latitude_deg ):
                return geo_map
            continue
        return self._default_geo_map
    def geo_bounds_to_display_bounds( self, geo_bounds : GeoBounds ):
        """Project geo_bounds through every overlapping member map into
        one accumulated display-space bounding box."""
        display_bounds = DisplayBounds()
        for geo_map in self._geo_map_list:
            intersection_geo_bounds = geo_map.geo_bounds.intersect( geo_bounds )
            if not intersection_geo_bounds:
                continue
            for longitude, latitude in intersection_geo_bounds.corner_points():
                x, y = geo_map.long_lat_deg_to_coords( longitude_deg = longitude,
                                                       latitude_deg = latitude )
                display_bounds.add_point( x = x, y = y )
                continue
            continue
        return display_bounds
    def view_box_to_geo_bounds_list( self, view_box : ViewBox ):
        """Unproject a display-space view box into one GeoBounds per
        member map whose coverage it overlaps."""
        geo_bounds_list = list()
        for geo_map in self._geo_map_list:
            geo_bounds = GeoBounds()
            for x, y in view_box.corner_points():
                longitude, latitude = geo_map.coords_to_long_lat_deg( x = x, y = y )
                geo_bounds.add_point( longitude = longitude, latitude = latitude )
                continue
            # If the long/lat form the projections do not fall inside the
            # known bounds, then we can ignore it.
            if not geo_map.geo_bounds.intersects( geo_bounds ):
                continue
            geo_bounds_list.append( geo_bounds )
            continue
        return geo_bounds_list
# Composite map id 1: continental USA plus Alaska and Hawaii insets; the
# continental map (first entry) is the default member.
UsaContinentalCompositeGeoMap = CompositeGeoMap(
    map_id = 1,
    geo_map_list = [
        USA_CONTINENTAL_GEO_MAP,
        ALASKA_CONTINENTAL_GEO_MAP,
        HAWAII_CONTINENTAL_GEO_MAP,
    ]
)
| true | true |
f7301234057765dcbe4ab1b5008caded738d56b6 | 5,879 | py | Python | src/sadie/airr/igblast/germline.py | jwillis0720/sadie | d289ae68f06f5698ee40ffc1757e1b8aa85f1175 | [
"MIT"
] | 9 | 2020-12-22T19:14:01.000Z | 2022-03-17T04:34:06.000Z | src/sadie/airr/igblast/germline.py | jwillis0720/sadie | d289ae68f06f5698ee40ffc1757e1b8aa85f1175 | [
"MIT"
] | 32 | 2020-12-28T07:46:44.000Z | 2022-03-31T01:25:01.000Z | src/sadie/airr/igblast/germline.py | jwillis0720/sadie | d289ae68f06f5698ee40ffc1757e1b8aa85f1175 | [
"MIT"
] | 2 | 2021-07-30T16:44:46.000Z | 2022-01-12T20:15:17.000Z | import os
import warnings
from pathlib import Path
# package/module level
from sadie.reference.reference import YamlRef
from sadie.airr.igblast.igblast import ensure_prefix_to
class GermlineData:
    """
    Abstracts the cumbersome germline-reference file layout (blast
    databases, auxiliary files and IGDATA directories) so it can be folded
    into IgBLAST more easily.

    Examples
    --------
    >>> gd = GermlineData('human')
    >>> gd.v_gene_dir
    /Users/jwillis/repos/sadie/airr/data/germlines/imgt/Ig/blastdb/human_V
    >>> gd.aux_path
    /Users/jwillis/repos/sadie/airr/data/germlines/imgt/aux_db/human_gl.aux
    """

    def __init__(
        self,
        species: str,
        database: str = "imgt",
        receptor: str = "Ig",
        database_dir: str = None,
    ):
        """
        Parameters
        ----------
        species : str
            The species of interest, e.g. "human".
        database : str, optional
            The database source, by default "imgt".
        receptor : str, optional
            The receptor type, by default "Ig".
        database_dir : str, optional
            Override for the bundled germline data directory.
        """
        self.species = species
        if database_dir:
            self.base_dir = Path(database_dir).absolute()
        else:
            self.base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../data/germlines"))
        # blast_dir is a filename *prefix* (".../blastdb/<species>_"),
        # not a directory that exists on its own.
        self.blast_dir = os.path.join(self.base_dir, f"{database}/{receptor}/blastdb/{species}_")
        self.v_gene_dir = self.blast_dir + "V"
        self.d_gene_dir = self.blast_dir + "D"
        self.j_gene_dir = self.blast_dir + "J"
        self.aux_path = os.path.join(self.base_dir, f"{database}/aux_db/{species}_gl.aux")
        self.igdata = os.path.join(self.base_dir, f"{database}/{receptor}/")

    @property
    def base_dir(self):
        """Base directory that contains all the germline data (stored as
        given: a Path when database_dir was supplied, otherwise a str)."""
        return self._base_dir

    @base_dir.setter
    def base_dir(self, directory: str):
        _path = Path(directory)
        if not _path.exists():
            raise FileNotFoundError(f"Base directory, {directory} not found")
        self._base_dir = directory

    @property
    def blast_dir(self) -> str:
        # str (was annotated Path): it is a glob-style prefix later
        # concatenated with "V"/"D"/"J" in __init__.
        return self._blast_dir

    @blast_dir.setter
    def blast_dir(self, directory: str):
        # Only the parent can be validated: the prefix itself is not a
        # real filesystem path.
        _path = Path(directory).parent
        if not _path.exists():
            raise FileNotFoundError(f"Blast directory, {directory} not found")
        self._blast_dir = directory

    @property
    def v_gene_dir(self) -> Path:
        """Glob-style prefix for the V-gene blast database files
        (e.g. human_V is the prefix of human_V.nod etc.)."""
        return self._v_gene_dir

    @v_gene_dir.setter
    def v_gene_dir(self, directory: str):
        _path = Path(directory)
        if not ensure_prefix_to(_path):
            raise FileNotFoundError(f"V gene directory glob, {directory} not found")
        self._v_gene_dir = _path

    @property
    def d_gene_dir(self) -> Path:
        """Glob-style prefix for the D-gene blast database files."""
        return self._d_gene_dir

    @d_gene_dir.setter
    def d_gene_dir(self, directory: str):
        # Missing D genes only warn rather than raise: the setter is also
        # exercised for species/loci without usable D-gene data.
        _path = Path(directory)
        if not ensure_prefix_to(_path):
            warnings.warn(f"D gene directory not found for {self.species}", UserWarning)
        self._d_gene_dir = _path

    @property
    def j_gene_dir(self) -> Path:
        """Glob-style prefix for the J-gene blast database files."""
        return self._j_gene_dir

    @j_gene_dir.setter
    def j_gene_dir(self, directory: str):
        _path = Path(directory)
        if not ensure_prefix_to(_path):
            raise FileNotFoundError(f"J gene directory glob, {directory} not found")
        self._j_gene_dir = _path

    @property
    def aux_path(self) -> Path:
        """Fully qualified path to the species auxiliary data file used to
        reconstruct CDR3 regions, e.g. .../aux_db/human_gl.aux"""
        return self._aux_path

    @aux_path.setter
    def aux_path(self, directory: str):
        _path = Path(directory)
        if not _path.exists():
            # BUG FIX: the message previously said "J gene directory glob"
            # (copy-paste from the j_gene_dir setter).
            raise FileNotFoundError(f"Aux data file, {directory} not found")
        self._aux_path = _path

    @property
    def igdata(self) -> Path:
        """IGDATA directory consumed by igblastn."""
        return self._igdata

    @igdata.setter
    def igdata(self, directory: Path):
        _path = Path(directory)
        if not _path.exists():
            raise FileNotFoundError(f"IGDATA, {directory} not found")
        self._igdata = _path

    @staticmethod
    def get_available_datasets() -> list:
        """A static non-instantiated method to list the built-in datasets.

        Returns
        -------
        list
            Unique (common_name, database_type) pairs declared in the
            bundled YAML reference.
        """
        y = YamlRef()
        db_types = []
        for database_type in y.yaml:
            for common in y.yaml[database_type]:
                if (common, database_type) not in db_types:
                    db_types.append((common, database_type))
        return db_types
| 31.607527 | 146 | 0.610478 | import os
import warnings
from pathlib import Path
from sadie.reference.reference import YamlRef
from sadie.airr.igblast.igblast import ensure_prefix_to
class GermlineData:
    """Abstracts the on-disk germline reference layout (blast databases,
    aux files, IGDATA) for IgBLAST; every path is validated on assignment."""
    def __init__(
        self,
        species: str,
        database: str = "imgt",
        receptor: str = "Ig",
        database_dir: str = None,
    ):
        # species e.g. "human"; database/receptor select the sub-directory;
        # database_dir overrides the bundled data location.
        self.species = species
        if database_dir:
            self.base_dir = Path(database_dir).absolute()
        else:
            self.base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../data/germlines"))
        # blast_dir is a filename *prefix* (".../blastdb/<species>_"),
        # not a directory that exists on its own.
        self.blast_dir = os.path.join(self.base_dir, f"{database}/{receptor}/blastdb/{species}_")
        self.v_gene_dir = self.blast_dir + "V"
        self.d_gene_dir = self.blast_dir + "D"
        self.j_gene_dir = self.blast_dir + "J"
        self.aux_path = os.path.join(self.base_dir, f"{database}/aux_db/{species}_gl.aux")
        self.igdata = os.path.join(self.base_dir, f"{database}/{receptor}/")
    @property
    def base_dir(self) -> Path:
        """Base directory containing all the germline data."""
        return self._base_dir
    @base_dir.setter
    def base_dir(self, directory: str):
        _path = Path(directory)
        if not _path.exists():
            raise FileNotFoundError(f"Base directory, {directory} not found")
        self._base_dir = directory
    @property
    def blast_dir(self) -> Path:
        # NOTE(review): actually stores/returns the str prefix, not a Path.
        return self._blast_dir
    @blast_dir.setter
    def blast_dir(self, directory: str):
        # Only the parent can be checked: the prefix itself is not a path.
        _path = Path(directory).parent
        if not _path.exists():
            raise FileNotFoundError(f"Blast directory, {directory} not found")
        self._blast_dir = directory
    @property
    def v_gene_dir(self) -> Path:
        """Glob-style prefix for the V-gene blast database files."""
        return self._v_gene_dir
    @v_gene_dir.setter
    def v_gene_dir(self, directory: str):
        _path = Path(directory)
        if not ensure_prefix_to(_path):
            raise FileNotFoundError(f"V gene directory glob, {directory} not found")
        self._v_gene_dir = _path
    @property
    def d_gene_dir(self) -> Path:
        """Glob-style prefix for the D-gene blast database files."""
        return self._d_gene_dir
    @d_gene_dir.setter
    def d_gene_dir(self, directory: str):
        # Missing D genes only warn: not every species/locus has D data.
        _path = Path(directory)
        if not ensure_prefix_to(_path):
            warnings.warn(f"D gene directory not found for {self.species}", UserWarning)
        self._d_gene_dir = _path
    @property
    def j_gene_dir(self) -> Path:
        """Glob-style prefix for the J-gene blast database files."""
        return self._j_gene_dir
    @j_gene_dir.setter
    def j_gene_dir(self, directory: str):
        _path = Path(directory)
        if not ensure_prefix_to(_path):
            raise FileNotFoundError(f"J gene directory glob, {directory} not found")
        self._j_gene_dir = _path
    @property
    def aux_path(self) -> Path:
        """Fully qualified path to the species auxiliary (CDR3) data file."""
        return self._aux_path
    @aux_path.setter
    def aux_path(self, directory: str):
        _path = Path(directory)
        if not _path.exists():
            # NOTE(review): this message says "J gene directory glob" but
            # the setter validates the aux data file — copy-paste error.
            raise FileNotFoundError(f"J gene directory glob, {directory} not found")
        self._aux_path = _path
    @property
    def igdata(self) -> Path:
        """IGDATA directory consumed by igblastn."""
        return self._igdata
    @igdata.setter
    def igdata(self, directory: Path):
        _path = Path(directory)
        if not _path.exists():
            raise FileNotFoundError(f"IGDATA, {directory} not found")
        self._igdata = _path
    @staticmethod
    def get_available_datasets() -> list:
        """Return unique (common_name, database_type) pairs from the
        bundled YAML reference."""
        y = YamlRef()
        db_types = []
        for database_type in y.yaml:
            for common in y.yaml[database_type]:
                if (common, database_type) not in db_types:
                    db_types.append((common, database_type))
        return db_types
| true | true |
f7301314cf1642769e6fb9225052147fc23be5cb | 996 | py | Python | cluster.py | vgp314/Udacity-Arvato-Identify-Customer-Segments | 6be1d4f1eeac391c17c70fdf584bdc4813f80fd8 | [
"ADSL"
] | 1 | 2020-05-21T23:56:57.000Z | 2020-05-21T23:56:57.000Z | cluster.py | vgp314/Udacity-Arvato-Identify-Customer-Segments | 6be1d4f1eeac391c17c70fdf584bdc4813f80fd8 | [
"ADSL"
] | null | null | null | cluster.py | vgp314/Udacity-Arvato-Identify-Customer-Segments | 6be1d4f1eeac391c17c70fdf584bdc4813f80fd8 | [
"ADSL"
] | null | null | null | from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
import matplotlib.pyplot as plt
def plot_clustering(data):
'''
Definition:
This function plot the squared error for the clustered points
args:
data to be clusterd
returns:
None
'''
cost =[]
max_clusters = 20
for i in range(2, max_clusters):
print("Analysing ", i, " clusters")
KM = MiniBatchKMeans(n_clusters = i,batch_size=20000)
KM.fit(data)
cost.append(KM.inertia_)
plt.plot(range(2, max_clusters), cost, color ='g', linewidth ='3')
plt.xlabel("Number of Clusters")
plt.ylabel("Squared Error (Cost)")
plt.show()
def do_clustering(data, number_clusters):
    '''
    Definition:
        Initialize a KMeans model with number_clusters and fit it to data.
    args:
        data to be clustered, number_clusters
    returns:
        fitted K-Means model
    '''
    return KMeans(number_clusters).fit(data)
| 21.652174 | 72 | 0.702811 | from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
import matplotlib.pyplot as plt
def plot_clustering(data):
    """Elbow plot: fit MiniBatchKMeans for k = 2..19 and plot inertia
    (squared error) versus the number of clusters."""
    cost =[]
    max_clusters = 20
    for i in range(2, max_clusters):
        print("Analysing ", i, " clusters")
        KM = MiniBatchKMeans(n_clusters = i,batch_size=20000)
        KM.fit(data)
        cost.append(KM.inertia_)
    plt.plot(range(2, max_clusters), cost, color ='g', linewidth ='3')
    plt.xlabel("Number of Clusters")
    plt.ylabel("Squared Error (Cost)")
    plt.show()
def do_clustering(data,number_clusters):
    """Fit a KMeans model with number_clusters clusters to data and
    return the fitted model."""
    kmeans = KMeans(number_clusters)
    fitted_model_k_means = kmeans.fit(data)
    return fitted_model_k_means
| true | true |
f730132751b666d6cb1f40177578d5b53a823707 | 4,202 | py | Python | Main.py | Serdobe/Markus | 725538666f81a11361b90e57fad2a00cb9888685 | [
"MIT"
] | null | null | null | Main.py | Serdobe/Markus | 725538666f81a11361b90e57fad2a00cb9888685 | [
"MIT"
] | null | null | null | Main.py | Serdobe/Markus | 725538666f81a11361b90e57fad2a00cb9888685 | [
"MIT"
] | null | null | null | # MAIN SCRIPT
"""
This script computes all the biological experiments. To run it is necessary to
load the Function_Files script that contains all the functions.
"""
import os
import multiprocessing
from multiprocessing import Pool
# Set work directory:
os.chdir(r"C:\Users\Sergio\Desktop\Markus_Project")
import Functions_Files
######################
# Yeast PPI Network: #
######################
# Arguments:
network = "systematic_PPI_BioGRID"
technique_1 = "GCV-all"
technique_2 = "GCV-O+"
technique_3 = "triangle"
technique_4 = "GDV"
enrichemnt_1 = "BP"
enrichemnt_2 = "CC"
enrichemnt_3 = "MF"
total_run = 10
# 1. Prepare the Data: #
# Technique "GCV_all":
# BP, CC, and MF:
sub_command1 = (network, technique_1, enrichemnt_1, 10)
sub_command2 = (network, technique_1, enrichemnt_2, 10)
sub_command3 = (network, technique_1, enrichemnt_3, 10)
# Technique "GCV-O+":
# BP, CC, and MF:
sub_command4 = (network, technique_2, enrichemnt_1, 10)
sub_command5 = (network, technique_2, enrichemnt_2, 10)
sub_command6 = (network, technique_2, enrichemnt_3, 10)
# Technique "triangle":
# BP, CC, and MF:
sub_command7 = (network, technique_3, enrichemnt_1, 10)
sub_command8 = (network, technique_3, enrichemnt_2, 10)
sub_command9 = (network, technique_3, enrichemnt_3, 10)
# Technique "GDV":
# BP, CC, and MF:
sub_command10 = (network, technique_4, enrichemnt_1, 10)
sub_command11 = (network, technique_4, enrichemnt_2, 10)
sub_command12 = (network, technique_4, enrichemnt_3, 10)
# Run the code:
Process = [sub_command1,sub_command2, sub_command3, sub_command4, sub_command5,
sub_command6, sub_command7,sub_command8,sub_command9, sub_command10,
sub_command11, sub_command12]
for arguments in Process:
Functions_Files.Load_Data(*arguments)
# 2. Prepare the PairWise Comparison (for the four techniques):
# For BP, CC, and MF:
# For each annotation all the four different techniques are compared:
sub_command1 = (network, enrichemnt_1, 10)
sub_command2 = (network, enrichemnt_2, 10)
sub_command3 = (network, enrichemnt_3, 10)
Process = [sub_command1,sub_command2, sub_command3]
for arguments in Process:
Functions_Files.Pair_Wise_GO_Comparison(*arguments)
# 3. Create the plots for the comparisons:
# Technique "GCV_all" VS "GCV-O+":
# BP, CC, and MF:
sub_command1 = (network, technique_1, technique_2, enrichemnt_1, 10)
sub_command2 = (network, technique_1, technique_2, enrichemnt_2, 10)
sub_command3 = (network, technique_1, technique_2, enrichemnt_3, 10)
# Technique "GCV_all" VS "triangle":
# BP, CC, and MF:
sub_command4 = (network, technique_1, technique_3, enrichemnt_1, 10)
sub_command5 = (network, technique_1, technique_3, enrichemnt_2, 10)
sub_command6 = (network, technique_1, technique_3, enrichemnt_3, 10)
# Technique "GCV_all" VS "GDV":
# BP, CC, and MF:
sub_command4 = (network, technique_1, technique_4, enrichemnt_1, 10)
sub_command5 = (network, technique_1, technique_4, enrichemnt_2, 10)
sub_command6 = (network, technique_1, technique_4, enrichemnt_3, 10)
# Technique "GCV-O+" VS "triangle":
# BP, CC, and MF:
sub_command7 = (network, technique_2, technique_3, enrichemnt_1, 10)
sub_command8 = (network, technique_2, technique_3, enrichemnt_2, 10)
sub_command9 = (network, technique_2, technique_3, enrichemnt_3, 10)
# Technique "GCV-O+" VS "GDV":
# BP, CC, and MF:
sub_command10 = (network, technique_2, technique_4, enrichemnt_1, 10)
sub_command11 = (network, technique_2, technique_4, enrichemnt_2, 10)
sub_command12 = (network, technique_2, technique_4, enrichemnt_3, 10)
# Technique "triangle" VS "GDV":
# BP, CC, and MF:
sub_command13 = (network, technique_3, technique_4, enrichemnt_1, 10)
sub_command14 = (network, technique_3, technique_4, enrichemnt_2, 10)
sub_command15 = (network, technique_3, technique_4, enrichemnt_3, 10)
Process = [sub_command1,sub_command2, sub_command3, sub_command4, sub_command5,
sub_command6, sub_command7,sub_command8,sub_command9, sub_command10,
sub_command11, sub_command12, sub_command13, sub_command14, sub_command15]
for arguments in Process:
Functions_Files.Main_Plot_Function(*arguments)
| 30.671533 | 85 | 0.73584 |
import os
import multiprocessing
from multiprocessing import Pool
os.chdir(r"C:\Users\Sergio\Desktop\Markus_Project")
import Functions_Files
5 = (network, technique_2, enrichemnt_2, 10)
sub_command6 = (network, technique_2, enrichemnt_3, 10)
sub_command7 = (network, technique_3, enrichemnt_1, 10)
sub_command8 = (network, technique_3, enrichemnt_2, 10)
sub_command9 = (network, technique_3, enrichemnt_3, 10)
sub_command10 = (network, technique_4, enrichemnt_1, 10)
sub_command11 = (network, technique_4, enrichemnt_2, 10)
sub_command12 = (network, technique_4, enrichemnt_3, 10)
Process = [sub_command1,sub_command2, sub_command3, sub_command4, sub_command5,
sub_command6, sub_command7,sub_command8,sub_command9, sub_command10,
sub_command11, sub_command12]
for arguments in Process:
Functions_Files.Load_Data(*arguments)
sub_command1 = (network, enrichemnt_1, 10)
sub_command2 = (network, enrichemnt_2, 10)
sub_command3 = (network, enrichemnt_3, 10)
Process = [sub_command1,sub_command2, sub_command3]
for arguments in Process:
Functions_Files.Pair_Wise_GO_Comparison(*arguments)
sub_command1 = (network, technique_1, technique_2, enrichemnt_1, 10)
sub_command2 = (network, technique_1, technique_2, enrichemnt_2, 10)
sub_command3 = (network, technique_1, technique_2, enrichemnt_3, 10)
sub_command4 = (network, technique_1, technique_3, enrichemnt_1, 10)
sub_command5 = (network, technique_1, technique_3, enrichemnt_2, 10)
sub_command6 = (network, technique_1, technique_3, enrichemnt_3, 10)
sub_command4 = (network, technique_1, technique_4, enrichemnt_1, 10)
sub_command5 = (network, technique_1, technique_4, enrichemnt_2, 10)
sub_command6 = (network, technique_1, technique_4, enrichemnt_3, 10)
sub_command7 = (network, technique_2, technique_3, enrichemnt_1, 10)
sub_command8 = (network, technique_2, technique_3, enrichemnt_2, 10)
sub_command9 = (network, technique_2, technique_3, enrichemnt_3, 10)
sub_command10 = (network, technique_2, technique_4, enrichemnt_1, 10)
sub_command11 = (network, technique_2, technique_4, enrichemnt_2, 10)
sub_command12 = (network, technique_2, technique_4, enrichemnt_3, 10)
sub_command13 = (network, technique_3, technique_4, enrichemnt_1, 10)
sub_command14 = (network, technique_3, technique_4, enrichemnt_2, 10)
sub_command15 = (network, technique_3, technique_4, enrichemnt_3, 10)
Process = [sub_command1,sub_command2, sub_command3, sub_command4, sub_command5,
sub_command6, sub_command7,sub_command8,sub_command9, sub_command10,
sub_command11, sub_command12, sub_command13, sub_command14, sub_command15]
for arguments in Process:
Functions_Files.Main_Plot_Function(*arguments)
| true | true |
f73013d5b899b1bc75fe9a03e46f38591a6f58b9 | 2,287 | py | Python | userbot/plugins/gbun.py | Aliensuniquebot/CatUserbot | 93561a620fc1198c6fe6c259412088f4bc81d97b | [
"MIT"
] | 1 | 2020-07-18T07:42:58.000Z | 2020-07-18T07:42:58.000Z | userbot/plugins/gbun.py | praveen368/CatUserbot | 4b0cd970551ffaf86b9fdd5da584c1b3882821ff | [
"MIT"
] | null | null | null | userbot/plugins/gbun.py | praveen368/CatUserbot | 4b0cd970551ffaf86b9fdd5da584c1b3882821ff | [
"MIT"
] | 2 | 2020-06-25T11:14:50.000Z | 2021-04-04T13:49:13.000Z | # This is a troll indeed ffs *facepalm*
import asyncio
from telethon import events
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import ChannelParticipantsAdmins
from userbot.utils import admin_cmd
@borg.on(admin_cmd(pattern="gbun"))
async def gbun(event):
    """Troll command: replies to the target of a replied-to message with a
    fake "global ban" notice (optionally with a reason taken from the
    command text after ".gbun ").

    BUG FIX: asyncio.sleep(3.5) was called without `await`, so the pause
    never happened and the coroutine leaked with a RuntimeWarning.
    """
    if event.fwd_from:
        return
    gbunVar = event.text
    gbunVar = gbunVar[6:]  # text after the ".gbun " prefix = optional reason
    mentions = "`Warning!! User 𝙂𝘽𝘼𝙉𝙉𝙀𝘿 By Admin...\n`"
    no_reason = "__Reason: Potential spammer. __"
    await event.edit("**Summoning out le Gungnir ❗️⚜️☠️**")
    # Was `asyncio.sleep(3.5)` with no await — a no-op.
    await asyncio.sleep(3.5)
    chat = await event.get_input_chat()
    # NOTE(review): this loop only walks the admin list and appends an
    # empty string; `mentions` is never used afterwards.  Kept as-is to
    # preserve behavior (the iteration itself performs API calls).
    async for x in borg.iter_participants(chat, filter=ChannelParticipantsAdmins):
        mentions += f""
    reply_message = None
    if event.reply_to_msg_id:
        reply_message = await event.get_reply_message()
        replied_user = await event.client(GetFullUserRequest(reply_message.from_id))
        firstname = replied_user.user.first_name
        usname = replied_user.user.username
        idd = reply_message.from_id
        # The bot owner's account id is exempt from the prank.
        if idd == 1118936839:
            await reply_message.reply("`Wait a second, This is my master!`\n**How dare you threaten to ban my master nigger!**\n\n__Your account has been hacked! Pay 69$ to my master__ [✰Sᴀͥʀᴀͣᴛͫʜ™✰](tg://user?id=1118936839) __to release your account__😏")
        else:
            jnl = ("`Warning!! `"
                   "[{}](tg://user?id={})"
                   "` 𝙂𝘽𝘼𝙉𝙉𝙀𝘿 By Admin...\n\n`"
                   "**Rendi's Name: ** __{}__\n"
                   "**ID : ** `{}`\n"
                   ).format(firstname, idd, firstname, idd)
            if usname is None:  # idiom fix: identity comparison with None
                jnl += "**Victim Nigga's username: ** `Doesn't own a username!`\n"
            elif usname != "None":
                jnl += "**Victim Nigga's username** : @{}\n".format(usname)
            if len(gbunVar) > 0:
                gbunm = "`{}`".format(gbunVar)
                gbunr = "**Reason: **"+gbunm
                jnl += gbunr
            else:
                jnl += no_reason
            await reply_message.reply(jnl)
    else:
        mention = "`Warning!! User 𝙂𝘽𝘼𝙉𝙉𝙀𝘿 By Admin...\nReason: Potential spammer. `"
        await event.reply(mention)
    await event.delete()
| 42.351852 | 255 | 0.594666 |
import asyncio
from telethon import events
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import ChannelParticipantsAdmins
from userbot.utils import admin_cmd
# Troll command: fakes a "global ban" notice on the replied-to user.
@borg.on(admin_cmd(pattern="gbun"))
async def gbun(event):
    """Reply to a message with .gbun [reason] to post a fake ban notice."""
    if event.fwd_from:
        return
    gbunVar = event.text
    gbunVar = gbunVar[6:]  # text after the ".gbun " prefix = optional reason
    mentions = "`Warning!! User 𝙂𝘽𝘼𝙉𝙉𝙀𝘿 By Admin...\n`"
    no_reason = "__Reason: Potential spammer. __"
    await event.edit("**Summoning out le Gungnir ❗️⚜️☠️**")
    # NOTE(review): missing `await` — this sleep coroutine is created but
    # never runs, so there is no actual pause here.
    asyncio.sleep(3.5)
    chat = await event.get_input_chat()
    # NOTE(review): iterates the admin list but appends an empty string;
    # `mentions` is never used afterwards.
    async for x in borg.iter_participants(chat, filter=ChannelParticipantsAdmins):
        mentions += f""
    reply_message = None
    if event.reply_to_msg_id:
        reply_message = await event.get_reply_message()
        replied_user = await event.client(GetFullUserRequest(reply_message.from_id))
        firstname = replied_user.user.first_name
        usname = replied_user.user.username
        idd = reply_message.from_id
        # The bot owner's account id is exempt from the prank.
        if idd == 1118936839:
            await reply_message.reply("`Wait a second, This is my master!`\n**How dare you threaten to ban my master nigger!**\n\n__Your account has been hacked! Pay 69$ to my master__ [✰Sᴀͥʀᴀͣᴛͫʜ™✰](tg://user?id=1118936839) __to release your account__😏")
        else:
            jnl=("`Warning!! `"
                 "[{}](tg://user?id={})"
                 "` 𝙂𝘽𝘼𝙉𝙉𝙀𝘿 By Admin...\n\n`"
                 "**Rendi's Name: ** __{}__\n"
                 "**ID : ** `{}`\n"
                 ).format(firstname, idd, firstname, idd)
            if usname == None:
                jnl += "**Victim Nigga's username: ** `Doesn't own a username!`\n"
            elif usname != "None":
                jnl += "**Victim Nigga's username** : @{}\n".format(usname)
            if len(gbunVar) > 0:
                gbunm = "`{}`".format(gbunVar)
                gbunr = "**Reason: **"+gbunm
                jnl += gbunr
            else:
                jnl += no_reason
            await reply_message.reply(jnl)
    else:
        mention = "`Warning!! User 𝙂𝘽𝘼𝙉𝙉𝙀𝘿 By Admin...\nReason: Potential spammer. `"
        await event.reply(mention)
    await event.delete()
| true | true |
f7301463f9c4beb4ba5e2fac1fb2efbd03eeb42b | 2,697 | py | Python | projects/PointRend/point_rend/coarse_mask_head.py | tkhe/tkdetection | 54e6c112ef2930e755f457e38449736f5743a9ea | [
"MIT"
] | 1 | 2020-10-09T02:27:13.000Z | 2020-10-09T02:27:13.000Z | projects/PointRend/point_rend/coarse_mask_head.py | tkhe/tkdetection | 54e6c112ef2930e755f457e38449736f5743a9ea | [
"MIT"
] | null | null | null | projects/PointRend/point_rend/coarse_mask_head.py | tkhe/tkdetection | 54e6c112ef2930e755f457e38449736f5743a9ea | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from tkdet.layers import Conv2d
from tkdet.layers import ShapeSpec
from tkdet.models.roi_head.mask_head import MASK_HEAD_REGISTRY
from tkdet.utils import weight_init
__all__ = ["CoarseMaskHead"]
@MASK_HEAD_REGISTRY.register()
class CoarseMaskHead(nn.Module):
    """Mask head predicting one coarse, low-resolution mask per class.

    Pipeline: optional 1x1 channel-reduction conv -> stride-2 3x3-equivalent
    conv that halves the spatial size -> flatten -> stack of fully connected
    layers -> linear projection to a square
    ``output_side_resolution x output_side_resolution`` map per class.
    """
    def __init__(self, cfg, input_shape: ShapeSpec):
        """Build the head from the config and the incoming ROI feature shape.

        Args:
            cfg: project config node; reads ``MODEL.NUM_CLASSES`` and
                ``MODEL.ROI_MASK_HEAD.{CONV_DIM, FC_DIM, NUM_FC,
                OUTPUT_SIDE_RESOLUTION}``.
            input_shape: channels/height/width of the per-ROI feature map.
        """
        super(CoarseMaskHead, self).__init__()
        self.num_classes = cfg.MODEL.NUM_CLASSES
        conv_dim = cfg.MODEL.ROI_MASK_HEAD.CONV_DIM
        self.fc_dim = cfg.MODEL.ROI_MASK_HEAD.FC_DIM
        num_fc = cfg.MODEL.ROI_MASK_HEAD.NUM_FC
        self.output_side_resolution = cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION
        self.input_channels = input_shape.channels
        self.input_h = input_shape.height
        self.input_w = input_shape.width
        self.conv_layers = []
        # Only insert the 1x1 reduction conv when the input actually has more
        # channels than conv_dim; otherwise the channel count is left as-is.
        if self.input_channels > conv_dim:
            self.reduce_channel_dim_conv = Conv2d(
                self.input_channels,
                conv_dim,
                kernel_size=1,
                activation="ReLU"
            )
            self.conv_layers.append(self.reduce_channel_dim_conv)
        # kernel_size=2, stride=2, padding=0 halves H and W exactly.
        self.reduce_spatial_dim_conv = Conv2d(
            conv_dim,
            conv_dim,
            kernel_size=2,
            stride=2,
            padding=0,
            bias=True,
            activation="ReLU"
        )
        self.conv_layers.append(self.reduce_spatial_dim_conv)
        input_dim = conv_dim * self.input_h * self.input_w
        # Account for the stride-2 conv above: H and W are each halved,
        # so the flattened feature size shrinks by a factor of 4.
        input_dim //= 4
        self.fcs = []
        for k in range(num_fc):
            fc = nn.Linear(input_dim, self.fc_dim)
            # Register under a stable, 1-based name so checkpoints keep matching.
            self.add_module("coarse_mask_fc{}".format(k + 1), fc)
            self.fcs.append(fc)
            input_dim = self.fc_dim
        output_dim = self.num_classes * self.output_side_resolution * self.output_side_resolution
        self.prediction = nn.Linear(self.fc_dim, output_dim)
        # Near-zero init for the final projection; Caffe2-style fills elsewhere.
        nn.init.normal_(self.prediction.weight, std=0.001)
        nn.init.constant_(self.prediction.bias, 0)
        for layer in self.conv_layers:
            weight_init.c2_msra_fill(layer)
        for layer in self.fcs:
            weight_init.c2_xavier_fill(layer)
    def forward(self, x):
        """Predict coarse per-class masks.

        Args:
            x: per-ROI features; reshaped here to
                ``(N, input_channels, input_h, input_w)``.

        Returns:
            Tensor of shape ``(N, num_classes, output_side_resolution,
            output_side_resolution)`` with raw (un-activated) mask scores.
        """
        N = x.shape[0]
        x = x.view(N, self.input_channels, self.input_h, self.input_w)
        for layer in self.conv_layers:
            x = layer(x)
        x = torch.flatten(x, start_dim=1)
        for layer in self.fcs:
            x = F.relu(layer(x))
        return self.prediction(x).view(
            N,
            self.num_classes,
            self.output_side_resolution,
            self.output_side_resolution
        )
| 32.493976 | 97 | 0.622914 | import torch
import torch.nn as nn
import torch.nn.functional as F
from tkdet.layers import Conv2d
from tkdet.layers import ShapeSpec
from tkdet.models.roi_head.mask_head import MASK_HEAD_REGISTRY
from tkdet.utils import weight_init
__all__ = ["CoarseMaskHead"]
@MASK_HEAD_REGISTRY.register()
class CoarseMaskHead(nn.Module):
def __init__(self, cfg, input_shape: ShapeSpec):
super(CoarseMaskHead, self).__init__()
self.num_classes = cfg.MODEL.NUM_CLASSES
conv_dim = cfg.MODEL.ROI_MASK_HEAD.CONV_DIM
self.fc_dim = cfg.MODEL.ROI_MASK_HEAD.FC_DIM
num_fc = cfg.MODEL.ROI_MASK_HEAD.NUM_FC
self.output_side_resolution = cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION
self.input_channels = input_shape.channels
self.input_h = input_shape.height
self.input_w = input_shape.width
self.conv_layers = []
if self.input_channels > conv_dim:
self.reduce_channel_dim_conv = Conv2d(
self.input_channels,
conv_dim,
kernel_size=1,
activation="ReLU"
)
self.conv_layers.append(self.reduce_channel_dim_conv)
self.reduce_spatial_dim_conv = Conv2d(
conv_dim,
conv_dim,
kernel_size=2,
stride=2,
padding=0,
bias=True,
activation="ReLU"
)
self.conv_layers.append(self.reduce_spatial_dim_conv)
input_dim = conv_dim * self.input_h * self.input_w
input_dim //= 4
self.fcs = []
for k in range(num_fc):
fc = nn.Linear(input_dim, self.fc_dim)
self.add_module("coarse_mask_fc{}".format(k + 1), fc)
self.fcs.append(fc)
input_dim = self.fc_dim
output_dim = self.num_classes * self.output_side_resolution * self.output_side_resolution
self.prediction = nn.Linear(self.fc_dim, output_dim)
nn.init.normal_(self.prediction.weight, std=0.001)
nn.init.constant_(self.prediction.bias, 0)
for layer in self.conv_layers:
weight_init.c2_msra_fill(layer)
for layer in self.fcs:
weight_init.c2_xavier_fill(layer)
def forward(self, x):
N = x.shape[0]
x = x.view(N, self.input_channels, self.input_h, self.input_w)
for layer in self.conv_layers:
x = layer(x)
x = torch.flatten(x, start_dim=1)
for layer in self.fcs:
x = F.relu(layer(x))
return self.prediction(x).view(
N,
self.num_classes,
self.output_side_resolution,
self.output_side_resolution
)
| true | true |
f730147e7eb4322f1a6d2019bd8d168aab36bec1 | 3,924 | py | Python | adiscorduser/settings/mock_prod.py | ADiscordUser/adiscorduser-site | af6eb58ab528fddba82b8c65bfdd06b6663333cc | [
"MIT"
] | 2 | 2021-03-06T02:21:35.000Z | 2021-03-06T09:34:57.000Z | adiscorduser/settings/mock_prod.py | ADiscordUser/adiscorduser-site | af6eb58ab528fddba82b8c65bfdd06b6663333cc | [
"MIT"
] | null | null | null | adiscorduser/settings/mock_prod.py | ADiscordUser/adiscorduser-site | af6eb58ab528fddba82b8c65bfdd06b6663333cc | [
"MIT"
] | null | null | null | """
Django settings for adiscorduser project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ''  # NOTE(review): empty — Django raises ImproperlyConfigured when an empty SECRET_KEY is used; presumably this mock value is filled in before deployment. TODO confirm.
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE(review): with DEBUG = False an empty ALLOWED_HOSTS makes Django reject
# every incoming request (an empty list is only permitted while DEBUG is
# True) — hosts must be added before this config can serve traffic.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'core.apps.CoreConfig',
'uploader.apps.UploaderConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'adiscorduser.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'adiscorduser.wsgi.application'
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'core.drf.APIPagination',
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication'
],
'ORDERING_PARAM': 'order'
}
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
# Authentication backend
AUTH_USER_MODEL = "core.User"
# Media
MEDIA_ROOT = ""
MEDIA = {
"image": {
"url": "",
"mime_types": ["image/png", "image/gif", "image/jpeg"]
},
"video": {
"url": "",
"mime_types": ["video/webm", "video/mp4"]
}
}
# Cloudflare
CLOUDFLARE = {
"ZONE_IDENTIFIER": "",
"API_KEY": "",
"PROD_HOST": ALLOWED_HOSTS # you could also make this a list
} | 23.926829 | 91 | 0.66947 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = ''
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'core.apps.CoreConfig',
'uploader.apps.UploaderConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'adiscorduser.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'adiscorduser.wsgi.application'
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'core.drf.APIPagination',
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication'
],
'ORDERING_PARAM': 'order'
}
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
# Authentication backend
AUTH_USER_MODEL = "core.User"
# Media
MEDIA_ROOT = ""
MEDIA = {
"image": {
"url": "",
"mime_types": ["image/png", "image/gif", "image/jpeg"]
},
"video": {
"url": "",
"mime_types": ["video/webm", "video/mp4"]
}
}
# Cloudflare
CLOUDFLARE = {
"ZONE_IDENTIFIER": "",
"API_KEY": "",
"PROD_HOST": ALLOWED_HOSTS # you could also make this a list
} | true | true |
f73014c18015c10c4a57a92b8fba96062dd8c405 | 110 | py | Python | runtest.py | krerkkiat/word-fusion | 54074539b5255830b700c5de185e6dded8f3aec4 | [
"MIT"
] | null | null | null | runtest.py | krerkkiat/word-fusion | 54074539b5255830b700c5de185e6dded8f3aec4 | [
"MIT"
] | null | null | null | runtest.py | krerkkiat/word-fusion | 54074539b5255830b700c5de185e6dded8f3aec4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''A module to run test cases.'''
import unittest

# Pull the project's test cases into this module's namespace so that
# unittest.main() (which discovers tests in __main__) can find them.
from tests import *  # noqa: F401,F403

if __name__ == "__main__":
    # Guarded entry point: importing this module no longer runs the whole
    # suite (and exits the interpreter) as a side effect; running it as a
    # script behaves exactly as before. Also fixes the "testcaes" typo in
    # the module docstring.
    unittest.main()
| 13.75 | 31 | 0.645455 |
import unittest
from tests import *
unittest.main()
| true | true |
f7301503bf0efbff166667ede074659aa5f11e70 | 391 | py | Python | Twitter/wsgi.py | sonus21/MiniTwitter | b62c0c540c1726fc7e6197b33514d7af15f1b58e | [
"BSD-2-Clause"
] | null | null | null | Twitter/wsgi.py | sonus21/MiniTwitter | b62c0c540c1726fc7e6197b33514d7af15f1b58e | [
"BSD-2-Clause"
] | null | null | null | Twitter/wsgi.py | sonus21/MiniTwitter | b62c0c540c1726fc7e6197b33514d7af15f1b58e | [
"BSD-2-Clause"
] | null | null | null | """
WSGI config for Twitter project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application;
# setdefault keeps any DJANGO_SETTINGS_MODULE already set in the environment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Twitter.settings")
# Module-level WSGI callable that servers (gunicorn, uWSGI, mod_wsgi) import.
application = get_wsgi_application()
| 23 | 78 | 0.785166 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Twitter.settings")
application = get_wsgi_application()
| true | true |
f730151b01f0716512edaac7c34300146b453531 | 5,477 | py | Python | docs/conf.py | agaveplatform/agavepy | c2b39fdf60648f7025234f7aa175af873ef0ff75 | [
"BSD-3-Clause"
] | 16 | 2015-07-24T16:54:23.000Z | 2020-10-18T23:10:37.000Z | docs/conf.py | agaveplatform/agavepy | c2b39fdf60648f7025234f7aa175af873ef0ff75 | [
"BSD-3-Clause"
] | 77 | 2015-06-11T22:08:10.000Z | 2020-09-09T19:25:27.000Z | docs/conf.py | agaveplatform/agavepy | c2b39fdf60648f7025234f7aa175af873ef0ff75 | [
"BSD-3-Clause"
] | 24 | 2015-11-05T20:32:48.000Z | 2022-01-26T22:05:53.000Z | # -*- coding: utf-8 -*-
#
# AgavePy documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 5 11:08:11 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'AgavePy'
copyright = u'2018- Texas Advanced Computing Center'
author = u'Texas Advanced Computing Center'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Implement
# https://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html
html_static_path = ['_static']
html_context = {
'css_files': [
'_static/theme_overrides.css', # override wide tables in RTD theme
],
}
# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# NOTE(review): html_static_path is assigned twice in this file (it is also
# set earlier, next to html_context, with the same value) — harmless today,
# but one of the duplicate assignments should be removed so the two copies
# cannot drift apart.
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# NOTE(review): these sidebar templates ('about.html', etc.) are
# alabaster-specific, while html_theme above is 'sphinx_rtd_theme' — the RTD
# theme does not render them. Confirm whether this section is intentional
# leftover from an earlier theme choice.
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html', # needs 'show_related': True theme option to display
        'searchbox.html'
    ]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'AgavePydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AgavePy.tex', u'AgavePy Documentation',
u'Joe Stubbs, Walter Moreira, Matt Vaughn', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'agavepy', u'AgavePy Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AgavePy', u'AgavePy Documentation',
author, 'AgavePy', 'One line description of project.',
'Miscellaneous'),
]
| 30.427778 | 79 | 0.680117 |
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'AgavePy'
copyright = u'2018- Texas Advanced Computing Center'
author = u'Texas Advanced Computing Center'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Implement
# https://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html
html_static_path = ['_static']
html_context = {
'css_files': [
'_static/theme_overrides.css', # override wide tables in RTD theme
],
}
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html'
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'AgavePydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AgavePy.tex', u'AgavePy Documentation',
u'Joe Stubbs, Walter Moreira, Matt Vaughn', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'agavepy', u'AgavePy Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AgavePy', u'AgavePy Documentation',
author, 'AgavePy', 'One line description of project.',
'Miscellaneous'),
]
| true | true |
f73017628abc5a648c3b0d3f76094d50e2de040a | 8,111 | py | Python | contrib/trainer/dream_tf/layers/policy_head.py | Chicoryn/dream-go | 6a4b71d7e1fcc28110ba859c0a2b59c10041c083 | [
"Apache-2.0"
] | 46 | 2017-12-08T01:40:08.000Z | 2022-02-07T12:56:14.000Z | contrib/trainer/dream_tf/layers/policy_head.py | Chicoryn/dream-go | 6a4b71d7e1fcc28110ba859c0a2b59c10041c083 | [
"Apache-2.0"
] | 56 | 2017-12-28T04:00:31.000Z | 2022-03-20T12:39:39.000Z | contrib/trainer/dream_tf/layers/policy_head.py | Chicoryn/dream-go | 6a4b71d7e1fcc28110ba859c0a2b59c10041c083 | [
"Apache-2.0"
] | 8 | 2018-02-01T13:12:32.000Z | 2020-05-11T04:12:25.000Z | # Copyright (c) 2019 Karl Sundequist Blomdahl <karl.sundequist.blomdahl@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import tensorflow as tf
from .batch_norm import batch_norm_conv2d
from .dense import dense
from .recompute_grad import recompute_grad
def policy_head(x, mode, params):
    """
    Policy head attached after the residual blocks, following DeepMind's
    design:

    1. A convolution of `num_samples` filters of kernel size 3 × 3 with
       stride 1
    2. Batch normalisation
    3. A rectifier non-linearity
    4. A fully connected linear layer producing a vector of size
       19² + 1 = 362 — one logit per board intersection plus the pass move

    The forward pass is wrapped with this package's `recompute_grad`
    (see `.recompute_grad`) rather than being called directly.
    """
    num_channels = params['num_channels']
    num_samples = params['num_samples']

    def _forward(features, is_recomputing=False):
        """ Single forward inference pass over `features`. """
        hidden = batch_norm_conv2d(features, 'conv_1', (3, 3, num_channels, num_samples), mode, params, is_recomputing=is_recomputing)
        hidden = tf.nn.relu(hidden)
        # 361 = 19 x 19 board intersections; flatten all feature planes.
        flat = tf.reshape(hidden, (-1, 361 * num_samples))
        logits = dense(flat, 'linear_1', (361 * num_samples, 362), policy_offset_op, mode, params, is_recomputing=is_recomputing)
        # Always hand back full float32 precision to the caller.
        return tf.cast(logits, tf.float32)

    return recompute_grad(_forward)(x)
def policy_offset_op(shape, dtype=None, partition_info=None):
    """ Initial value for the policy offset, this should roughly correspond to
    the log probability of each move being played.

    The signature matches TensorFlow's variable-initializer convention, but
    `shape`, `dtype`, and `partition_info` are all ignored: the return value
    is always this fixed 362-element (60 rows of 6 values plus 2) float64
    vector, one entry per board intersection plus the pass move.
    NOTE(review): since `dtype` is ignored, the caller is presumably expected
    to cast to the variable's dtype — confirm `dense` handles this. """
    return np.array([
        -7.93991e+00, -6.91853e+00, -6.86255e+00, -6.78094e+00, -6.79361e+00, -6.75976e+00,
        -6.88288e+00, -6.90817e+00, -6.93508e+00, -6.92374e+00, -6.91856e+00, -6.91075e+00,
        -6.87607e+00, -6.75246e+00, -6.79823e+00, -6.80791e+00, -6.86863e+00, -6.89708e+00,
        -7.93729e+00, -6.95779e+00, -6.11830e+00, -5.85974e+00, -5.83566e+00, -5.81966e+00,
        -5.84875e+00, -5.90686e+00, -5.97848e+00, -5.99648e+00, -5.99342e+00, -5.99524e+00,
        -5.96306e+00, -5.88135e+00, -5.83725e+00, -5.81963e+00, -5.84671e+00, -5.85574e+00,
        -6.07402e+00, -6.89741e+00, -6.91472e+00, -5.87616e+00, -5.51456e+00, -5.48398e+00,
        -5.55522e+00, -5.49329e+00, -5.70271e+00, -5.65749e+00, -5.70621e+00, -5.68975e+00,
        -5.69774e+00, -5.66463e+00, -5.68246e+00, -5.43859e+00, -5.59398e+00, -5.44977e+00,
        -5.45890e+00, -5.81432e+00, -6.85663e+00, -6.83055e+00, -5.84429e+00, -5.40160e+00,
        -5.34049e+00, -5.66119e+00, -5.62512e+00, -5.71932e+00, -5.72455e+00, -5.70309e+00,
        -5.69903e+00, -5.70189e+00, -5.71451e+00, -5.68138e+00, -5.59716e+00, -5.64521e+00,
        -5.29867e+00, -5.42794e+00, -5.80074e+00, -6.80807e+00, -6.81930e+00, -5.82896e+00,
        -5.63177e+00, -5.67078e+00, -5.93261e+00, -5.78339e+00, -5.80250e+00, -5.78522e+00,
        -5.79703e+00, -5.79409e+00, -5.79848e+00, -5.78746e+00, -5.77879e+00, -5.76154e+00,
        -5.94899e+00, -5.67992e+00, -5.59753e+00, -5.78787e+00, -6.79474e+00, -6.79318e+00,
        -5.85460e+00, -5.47365e+00, -5.60804e+00, -5.79080e+00, -5.80699e+00, -5.80015e+00,
        -5.81436e+00, -5.81617e+00, -5.80918e+00, -5.81150e+00, -5.80510e+00, -5.77611e+00,
        -5.78804e+00, -5.76476e+00, -5.58303e+00, -5.41241e+00, -5.83056e+00, -6.78050e+00,
        -6.88840e+00, -5.91061e+00, -5.69064e+00, -5.71108e+00, -5.79579e+00, -5.80311e+00,
        -5.81472e+00, -5.81526e+00, -5.81671e+00, -5.81616e+00, -5.81570e+00, -5.80513e+00,
        -5.79622e+00, -5.77254e+00, -5.77513e+00, -5.67571e+00, -5.67228e+00, -5.89279e+00,
        -6.86025e+00, -6.91154e+00, -5.97718e+00, -5.66273e+00, -5.72542e+00, -5.78770e+00,
        -5.81699e+00, -5.81516e+00, -5.81869e+00, -5.81941e+00, -5.81940e+00, -5.81482e+00,
        -5.80754e+00, -5.79365e+00, -5.78832e+00, -5.75882e+00, -5.70202e+00, -5.63253e+00,
        -5.94600e+00, -6.88401e+00, -6.91774e+00, -5.99960e+00, -5.70958e+00, -5.70386e+00,
        -5.80010e+00, -5.81106e+00, -5.81648e+00, -5.81789e+00, -5.81997e+00, -5.81948e+00,
        -5.81279e+00, -5.80583e+00, -5.80135e+00, -5.78998e+00, -5.77203e+00, -5.68193e+00,
        -5.67815e+00, -5.96948e+00, -6.88898e+00, -6.91699e+00, -5.99684e+00, -5.69323e+00,
        -5.68440e+00, -5.79516e+00, -5.81060e+00, -5.81611e+00, -5.81406e+00, -5.81620e+00,
        -5.80901e+00, -5.81298e+00, -5.80653e+00, -5.79696e+00, -5.78196e+00, -5.76473e+00,
        -5.65428e+00, -5.66398e+00, -5.96876e+00, -6.89641e+00, -6.92151e+00, -5.99694e+00,
        -5.71110e+00, -5.71325e+00, -5.79821e+00, -5.80778e+00, -5.81212e+00, -5.81205e+00,
        -5.81020e+00, -5.81116e+00, -5.80801e+00, -5.79830e+00, -5.79276e+00, -5.78653e+00,
        -5.77101e+00, -5.68899e+00, -5.69274e+00, -5.97098e+00, -6.90131e+00, -6.89817e+00,
        -5.95772e+00, -5.64660e+00, -5.72654e+00, -5.77678e+00, -5.80212e+00, -5.80607e+00,
        -5.80127e+00, -5.80551e+00, -5.80743e+00, -5.80042e+00, -5.79346e+00, -5.79025e+00,
        -5.78733e+00, -5.75338e+00, -5.69506e+00, -5.63437e+00, -5.95747e+00, -6.88818e+00,
        -6.86408e+00, -5.86964e+00, -5.67686e+00, -5.70769e+00, -5.79369e+00, -5.78719e+00,
        -5.79913e+00, -5.80025e+00, -5.80054e+00, -5.80132e+00, -5.79529e+00, -5.78667e+00,
        -5.78821e+00, -5.76922e+00, -5.76675e+00, -5.69570e+00, -5.68074e+00, -5.90285e+00,
        -6.86338e+00, -6.76061e+00, -5.80263e+00, -5.41706e+00, -5.58843e+00, -5.78328e+00,
        -5.79366e+00, -5.78934e+00, -5.79841e+00, -5.79591e+00, -5.79041e+00, -5.79060e+00,
        -5.78705e+00, -5.78000e+00, -5.77674e+00, -5.75681e+00, -5.57623e+00, -5.50113e+00,
        -5.85626e+00, -6.78012e+00, -6.79139e+00, -5.80594e+00, -5.58041e+00, -5.65286e+00,
        -5.94338e+00, -5.77647e+00, -5.78968e+00, -5.77167e+00, -5.78232e+00, -5.76841e+00,
        -5.77241e+00, -5.75895e+00, -5.78530e+00, -5.76951e+00, -5.88238e+00, -5.64461e+00,
        -5.61617e+00, -5.82903e+00, -6.80791e+00, -6.81286e+00, -5.84175e+00, -5.48596e+00,
        -5.28293e+00, -5.71807e+00, -5.60505e+00, -5.71724e+00, -5.70963e+00, -5.68757e+00,
        -5.65039e+00, -5.67046e+00, -5.68983e+00, -5.69079e+00, -5.58636e+00, -5.60082e+00,
        -5.39104e+00, -5.38788e+00, -5.85818e+00, -6.81584e+00, -6.83461e+00, -5.85197e+00,
        -5.47331e+00, -5.40193e+00, -5.63715e+00, -5.47135e+00, -5.68295e+00, -5.64977e+00,
        -5.67997e+00, -5.64680e+00, -5.67367e+00, -5.61327e+00, -5.67216e+00, -5.50078e+00,
        -5.53072e+00, -5.40751e+00, -5.52960e+00, -5.87713e+00, -6.89602e+00, -6.89446e+00,
        -6.07997e+00, -5.83860e+00, -5.78284e+00, -5.77460e+00, -5.81606e+00, -5.88522e+00,
        -5.95163e+00, -5.97232e+00, -5.95954e+00, -5.96527e+00, -5.94048e+00, -5.88465e+00,
        -5.82810e+00, -5.82003e+00, -5.84255e+00, -5.88531e+00, -6.11968e+00, -6.92480e+00,
        -7.88397e+00, -6.89418e+00, -6.83908e+00, -6.78821e+00, -6.75784e+00, -6.75053e+00,
        -6.85545e+00, -6.88249e+00, -6.88945e+00, -6.88525e+00, -6.88876e+00, -6.86828e+00,
        -6.83631e+00, -6.75981e+00, -6.76317e+00, -6.74771e+00, -6.86408e+00, -6.90874e+00,
        -7.91371e+00, -6.27113e+00
    ])
| 66.483607 | 122 | 0.633461 |
import numpy as np
import tensorflow as tf
from .batch_norm import batch_norm_conv2d
from .dense import dense
from .recompute_grad import recompute_grad
def policy_head(x, mode, params):
    """Policy head: maps shared-trunk features to 362 move logits.

    362 = 361 + 1, consistent with the `361 * num_samples` flatten below;
    presumably 19x19 board points plus a pass move -- TODO confirm.

    Args:
        x: input feature tensor from the shared network trunk.
        mode: estimator mode key, forwarded to the layer helpers.
        params: dict providing at least 'num_channels' and 'num_samples'.

    Returns:
        A float32 tensor of policy logits, shape (batch, 362).
    """
    num_channels = params['num_channels']
    num_samples = params['num_samples']

    def _forward(x, is_recomputing=False):
        """One forward pass; re-executed during backprop by recompute_grad."""
        # 3x3 conv + batch norm reducing num_channels -> num_samples planes.
        y = batch_norm_conv2d(x, 'conv_1', (3, 3, num_channels, num_samples), mode, params, is_recomputing=is_recomputing)
        y = tf.nn.relu(y)
        # Flatten the spatial grid (361 points per sample plane).
        y = tf.reshape(y, (-1, 361 * num_samples))
        # Final dense layer; biases are initialised from the measured
        # log-prior table in `policy_offset_op` (defined below in this module).
        y = dense(y, 'linear_1', (361 * num_samples, 362), policy_offset_op, mode, params, is_recomputing=is_recomputing)
        # Cast to float32 -- presumably to leave any reduced-precision
        # compute inside the helpers; TODO confirm against batch_norm_conv2d.
        return tf.cast(y, tf.float32)

    # Gradient checkpointing: activations inside `_forward` are not stored;
    # they are recomputed during the backward pass to save memory.
    return recompute_grad(_forward)(x)
def policy_offset_op(shape, dtype=None, partition_info=None):
    """Bias initializer for the final policy layer.

    Matches the TF initializer call signature, but `shape`, `dtype`, and
    `partition_info` are ignored: the fixed table below supplies exactly
    362 values (one per policy output of `policy_head`). The numbers look
    like empirically measured log-prior offsets per move -- assumption,
    TODO confirm against how this table was generated.
    """
    return np.array([
        -7.93991e+00, -6.91853e+00, -6.86255e+00, -6.78094e+00, -6.79361e+00, -6.75976e+00,
        -6.88288e+00, -6.90817e+00, -6.93508e+00, -6.92374e+00, -6.91856e+00, -6.91075e+00,
        -6.87607e+00, -6.75246e+00, -6.79823e+00, -6.80791e+00, -6.86863e+00, -6.89708e+00,
        -7.93729e+00, -6.95779e+00, -6.11830e+00, -5.85974e+00, -5.83566e+00, -5.81966e+00,
        -5.84875e+00, -5.90686e+00, -5.97848e+00, -5.99648e+00, -5.99342e+00, -5.99524e+00,
        -5.96306e+00, -5.88135e+00, -5.83725e+00, -5.81963e+00, -5.84671e+00, -5.85574e+00,
        -6.07402e+00, -6.89741e+00, -6.91472e+00, -5.87616e+00, -5.51456e+00, -5.48398e+00,
        -5.55522e+00, -5.49329e+00, -5.70271e+00, -5.65749e+00, -5.70621e+00, -5.68975e+00,
        -5.69774e+00, -5.66463e+00, -5.68246e+00, -5.43859e+00, -5.59398e+00, -5.44977e+00,
        -5.45890e+00, -5.81432e+00, -6.85663e+00, -6.83055e+00, -5.84429e+00, -5.40160e+00,
        -5.34049e+00, -5.66119e+00, -5.62512e+00, -5.71932e+00, -5.72455e+00, -5.70309e+00,
        -5.69903e+00, -5.70189e+00, -5.71451e+00, -5.68138e+00, -5.59716e+00, -5.64521e+00,
        -5.29867e+00, -5.42794e+00, -5.80074e+00, -6.80807e+00, -6.81930e+00, -5.82896e+00,
        -5.63177e+00, -5.67078e+00, -5.93261e+00, -5.78339e+00, -5.80250e+00, -5.78522e+00,
        -5.79703e+00, -5.79409e+00, -5.79848e+00, -5.78746e+00, -5.77879e+00, -5.76154e+00,
        -5.94899e+00, -5.67992e+00, -5.59753e+00, -5.78787e+00, -6.79474e+00, -6.79318e+00,
        -5.85460e+00, -5.47365e+00, -5.60804e+00, -5.79080e+00, -5.80699e+00, -5.80015e+00,
        -5.81436e+00, -5.81617e+00, -5.80918e+00, -5.81150e+00, -5.80510e+00, -5.77611e+00,
        -5.78804e+00, -5.76476e+00, -5.58303e+00, -5.41241e+00, -5.83056e+00, -6.78050e+00,
        -6.88840e+00, -5.91061e+00, -5.69064e+00, -5.71108e+00, -5.79579e+00, -5.80311e+00,
        -5.81472e+00, -5.81526e+00, -5.81671e+00, -5.81616e+00, -5.81570e+00, -5.80513e+00,
        -5.79622e+00, -5.77254e+00, -5.77513e+00, -5.67571e+00, -5.67228e+00, -5.89279e+00,
        -6.86025e+00, -6.91154e+00, -5.97718e+00, -5.66273e+00, -5.72542e+00, -5.78770e+00,
        -5.81699e+00, -5.81516e+00, -5.81869e+00, -5.81941e+00, -5.81940e+00, -5.81482e+00,
        -5.80754e+00, -5.79365e+00, -5.78832e+00, -5.75882e+00, -5.70202e+00, -5.63253e+00,
        -5.94600e+00, -6.88401e+00, -6.91774e+00, -5.99960e+00, -5.70958e+00, -5.70386e+00,
        -5.80010e+00, -5.81106e+00, -5.81648e+00, -5.81789e+00, -5.81997e+00, -5.81948e+00,
        -5.81279e+00, -5.80583e+00, -5.80135e+00, -5.78998e+00, -5.77203e+00, -5.68193e+00,
        -5.67815e+00, -5.96948e+00, -6.88898e+00, -6.91699e+00, -5.99684e+00, -5.69323e+00,
        -5.68440e+00, -5.79516e+00, -5.81060e+00, -5.81611e+00, -5.81406e+00, -5.81620e+00,
        -5.80901e+00, -5.81298e+00, -5.80653e+00, -5.79696e+00, -5.78196e+00, -5.76473e+00,
        -5.65428e+00, -5.66398e+00, -5.96876e+00, -6.89641e+00, -6.92151e+00, -5.99694e+00,
        -5.71110e+00, -5.71325e+00, -5.79821e+00, -5.80778e+00, -5.81212e+00, -5.81205e+00,
        -5.81020e+00, -5.81116e+00, -5.80801e+00, -5.79830e+00, -5.79276e+00, -5.78653e+00,
        -5.77101e+00, -5.68899e+00, -5.69274e+00, -5.97098e+00, -6.90131e+00, -6.89817e+00,
        -5.95772e+00, -5.64660e+00, -5.72654e+00, -5.77678e+00, -5.80212e+00, -5.80607e+00,
        -5.80127e+00, -5.80551e+00, -5.80743e+00, -5.80042e+00, -5.79346e+00, -5.79025e+00,
        -5.78733e+00, -5.75338e+00, -5.69506e+00, -5.63437e+00, -5.95747e+00, -6.88818e+00,
        -6.86408e+00, -5.86964e+00, -5.67686e+00, -5.70769e+00, -5.79369e+00, -5.78719e+00,
        -5.79913e+00, -5.80025e+00, -5.80054e+00, -5.80132e+00, -5.79529e+00, -5.78667e+00,
        -5.78821e+00, -5.76922e+00, -5.76675e+00, -5.69570e+00, -5.68074e+00, -5.90285e+00,
        -6.86338e+00, -6.76061e+00, -5.80263e+00, -5.41706e+00, -5.58843e+00, -5.78328e+00,
        -5.79366e+00, -5.78934e+00, -5.79841e+00, -5.79591e+00, -5.79041e+00, -5.79060e+00,
        -5.78705e+00, -5.78000e+00, -5.77674e+00, -5.75681e+00, -5.57623e+00, -5.50113e+00,
        -5.85626e+00, -6.78012e+00, -6.79139e+00, -5.80594e+00, -5.58041e+00, -5.65286e+00,
        -5.94338e+00, -5.77647e+00, -5.78968e+00, -5.77167e+00, -5.78232e+00, -5.76841e+00,
        -5.77241e+00, -5.75895e+00, -5.78530e+00, -5.76951e+00, -5.88238e+00, -5.64461e+00,
        -5.61617e+00, -5.82903e+00, -6.80791e+00, -6.81286e+00, -5.84175e+00, -5.48596e+00,
        -5.28293e+00, -5.71807e+00, -5.60505e+00, -5.71724e+00, -5.70963e+00, -5.68757e+00,
        -5.65039e+00, -5.67046e+00, -5.68983e+00, -5.69079e+00, -5.58636e+00, -5.60082e+00,
        -5.39104e+00, -5.38788e+00, -5.85818e+00, -6.81584e+00, -6.83461e+00, -5.85197e+00,
        -5.47331e+00, -5.40193e+00, -5.63715e+00, -5.47135e+00, -5.68295e+00, -5.64977e+00,
        -5.67997e+00, -5.64680e+00, -5.67367e+00, -5.61327e+00, -5.67216e+00, -5.50078e+00,
        -5.53072e+00, -5.40751e+00, -5.52960e+00, -5.87713e+00, -6.89602e+00, -6.89446e+00,
        -6.07997e+00, -5.83860e+00, -5.78284e+00, -5.77460e+00, -5.81606e+00, -5.88522e+00,
        -5.95163e+00, -5.97232e+00, -5.95954e+00, -5.96527e+00, -5.94048e+00, -5.88465e+00,
        -5.82810e+00, -5.82003e+00, -5.84255e+00, -5.88531e+00, -6.11968e+00, -6.92480e+00,
        -7.88397e+00, -6.89418e+00, -6.83908e+00, -6.78821e+00, -6.75784e+00, -6.75053e+00,
        -6.85545e+00, -6.88249e+00, -6.88945e+00, -6.88525e+00, -6.88876e+00, -6.86828e+00,
        -6.83631e+00, -6.75981e+00, -6.76317e+00, -6.74771e+00, -6.86408e+00, -6.90874e+00,
        -7.91371e+00, -6.27113e+00
    ])
| true | true |
f73017774feaf0d8d33dce061c23d77e28fb2da8 | 1,072 | py | Python | aidistillery/models/fasttext_wrapper.py | TheMTank/arxiv-summariser | db4f1e42bcc9185e197a00a18e280a4a3011453c | [
"MIT"
] | 17 | 2018-11-26T23:06:20.000Z | 2022-01-18T21:43:17.000Z | aidistillery/models/fasttext_wrapper.py | TheMTank/arxiv-summariser | db4f1e42bcc9185e197a00a18e280a4a3011453c | [
"MIT"
] | 3 | 2018-11-27T12:17:20.000Z | 2019-02-05T11:40:44.000Z | aidistillery/models/fasttext_wrapper.py | TheMTank/arxiv-summariser | db4f1e42bcc9185e197a00a18e280a4a3011453c | [
"MIT"
] | 3 | 2019-03-06T10:14:08.000Z | 2020-01-21T17:26:20.000Z | import logging
from gensim.models import fasttext
from aidistillery import file_handling
class FastTextWrapper:
    """Thin wrapper around gensim's FastText model.

    All training hyper-parameters are captured at construction time;
    the model itself is trained lazily in :meth:`fit`.
    """

    def __init__(self, sentences, use_bf=True, dimension=100, window=5, min_count=5, workers=4, sg=0, iterations=5,
                 type="fasttext", dataset=""):
        logging.info("FastText Wrapper Initialized")
        # Training corpus: an iterable of tokenised sentences.
        self.sentences = sentences
        # Bookkeeping labels for downstream consumers.
        self.type = type
        self.dataset = dataset
        # When True, fit() converts the trained model to the BF format.
        self.use_bf = use_bf
        # gensim FastText hyper-parameters, passed through verbatim in fit().
        self.dimension = dimension
        self.window = window
        self.min_count = min_count
        self.workers = workers
        self.sg = sg  # 0 = CBOW, per gensim's `sg` convention
        self.iterations = iterations

    def fit(self):
        """Train FastText on the stored corpus.

        Returns the raw gensim model when ``use_bf`` is False, otherwise
        the model converted to the project's BF embedding format.
        """
        trained_model = fasttext.FastText(
            self.sentences,
            size=self.dimension,
            window=self.window,
            min_count=self.min_count,
            workers=self.workers,
            sg=self.sg,
            iter=self.iterations,
        )
        if self.use_bf:
            return file_handling.BF().load_from_gensim(trained_model)
        return trained_model
| 29.777778 | 117 | 0.606343 | import logging
from gensim.models import fasttext
from aidistillery import file_handling
class FastTextWrapper:
def __init__(self, sentences, use_bf = True, dimension=100, window=5, min_count=5, workers=4, sg=0, iterations=5,
type="fasttext", dataset = ""):
logging.info("FastText Wrapper Initialized")
self.sentences = sentences
self.type = type
self.dataset = dataset
self.use_bf = use_bf
self.dimension = dimension
self.window = window
self.min_count = min_count
self.workers = workers
self.sg = sg
self.iterations = iterations
def fit(self):
model = fasttext.FastText(self.sentences,
size=self.dimension,
window=self.window,
min_count=self.min_count,
workers=self.workers,
sg=self.sg,
iter=self.iterations)
if not self.use_bf:
return model
else:
bf_format = file_handling.BF().load_from_gensim(model)
return bf_format
| true | true |
f730187437fb8528dba378747c162f34dd4029e0 | 270 | py | Python | subprocess_audio_scripts/success_bad_form.py | colesteere/PT-Exercise-Feedback | 974b6e441bc096d691bd4abd490cfab165560512 | [
"Apache-2.0"
] | null | null | null | subprocess_audio_scripts/success_bad_form.py | colesteere/PT-Exercise-Feedback | 974b6e441bc096d691bd4abd490cfab165560512 | [
"Apache-2.0"
] | null | null | null | subprocess_audio_scripts/success_bad_form.py | colesteere/PT-Exercise-Feedback | 974b6e441bc096d691bd4abd490cfab165560512 | [
"Apache-2.0"
] | null | null | null | import pyttsx3
# from run_bicep_curl import bicepCount
import sys

# Offline text-to-speech announcement of the number passed as the first
# command-line argument (presumably a rep count, run as a short-lived
# subprocess by the main app -- TODO confirm against the caller).
engine = pyttsx3.init()
# NOTE(review): `voices` is fetched but never used afterwards.
voices = engine.getProperty("voices")
engine.setProperty("rate", 165)  # speaking rate passed to pyttsx3
engine.setProperty("voice", "english-us")
engine.say("Number {}.".format(sys.argv[1]))
engine.runAndWait()  # block until the utterance has finished playing
import sys
engine = pyttsx3.init()
voices = engine.getProperty("voices")
engine.setProperty("rate", 165)
engine.setProperty("voice", "english-us")
engine.say("Number {}.".format(sys.argv[1]))
engine.runAndWait() | true | true |
f73019c9ad078a6db358a139a1c9a8db4ff33165 | 2,259 | py | Python | newdust/graindist/composition/cmdrude.py | eblur/newdust | 7e843ae2604a844826606ea04c459694fdd5c178 | [
"BSD-2-Clause"
] | 4 | 2018-02-04T19:04:01.000Z | 2022-02-09T04:11:18.000Z | newdust/graindist/composition/cmdrude.py | eblur/newdust | 7e843ae2604a844826606ea04c459694fdd5c178 | [
"BSD-2-Clause"
] | 21 | 2017-08-15T21:13:42.000Z | 2021-12-23T20:07:24.000Z | newdust/graindist/composition/cmdrude.py | eblur/newdust | 7e843ae2604a844826606ea04c459694fdd5c178 | [
"BSD-2-Clause"
] | 1 | 2021-01-28T18:29:12.000Z | 2021-01-28T18:29:12.000Z | import numpy as np
from newdust import constants as c
__all__ = ['CmDrude']
RHO_DRUDE = 3.0 # g cm^-3
LAM_MAX = c.hc / 0.01 # maximal wavelength that we will allow for RG-Drude
class CmDrude(object):
    """
    Complex index of refraction under the Drude approximation.

    | **ATTRIBUTES**
    | cmtype : 'Drude'
    | rho : grain density [g cm^-3]
    | citation : A string containing citation to original work
    |
    | *functions*
    | rp(lam, unit='kev') : Returns real part (unit='kev'|'angs')
    | ip(lam, unit='kev') : Returns imaginary part (always 0.0)
    | cm(lam, unit='kev') : Complex index of refraction of dtype='complex'
    | plot(lam, unit='kev') : Plots Re(m-1)
    """
    def __init__(self, rho=RHO_DRUDE):  # Returns a CM using the Drude approximation
        self.cmtype = 'Drude'
        self.rho = rho
        self.citation = "Using the Drude approximation.\nBohren, C. F. & Huffman, D. R., 1983, Absorption and Scattering of Light by Small Particles (New York: Wiley)"

    def rp(self, lam, unit='kev'):
        """Real part of the refractive index: m = 1 + (m - 1)."""
        assert unit in c.ALLOWED_LAM_UNITS
        lam_cm = c._lam_cm(lam, unit)
        # Drude form: (m - 1) scales with grain density and lambda^2.
        mm1 = self.rho / (2.0*c.m_p) * c.r_e/(2.0*np.pi) * np.power(lam_cm, 2)
        return mm1 + 1.0
        # NOTE(review): an unreachable triple-quoted string (old code that
        # clamped the result above LAM_MAX) used to sit after this return;
        # it was dead code and has been removed.

    def ip(self, lam, unit='kev'):
        """Imaginary part: identically zero in this approximation.

        Returns an array of zeros matching `lam`'s size for array input,
        or the scalar 0.0 for scalar input (preserves original behavior).
        """
        if np.size(lam) > 1:
            return np.zeros(np.size(lam))
        return 0.0

    def cm(self, lam, unit='kev'):
        """Complex index of refraction (imaginary part is zero)."""
        return self.rp(lam, unit=unit) + 0j

    def plot(self, ax, lam, unit='kev', **kwargs):
        """Plot Re(m-1) on `ax` vs energy (keV) or wavelength (Angstroms)."""
        assert unit in c.ALLOWED_LAM_UNITS
        rp = self.rp(lam, unit=unit)
        ax.plot(lam, rp - 1.0, **kwargs)
        ax.set_ylabel("m-1")
        if unit == 'kev':
            ax.set_xlabel("Energy (keV)")
        if unit == 'angs':
            ax.set_xlabel("Wavelength (Angstroms)")
| 35.296875 | 167 | 0.558212 | import numpy as np
from newdust import constants as c
__all__ = ['CmDrude']
RHO_DRUDE = 3.0
LAM_MAX = c.hc / 0.01
class CmDrude(object):
def __init__(self, rho=RHO_DRUDE):
self.cmtype = 'Drude'
self.rho = rho
self.citation = "Using the Drude approximation.\nBohren, C. F. & Huffman, D. R., 1983, Absorption and Scattering of Light by Small Particles (New York: Wiley)"
def rp(self, lam, unit='kev'):
assert unit in c.ALLOWED_LAM_UNITS
lam_cm = c._lam_cm(lam, unit)
mm1 = self.rho / (2.0*c.m_p) * c.r_e/(2.0*np.pi) * np.power(lam_cm, 2)
return mm1 + 1.0
def ip(self, lam, unit='kev'):
if np.size(lam) > 1:
return np.zeros(np.size(lam))
else:
return 0.0
def cm(self, lam, unit='kev'):
return self.rp(lam, unit=unit) + 0j
def plot(self, ax, lam, unit='kev', **kwargs):
assert unit in c.ALLOWED_LAM_UNITS
rp = self.rp(lam, unit=unit)
ax.plot(lam, rp-1.0, **kwargs)
ax.set_ylabel("m-1")
if unit == 'kev':
ax.set_xlabel("Energy (keV)")
if unit == 'angs':
ax.set_xlabel("Wavelength (Angstroms)")
| true | true |
f7301aac92ea6041e0cee9eb39608de909e9d91c | 6,055 | py | Python | tests/doc_test/doc_export_id/conf.py | David-Le-Nir/sphinxcontrib-needs | fe809445505fa1e9bf5963eab1d6283dad405e92 | [
"MIT"
] | null | null | null | tests/doc_test/doc_export_id/conf.py | David-Le-Nir/sphinxcontrib-needs | fe809445505fa1e9bf5963eab1d6283dad405e92 | [
"MIT"
] | 2 | 2022-02-13T19:49:18.000Z | 2022-02-13T19:49:18.000Z | tests/doc_test/doc_export_id/conf.py | David-Le-Nir/sphinxcontrib-needs | fe809445505fa1e9bf5963eab1d6283dad405e92 | [
"MIT"
] | null | null | null | #
# needs test docs documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 28 11:37:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../../sphinxcontrib"))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Enable sphinx-needs plus PlantUML (used by sphinx-needs flow diagrams).
extensions = ["sphinxcontrib.needs", "sphinxcontrib.plantuml"]

# Need object types used in this test doc set: directive name, human-readable
# title, ID prefix, and the PlantUML node colour/style.
needs_types = [
    {"directive": "story", "title": "User Story", "prefix": "US_", "color": "#BFD8D2", "style": "node"},
    {"directive": "spec", "title": "Specification", "prefix": "SP_", "color": "#FEDCD2", "style": "node"},
    {"directive": "impl", "title": "Implementation", "prefix": "IM_", "color": "#DF744A", "style": "node"},
    {"directive": "test", "title": "Test Case", "prefix": "TC_", "color": "#DCB239", "style": "node"},
]

# Extra link types between needs. `incoming`/`outgoing` are the link labels,
# `copy` controls whether targets are copied into the plain `links` option,
# and `style`/`style_part` are the PlantUML edge styles.
needs_extra_links = [
    {
        "option": "links",
        "incoming": "is linked by",
        "outgoing": "links to",
        "copy": False,
        "style": "#black",
        "style_part": "dotted,#black",
    },
    {
        "option": "blocks",
        "incoming": "is blocked by",
        "outgoing": "blocks",
        "copy": True,
        "style": "bold,#AA0000",
    },
    {
        "option": "tests",
        "incoming": "is tested by",
        "outgoing": "tests",
        "copy": False,
        "style": "dashed,#00AA00",
        "style_part": "dotted,#00AA00",
    },
]

# Only these link types are drawn in needflow diagrams.
needs_flow_link_types = ["links", "tests"]

# Run the PlantUML jar bundled with the repo's docs; emit diagrams as SVG.
plantuml = "java -jar %s" % os.path.join(os.path.dirname(__file__), "..", "..", "..", "docs", "utils", "plantuml.jar")
plantuml_output_format = "svg"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "needs test docs"
copyright = "2017, team useblocks"
author = "team useblocks"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "needstestdocsdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "needstestdocs.tex", "needs test docs Documentation", "team useblocks", "manual"),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "needstestdocs", "needs test docs Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"needstestdocs",
"needs test docs Documentation",
author,
"needstestdocs",
"One line description of project.",
"Miscellaneous",
),
]
| 32.553763 | 118 | 0.647399 |
import os
import sys
sys.path.insert(0, os.path.abspath("../../sphinxcontrib"))
extensions = ["sphinxcontrib.needs", "sphinxcontrib.plantuml"]
needs_types = [
{"directive": "story", "title": "User Story", "prefix": "US_", "color": "#BFD8D2", "style": "node"},
{"directive": "spec", "title": "Specification", "prefix": "SP_", "color": "#FEDCD2", "style": "node"},
{"directive": "impl", "title": "Implementation", "prefix": "IM_", "color": "#DF744A", "style": "node"},
{"directive": "test", "title": "Test Case", "prefix": "TC_", "color": "#DCB239", "style": "node"},
]
needs_extra_links = [
{
"option": "links",
"incoming": "is linked by",
"outgoing": "links to",
"copy": False,
"style": "#black",
"style_part": "dotted,#black",
},
{
"option": "blocks",
"incoming": "is blocked by",
"outgoing": "blocks",
"copy": True,
"style": "bold,#AA0000",
},
{
"option": "tests",
"incoming": "is tested by",
"outgoing": "tests",
"copy": False,
"style": "dashed,#00AA00",
"style_part": "dotted,#00AA00",
},
]
needs_flow_link_types = ["links", "tests"]
plantuml = "java -jar %s" % os.path.join(os.path.dirname(__file__), "..", "..", "..", "docs", "utils", "plantuml.jar")
plantuml_output_format = "svg"
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = "needs test docs"
copyright = "2017, team useblocks"
author = "team useblocks"
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "needstestdocsdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "needstestdocs.tex", "needs test docs Documentation", "team useblocks", "manual"),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "needstestdocs", "needs test docs Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"needstestdocs",
"needs test docs Documentation",
author,
"needstestdocs",
"One line description of project.",
"Miscellaneous",
),
]
| true | true |
f7301c822967e62a7a90c57cb1b15bfd69425390 | 1,409 | py | Python | pyganalytics/extract.py | dacker-team/pyganalytics | e64eedc582ddd31cc9534cf414d1734b4f512f9e | [
"BSD-2-Clause"
] | 6 | 2018-05-07T14:33:32.000Z | 2019-12-05T12:58:24.000Z | pyganalytics/extract.py | dacker-team/pyganalytics | e64eedc582ddd31cc9534cf414d1734b4f512f9e | [
"BSD-2-Clause"
] | 1 | 2020-02-17T09:24:54.000Z | 2020-02-17T09:24:54.000Z | pyganalytics/extract.py | dacker-team/pyganalytics | e64eedc582ddd31cc9534cf414d1734b4f512f9e | [
"BSD-2-Clause"
] | null | null | null | def get_metrics(response):
"""
Extract asked metrics from api response
@list_metrics : list of dict
"""
list_metrics = []
for i in response['reports'][0]['columnHeader']['metricHeader']['metricHeaderEntries']:
list_metrics.append(i['name'])
return list_metrics
def get_dimensions(response):
"""
Extract asked dimensions from api response
@list_dimensions : list of dict
"""
return response['reports'][0]['columnHeader']['dimensions']
def extract_api_data(response):
"""
Extract all data from api response
"""
try:
rows = response['reports'][0]['data']['rows']
except:
return []
try:
samples_read_counts = response['reports'][0]['data']['samplesReadCounts']
sampling_space_sizes = response['reports'][0]['data']['samplesReadCounts']
print("SAMPLING")
print(samples_read_counts)
print(sampling_space_sizes)
exit()
except:
pass
metric_response = get_metrics(response)
dimensions_response = get_dimensions(response)
data = []
for row in rows:
d = {}
j = 0
for i in dimensions_response:
d[i] = row['dimensions'][j]
j = j + 1
j = 0
for i in metric_response:
d[i] = row['metrics'][0]['values'][j]
j = j + 1
data.append(d)
return data
| 26.092593 | 91 | 0.584102 | def get_metrics(response):
list_metrics = []
for i in response['reports'][0]['columnHeader']['metricHeader']['metricHeaderEntries']:
list_metrics.append(i['name'])
return list_metrics
def get_dimensions(response):
return response['reports'][0]['columnHeader']['dimensions']
def extract_api_data(response):
try:
rows = response['reports'][0]['data']['rows']
except:
return []
try:
samples_read_counts = response['reports'][0]['data']['samplesReadCounts']
sampling_space_sizes = response['reports'][0]['data']['samplesReadCounts']
print("SAMPLING")
print(samples_read_counts)
print(sampling_space_sizes)
exit()
except:
pass
metric_response = get_metrics(response)
dimensions_response = get_dimensions(response)
data = []
for row in rows:
d = {}
j = 0
for i in dimensions_response:
d[i] = row['dimensions'][j]
j = j + 1
j = 0
for i in metric_response:
d[i] = row['metrics'][0]['values'][j]
j = j + 1
data.append(d)
return data
| true | true |
f7301e348946657ef9cc15b2a18a1e18c5bb9a53 | 1,556 | py | Python | Graph/P02_DepthFirstSearch.py | Abhishekkumar001/Data-Structures-using-Python-master | b17b1f50032f1460000f411e22a419675a0c08dc | [
"MIT"
] | null | null | null | Graph/P02_DepthFirstSearch.py | Abhishekkumar001/Data-Structures-using-Python-master | b17b1f50032f1460000f411e22a419675a0c08dc | [
"MIT"
] | null | null | null | Graph/P02_DepthFirstSearch.py | Abhishekkumar001/Data-Structures-using-Python-master | b17b1f50032f1460000f411e22a419675a0c08dc | [
"MIT"
] | null | null | null | class Graph():
def __init__(self):
self.vertex = {}
# for printing the Graph vertexes
def printGraph(self):
print(self.vertex)
for i in self.vertex.keys():
print(i,' -> ', ' -> '.join([str(j) for j in self.vertex[i]]))
# for adding the edge beween two vertexes
def addEdge(self, fromVertex, toVertex):
# check if vertex is already present,
if fromVertex in self.vertex.keys():
self.vertex[fromVertex].append(toVertex)
else:
# else make a new vertex
self.vertex[fromVertex] = [toVertex]
def DFS(self):
# visited array for storing already visited nodes
visited = [False] * len(self.vertex)
# call the recursive helper function
for i in range(len(self.vertex)):
if visited[i] == False:
self.DFSRec(i, visited)
def DFSRec(self, startVertex, visited):
# mark start vertex as visited
visited[startVertex] = True
print(startVertex, end = ' ')
# Recur for all the vertexes that are adjacent to this node
for i in self.vertex.keys():
if visited[i] == False:
self.DFSRec(i, visited)
if __name__ == '__main__':
    # Demo: build a small directed graph, print it, then run DFS.
    g = Graph()
    g.addEdge(0, 1)
    g.addEdge(0, 2)
    g.addEdge(1, 2)
    g.addEdge(2, 0)
    g.addEdge(2, 3)
    g.addEdge(3, 3)  # self-loop
    g.printGraph()
    print('DFS:')
    g.DFS()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 25.933333 | 74 | 0.539846 | class Graph():
def __init__(self):
self.vertex = {}
def printGraph(self):
print(self.vertex)
for i in self.vertex.keys():
print(i,' -> ', ' -> '.join([str(j) for j in self.vertex[i]]))
def addEdge(self, fromVertex, toVertex):
if fromVertex in self.vertex.keys():
self.vertex[fromVertex].append(toVertex)
else:
self.vertex[fromVertex] = [toVertex]
def DFS(self):
visited = [False] * len(self.vertex)
for i in range(len(self.vertex)):
if visited[i] == False:
self.DFSRec(i, visited)
def DFSRec(self, startVertex, visited):
visited[startVertex] = True
print(startVertex, end = ' ')
for i in self.vertex.keys():
if visited[i] == False:
self.DFSRec(i, visited)
if __name__ == '__main__':
g = Graph()
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(1, 2)
g.addEdge(2, 0)
g.addEdge(2, 3)
g.addEdge(3, 3)
g.printGraph()
print('DFS:')
g.DFS()
| true | true |
f7301e8a0de80422b08e8287165f800e2b77df36 | 29,295 | py | Python | perfkitbenchmarker/benchmark_spec.py | sachinpatkar/PerfKitBenchmarker | ed2898278244d71501de87bb181d50b3561dcf44 | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/benchmark_spec.py | sachinpatkar/PerfKitBenchmarker | ed2898278244d71501de87bb181d50b3561dcf44 | [
"Apache-2.0"
] | 1 | 2021-03-26T00:41:05.000Z | 2021-03-26T00:41:05.000Z | perfkitbenchmarker/benchmark_spec.py | sachinpatkar/PerfKitBenchmarker | ed2898278244d71501de87bb181d50b3561dcf44 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Container for all data required for a benchmark to run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import copy
import datetime
import importlib
import logging
import os
import pickle
import threading
import uuid
from perfkitbenchmarker import benchmark_status
from perfkitbenchmarker import capacity_reservation
from perfkitbenchmarker import cloud_tpu
from perfkitbenchmarker import container_service
from perfkitbenchmarker import context
from perfkitbenchmarker import disk
from perfkitbenchmarker import dpb_service
from perfkitbenchmarker import edw_service
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import managed_relational_db
from perfkitbenchmarker import nfs_service
from perfkitbenchmarker import os_types
from perfkitbenchmarker import provider_info
from perfkitbenchmarker import providers
from perfkitbenchmarker import smb_service
from perfkitbenchmarker import spark_service
from perfkitbenchmarker import stages
from perfkitbenchmarker import static_virtual_machine as static_vm
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
import six
from six.moves import range
import six.moves._thread
import six.moves.copyreg
def PickleLock(lock):
    """copyreg reducer: serialise a thread lock as its locked/unlocked state."""
    return UnPickleLock, (lock.locked(),)


def UnPickleLock(locked, *args):
    """Rebuild a lock from its pickled state, re-acquiring it when needed.

    Raises pickle.UnpicklingError if a locked lock cannot be re-acquired.
    """
    restored = threading.Lock()
    if locked and not restored.acquire(False):
        raise pickle.UnpicklingError('Cannot acquire lock')
    return restored
# Register the custom reducer so threading locks held by a BenchmarkSpec can
# round-trip through pickle (see PickleLock/UnPickleLock above).
six.moves.copyreg.pickle(six.moves._thread.LockType, PickleLock)
# Modes for --benchmark_compatibility_checking (defined below).
SUPPORTED = 'strict'
NOT_EXCLUDED = 'permissive'
SKIP_CHECK = 'none'
# Timestamp format used when tagging resources (see _GetResourceDict).
# GCP labels only allow hyphens (-), underscores (_), lowercase characters,
# numbers, and international characters, so the format avoids anything else;
# metadata values allow all characters and numbers.
METADATA_TIME_FORMAT = '%Y%m%dt%H%M%Sz'
FLAGS = flags.FLAGS
flags.DEFINE_enum('cloud', providers.GCP, providers.VALID_CLOUDS,
                  'Name of the cloud to use.')
flags.DEFINE_string('scratch_dir', None,
                    'Base name for all scratch disk directories in the VM. '
                    'Upon creation, these directories will have numbers '
                    'appended to them (for example /scratch0, /scratch1, etc).')
flags.DEFINE_string('startup_script', None,
                    'Script to run right after vm boot.')
flags.DEFINE_string('postrun_script', None,
                    'Script to run right after run stage.')
flags.DEFINE_integer('create_and_boot_post_task_delay', None,
                     'Delay in seconds to delay in between boot tasks.')
# pyformat: disable
flags.DEFINE_enum('benchmark_compatibility_checking', SUPPORTED,
                  [SUPPORTED, NOT_EXCLUDED, SKIP_CHECK],
                  'Method used to check compatibility between the benchmark '
                  ' and the cloud. ' + SUPPORTED + ' runs the benchmark only'
                  ' if the cloud provider has declared it supported. ' +
                  NOT_EXCLUDED + ' runs the benchmark unless it has been'
                  ' declared not supported by the cloud provider. ' + SKIP_CHECK
                  + ' does not do the compatibility'
                  ' check.')
# pyformat: enable
class BenchmarkSpec(object):
  """Contains the various data required to make a benchmark run."""

  # Running count of BenchmarkSpec instances, used for sequence numbering.
  total_benchmarks = 0

  def __init__(self, benchmark_module, benchmark_config, benchmark_uid):
    """Initialize a BenchmarkSpec object.

    Args:
      benchmark_module: The benchmark module object.
      benchmark_config: BenchmarkConfigSpec. The configuration for the
        benchmark.
      benchmark_uid: An identifier unique to this run of the benchmark even
        if the same benchmark is run multiple times with different configs.
    """
    self.config = benchmark_config
    self.name = benchmark_module.BENCHMARK_NAME
    self.uid = benchmark_uid
    self.status = benchmark_status.SKIPPED
    self.failed_substatus = None
    self.status_detail = None
    BenchmarkSpec.total_benchmarks += 1
    self.sequence_number = BenchmarkSpec.total_benchmarks
    self.vms = []
    self.networks = {}
    self.firewalls = {}
    self.networks_lock = threading.Lock()
    self.firewalls_lock = threading.Lock()
    self.vm_groups = {}
    self.container_specs = benchmark_config.container_specs or {}
    self.container_registry = None
    self.deleted = False
    self.uuid = '%s-%s' % (FLAGS.run_uri, uuid.uuid4())
    self.always_call_cleanup = False
    self.spark_service = None
    self.dpb_service = None
    self.container_cluster = None
    self.managed_relational_db = None
    self.tpus = []
    self.tpu_groups = {}
    self.edw_service = None
    self.nfs_service = None
    self.smb_service = None
    self.app_groups = {}
    self._zone_index = 0
    self.capacity_reservations = []
    # Modules can't be pickled, but functions can, so we store the functions
    # necessary to run the benchmark.
    self.BenchmarkPrepare = benchmark_module.Prepare
    self.BenchmarkRun = benchmark_module.Run
    self.BenchmarkCleanup = benchmark_module.Cleanup
    # Set the current thread's BenchmarkSpec object to this one.
    context.SetThreadBenchmarkSpec(self)

  def __repr__(self):
    return '%s(%r)' % (self.__class__, self.__dict__)

  def __str__(self):
    return (
        'Benchmark name: {0}\nFlags: {1}'
        .format(self.name, self.config.flags))

  @contextlib.contextmanager
  def RedirectGlobalFlags(self):
    """Redirects flag reads and writes to the benchmark-specific flags object.

    Within the enclosed code block, reads and writes to the flags.FLAGS object
    are redirected to a copy that has been merged with config-provided flag
    overrides specific to this benchmark run.
    """
    with self.config.RedirectFlags(FLAGS):
      yield

  def ConstructContainerCluster(self):
    """Create the container cluster."""
    if self.config.container_cluster is None:
      return
    cloud = self.config.container_cluster.cloud
    cluster_type = self.config.container_cluster.type
    providers.LoadProvider(cloud)
    container_cluster_class = container_service.GetContainerClusterClass(
        cloud, cluster_type)
    self.container_cluster = container_cluster_class(
        self.config.container_cluster)

  def ConstructContainerRegistry(self):
    """Create the container registry."""
    if self.config.container_registry is None:
      return
    cloud = self.config.container_registry.cloud
    providers.LoadProvider(cloud)
    container_registry_class = container_service.GetContainerRegistryClass(
        cloud)
    self.container_registry = container_registry_class(
        self.config.container_registry)

  def ConstructDpbService(self):
    """Create the dpb_service object and create groups for its vms."""
    if self.config.dpb_service is None:
      return
    providers.LoadProvider(self.config.dpb_service.worker_group.cloud)
    dpb_service_class = dpb_service.GetDpbServiceClass(
        self.config.dpb_service.service_type)
    self.dpb_service = dpb_service_class(self.config.dpb_service)

  def ConstructManagedRelationalDb(self):
    """Create the managed relational db and create groups for its vms."""
    if self.config.managed_relational_db is None:
      return
    cloud = self.config.managed_relational_db.cloud
    providers.LoadProvider(cloud)
    managed_relational_db_class = (
        managed_relational_db.GetManagedRelationalDbClass(cloud))
    self.managed_relational_db = managed_relational_db_class(
        self.config.managed_relational_db)

  def ConstructTpuGroup(self, group_spec):
    """Constructs a single cloud TPU object from its group spec.

    Args:
      group_spec: The spec for the TPU group, or None.

    Returns:
      A cloud_tpu object, or None if group_spec is None.
    """
    if group_spec is None:
      return
    cloud = group_spec.cloud
    providers.LoadProvider(cloud)
    tpu_class = cloud_tpu.GetTpuClass(cloud)
    return tpu_class(group_spec)

  def ConstructTpu(self):
    """Constructs the BenchmarkSpec's cloud TPU objects."""
    tpu_group_specs = self.config.tpu_groups
    for group_name, group_spec in sorted(six.iteritems(tpu_group_specs)):
      tpu = self.ConstructTpuGroup(group_spec)
      self.tpu_groups[group_name] = tpu
      self.tpus.append(tpu)

  def ConstructEdwService(self):
    """Create the edw_service object."""
    if self.config.edw_service is None:
      return
    # Load necessary modules from the provider to account for dependencies
    providers.LoadProvider(
        edw_service.TYPE_2_PROVIDER.get(self.config.edw_service.type))
    # Load the module for the edw service based on type
    edw_service_module = importlib.import_module(edw_service.TYPE_2_MODULE.get(
        self.config.edw_service.type))
    # The edw service class name is the capitalized service type, e.g.
    # 'redshift' -> 'Redshift'.
    edw_service_class = getattr(edw_service_module,
                                self.config.edw_service.type[0].upper() +
                                self.config.edw_service.type[1:])
    # Check if a new instance needs to be created or restored from snapshot
    self.edw_service = edw_service_class(self.config.edw_service)

  def ConstructNfsService(self):
    """Construct the NFS service object.

    Creates an NFS Service only if an NFS disk is found in the disk_specs.
    """
    if self.nfs_service:
      logging.info('NFS service already created: %s', self.nfs_service)
      return
    for group_spec in self.config.vm_groups.values():
      if not group_spec.disk_spec or not group_spec.vm_count:
        continue
      disk_spec = group_spec.disk_spec
      if disk_spec.disk_type != disk.NFS:
        continue
      # Only one NFS service is created: the first NFS disk spec wins.
      if disk_spec.nfs_ip_address:
        self.nfs_service = nfs_service.StaticNfsService(disk_spec)
      else:
        cloud = group_spec.cloud
        providers.LoadProvider(cloud)
        nfs_class = nfs_service.GetNfsServiceClass(cloud)
        self.nfs_service = nfs_class(disk_spec, group_spec.vm_spec.zone)
      logging.debug('NFS service %s', self.nfs_service)
      break

  def ConstructSmbService(self):
    """Construct the SMB service object.

    Creates an SMB Service only if an SMB disk is found in the disk_specs.
    """
    if self.smb_service:
      logging.info('SMB service already created: %s', self.smb_service)
      return
    for group_spec in self.config.vm_groups.values():
      if not group_spec.disk_spec or not group_spec.vm_count:
        continue
      disk_spec = group_spec.disk_spec
      if disk_spec.disk_type != disk.SMB:
        continue
      cloud = group_spec.cloud
      providers.LoadProvider(cloud)
      smb_class = smb_service.GetSmbServiceClass(cloud)
      self.smb_service = smb_class(disk_spec, group_spec.vm_spec.zone)
      logging.debug('SMB service %s', self.smb_service)
      break

  def ConstructVirtualMachineGroup(self, group_name, group_spec):
    """Construct the virtual machine(s) needed for a group."""
    vms = []
    vm_count = group_spec.vm_count
    disk_count = group_spec.disk_count
    # First create the Static VM objects.
    if group_spec.static_vms:
      specs = [
          spec for spec in group_spec.static_vms
          if (FLAGS.static_vm_tags is None or spec.tag in FLAGS.static_vm_tags)
      ][:vm_count]
      for vm_spec in specs:
        static_vm_class = static_vm.GetStaticVmClass(vm_spec.os_type)
        vms.append(static_vm_class(vm_spec))
    os_type = group_spec.os_type
    cloud = group_spec.cloud
    # This throws an exception if the benchmark is not
    # supported.
    self._CheckBenchmarkSupport(cloud)
    # Then create the remaining VM objects using VM and disk specs.
    if group_spec.disk_spec:
      disk_spec = group_spec.disk_spec
      # disk_spec.disk_type may contain legacy values that were
      # copied from FLAGS.scratch_disk_type into
      # FLAGS.data_disk_type at the beginning of the run. We
      # translate them here, rather than earlier, because here is
      # where we know what cloud we're using and therefore we're
      # able to pick the right translation table.
      disk_spec.disk_type = disk.WarnAndTranslateDiskTypes(
          disk_spec.disk_type, cloud)
    else:
      disk_spec = None
    for _ in range(vm_count - len(vms)):
      # Assign a zone to each VM sequentially from the --zones flag.
      if FLAGS.zones or FLAGS.extra_zones or FLAGS.zone:
        zone_list = FLAGS.zones + FLAGS.extra_zones + FLAGS.zone
        group_spec.vm_spec.zone = zone_list[self._zone_index]
        # Round-robin through the zone list across all VM groups.
        self._zone_index = (self._zone_index + 1
                            if self._zone_index < len(zone_list) - 1 else 0)
      vm = self._CreateVirtualMachine(group_spec.vm_spec, os_type, cloud)
      if disk_spec and not vm.is_static:
        if disk_spec.disk_type == disk.LOCAL and disk_count is None:
          disk_count = vm.max_local_disks
        vm.disk_specs = [copy.copy(disk_spec) for _ in range(disk_count)]
        # In the event that we need to create multiple disks from the same
        # DiskSpec, we need to ensure that they have different mount points.
        if (disk_count > 1 and disk_spec.mount_point):
          for i, spec in enumerate(vm.disk_specs):
            spec.mount_point += str(i)
      vms.append(vm)
    return vms

  def ConstructCapacityReservations(self):
    """Construct capacity reservations for each VM group."""
    if not FLAGS.use_capacity_reservations:
      return
    for vm_group in six.itervalues(self.vm_groups):
      cloud = vm_group[0].CLOUD
      providers.LoadProvider(cloud)
      capacity_reservation_class = capacity_reservation.GetResourceClass(
          cloud)
      self.capacity_reservations.append(
          capacity_reservation_class(vm_group))

  def _CheckBenchmarkSupport(self, cloud):
    """Throw an exception if the benchmark isn't supported."""
    if FLAGS.benchmark_compatibility_checking == SKIP_CHECK:
      return
    provider_info_class = provider_info.GetProviderInfoClass(cloud)
    benchmark_ok = provider_info_class.IsBenchmarkSupported(self.name)
    if FLAGS.benchmark_compatibility_checking == NOT_EXCLUDED:
      # In permissive mode, an unknown (None) support status counts as ok.
      if benchmark_ok is None:
        benchmark_ok = True
    if not benchmark_ok:
      raise ValueError('Provider {0} does not support {1}. Use '
                       '--benchmark_compatibility_checking=none '
                       'to override this check.'.format(
                           provider_info_class.CLOUD, self.name))

  def _ConstructJujuController(self, group_spec):
    """Construct a VirtualMachine object for a Juju controller."""
    juju_spec = copy.copy(group_spec)
    juju_spec.vm_count = 1
    jujuvms = self.ConstructVirtualMachineGroup('juju', juju_spec)
    if len(jujuvms):
      jujuvm = jujuvms.pop()
      jujuvm.is_controller = True
      return jujuvm
    return None

  def ConstructVirtualMachines(self):
    """Constructs the BenchmarkSpec's VirtualMachine objects."""
    vm_group_specs = self.config.vm_groups
    clouds = {}
    for group_name, group_spec in sorted(six.iteritems(vm_group_specs)):
      vms = self.ConstructVirtualMachineGroup(group_name, group_spec)
      if group_spec.os_type == os_types.JUJU:
        # The Juju VM needs to be created first, so that subsequent units can
        # be properly added under its control.
        if group_spec.cloud in clouds:
          jujuvm = clouds[group_spec.cloud]
        else:
          jujuvm = self._ConstructJujuController(group_spec)
          clouds[group_spec.cloud] = jujuvm
        for vm in vms:
          vm.controller = clouds[group_spec.cloud]
          vm.vm_group = group_name
        jujuvm.units.extend(vms)
        if jujuvm and jujuvm not in self.vms:
          self.vms.extend([jujuvm])
          self.vm_groups['%s_juju_controller' % group_spec.cloud] = [jujuvm]
      self.vm_groups[group_name] = vms
      self.vms.extend(vms)
    # If we have a spark service, it needs to access the master_group and
    # the worker group.
    if (self.config.spark_service and
        self.config.spark_service.service_type == spark_service.PKB_MANAGED):
      for group_name in 'master_group', 'worker_group':
        self.spark_service.vms[group_name] = self.vm_groups[group_name]

  def ConstructSparkService(self):
    """Create the spark_service object and create groups for its vms."""
    if self.config.spark_service is None:
      return
    spark_spec = self.config.spark_service
    # Worker group is required, master group is optional
    cloud = spark_spec.worker_group.cloud
    if spark_spec.master_group:
      cloud = spark_spec.master_group.cloud
    providers.LoadProvider(cloud)
    service_type = spark_spec.service_type
    spark_service_class = spark_service.GetSparkServiceClass(
        cloud, service_type)
    self.spark_service = spark_service_class(spark_spec)
    # If this is Pkb managed, the benchmark spec needs to adopt vms.
    if service_type == spark_service.PKB_MANAGED:
      for name, spec in [('master_group', spark_spec.master_group),
                         ('worker_group', spark_spec.worker_group)]:
        if name in self.config.vm_groups:
          raise Exception('Cannot have a vm group {0} with a {1} spark '
                          'service'.format(name, spark_service.PKB_MANAGED))
        self.config.vm_groups[name] = spec

  def Prepare(self):
    """Prepares the background workload on all VMs in parallel."""
    targets = [(vm.PrepareBackgroundWorkload, (), {}) for vm in self.vms]
    vm_util.RunParallelThreads(targets, len(targets))

  def Provision(self):
    """Prepares the VMs and networks necessary for the benchmark to run."""
    # Create capacity reservations if the cloud supports it. Note that the
    # capacity reservation class may update the VMs themselves. This is true
    # on AWS, because the VM needs to be aware of the capacity reservation id
    # before its Create() method is called. Furthermore, if the user does not
    # specify an AWS zone, but a region instead, the AwsCapacityReservation
    # class will make a reservation in a zone that has sufficient capacity.
    # In this case the VM's zone attribute, and the VMs network instance
    # need to be updated as well.
    if self.capacity_reservations:
      vm_util.RunThreaded(lambda res: res.Create(), self.capacity_reservations)
    # Sort networks into a guaranteed order of creation based on dict key.
    # There is a finite limit on the number of threads that are created to
    # provision networks. Until support is added to provision resources in an
    # order based on dependencies, this key ordering can be used to avoid
    # deadlock by placing dependent networks later and their dependencies
    # earlier. As an example, AWS stores both per-region and per-zone objects
    # in this dict, and each per-zone object depends on a corresponding
    # per-region object, so the per-region objects are given keys that come
    # first when sorted.
    networks = [self.networks[key]
                for key in sorted(six.iterkeys(self.networks))]
    vm_util.RunThreaded(lambda net: net.Create(), networks)
    if self.container_registry:
      self.container_registry.Create()
      for container_spec in six.itervalues(self.container_specs):
        if container_spec.static_image:
          continue
        container_spec.image = self.container_registry.GetOrBuild(
            container_spec.image)
    if self.container_cluster:
      self.container_cluster.Create()
    # do after network setup but before VM created
    if self.nfs_service:
      self.nfs_service.Create()
    if self.smb_service:
      self.smb_service.Create()
    if self.vms:
      # We separate out creating, booting, and preparing the VMs into two phases
      # so that we don't slow down the creation of all the VMs by running
      # commands on the VMs that booted.
      vm_util.RunThreaded(
          self.CreateAndBootVm,
          self.vms,
          post_task_delay=FLAGS.create_and_boot_post_task_delay)
      vm_util.RunThreaded(self.PrepareVmAfterBoot, self.vms)
      sshable_vms = [
          vm for vm in self.vms if vm.OS_TYPE not in os_types.WINDOWS_OS_TYPES
      ]
      sshable_vm_groups = {}
      for group_name, group_vms in six.iteritems(self.vm_groups):
        sshable_vm_groups[group_name] = [
            vm for vm in group_vms
            if vm.OS_TYPE not in os_types.WINDOWS_OS_TYPES
        ]
      vm_util.GenerateSSHConfig(sshable_vms, sshable_vm_groups)
    if self.spark_service:
      self.spark_service.Create()
    if self.dpb_service:
      self.dpb_service.Create()
    if self.managed_relational_db:
      self.managed_relational_db.client_vm = self.vms[0]
      self.managed_relational_db.Create()
    if self.tpus:
      vm_util.RunThreaded(lambda tpu: tpu.Create(), self.tpus)
    if self.edw_service:
      if not self.edw_service.user_managed:
        # The benchmark creates the Redshift cluster's subnet group in the
        # already provisioned virtual private cloud (vpc).
        for network in networks:
          if network.__class__.__name__ == 'AwsNetwork':
            self.config.edw_service.subnet_id = network.subnet.id
      self.edw_service.Create()

  def Delete(self):
    """Deletes all benchmark resources, continuing past individual failures."""
    if self.deleted:
      return
    if self.container_registry:
      self.container_registry.Delete()
    if self.spark_service:
      self.spark_service.Delete()
    if self.dpb_service:
      self.dpb_service.Delete()
    if self.managed_relational_db:
      self.managed_relational_db.Delete()
    if self.tpus:
      vm_util.RunThreaded(lambda tpu: tpu.Delete(), self.tpus)
    if self.edw_service:
      self.edw_service.Delete()
    if self.nfs_service:
      self.nfs_service.Delete()
    if self.smb_service:
      self.smb_service.Delete()
    # Note: It is ok to delete capacity reservations before deleting the VMs,
    # and will actually save money (mere seconds of usage).
    if self.capacity_reservations:
      try:
        vm_util.RunThreaded(lambda reservation: reservation.Delete(),
                            self.capacity_reservations)
      except Exception:  # pylint: disable=broad-except
        logging.exception('Got an exception deleting CapacityReservations. '
                          'Attempting to continue tearing down.')
    if self.vms:
      try:
        vm_util.RunThreaded(self.DeleteVm, self.vms)
      except Exception:  # pylint: disable=broad-except
        logging.exception('Got an exception deleting VMs. '
                          'Attempting to continue tearing down.')
    for firewall in six.itervalues(self.firewalls):
      try:
        firewall.DisallowAllPorts()
      except Exception:  # pylint: disable=broad-except
        logging.exception('Got an exception disabling firewalls. '
                          'Attempting to continue tearing down.')
    if self.container_cluster:
      self.container_cluster.DeleteServices()
      self.container_cluster.DeleteContainers()
      self.container_cluster.Delete()
    for net in six.itervalues(self.networks):
      try:
        net.Delete()
      except Exception:  # pylint: disable=broad-except
        logging.exception('Got an exception deleting networks. '
                          'Attempting to continue tearing down.')
    self.deleted = True

  def GetSamples(self):
    """Returns samples created from benchmark resources."""
    samples = []
    if self.container_cluster:
      samples.extend(self.container_cluster.GetSamples())
    if self.container_registry:
      samples.extend(self.container_registry.GetSamples())
    return samples

  def StartBackgroundWorkload(self):
    """Starts the background workload on all VMs in parallel."""
    targets = [(vm.StartBackgroundWorkload, (), {}) for vm in self.vms]
    vm_util.RunParallelThreads(targets, len(targets))

  def StopBackgroundWorkload(self):
    """Stops the background workload on all VMs in parallel."""
    targets = [(vm.StopBackgroundWorkload, (), {}) for vm in self.vms]
    vm_util.RunParallelThreads(targets, len(targets))

  def _GetResourceDict(self, time_format, timeout_minutes=None):
    """Gets a list of tags to be used to tag resources."""
    now_utc = datetime.datetime.utcnow()
    if not timeout_minutes:
      timeout_minutes = FLAGS.timeout_minutes
    timeout_utc = (
        now_utc +
        datetime.timedelta(minutes=timeout_minutes))
    tags = {
        'timeout_utc': timeout_utc.strftime(time_format),
        'create_time_utc': now_utc.strftime(time_format),
        'benchmark': self.name,
        'perfkit_uuid': self.uuid,
        'owner': FLAGS.owner
    }
    return tags

  def GetResourceTags(self, timeout_minutes=None):
    """Gets a list of tags to be used to tag resources."""
    return self._GetResourceDict(METADATA_TIME_FORMAT, timeout_minutes)

  def _CreateVirtualMachine(self, vm_spec, os_type, cloud):
    """Create a vm in zone.

    Args:
      vm_spec: A virtual_machine.BaseVmSpec object.
      os_type: The type of operating system for the VM. See the flag of the
        same name for more information.
      cloud: The cloud for the VM. See the flag of the same name for more
        information.

    Returns:
      A virtual_machine.BaseVirtualMachine object.
    """
    vm = static_vm.StaticVirtualMachine.GetStaticVirtualMachine()
    if vm:
      return vm
    vm_class = virtual_machine.GetVmClass(cloud, os_type)
    if vm_class is None:
      # Fixed mismatched quotes in the error message (was: 'type %s"').
      raise errors.Error(
          'VMs of type "%s" are not currently supported on cloud "%s".' %
          (os_type, cloud))
    return vm_class(vm_spec)

  def CreateAndBootVm(self, vm):
    """Creates a single VM and waits for boot to complete.

    Args:
      vm: The BaseVirtualMachine object representing the VM.
    """
    vm.Create()
    logging.info('VM: %s', vm.ip_address)
    logging.info('Waiting for boot completion.')
    vm.AllowRemoteAccessPorts()
    vm.WaitForBootCompletion()

  def PrepareVmAfterBoot(self, vm):
    """Prepares a VM after it has booted.

    This function will prepare a scratch disk if required.

    Args:
      vm: The BaseVirtualMachine object representing the VM.

    Raises:
      Exception: If --vm_metadata is malformed.
    """
    vm_metadata = {
        'benchmark': self.name,
        'perfkit_uuid': self.uuid,
        'benchmark_uid': self.uid,
        'create_time_utc':
            datetime.datetime.utcfromtimestamp(vm.create_start_time),
        'owner': FLAGS.owner
    }
    for item in FLAGS.vm_metadata:
      if ':' not in item:
        raise Exception('"%s" not in expected key:value format' % item)
      key, value = item.split(':', 1)
      vm_metadata[key] = value
    vm.AddMetadata(**vm_metadata)
    vm.OnStartup()
    if any((spec.disk_type == disk.LOCAL for spec in vm.disk_specs)):
      vm.SetupLocalDisks()
    for disk_spec in vm.disk_specs:
      if disk_spec.disk_type == disk.RAM:
        vm.CreateRamDisk(disk_spec)
      else:
        vm.CreateScratchDisk(disk_spec)
      # TODO(user): Simplify disk logic.
      if disk_spec.num_striped_disks > 1:
        # scratch disks has already been created and striped together.
        break
    # This must come after Scratch Disk creation to support the
    # Containerized VM case
    vm.PrepareVMEnvironment()

  def DeleteVm(self, vm):
    """Deletes a single vm and scratch disk if required.

    Args:
      vm: The BaseVirtualMachine object representing the VM.
    """
    if vm.is_static and vm.install_packages:
      vm.PackageCleanup()
    vm.Delete()
    vm.DeleteScratchDisks()

  @staticmethod
  def _GetPickleFilename(uid):
    """Returns the filename for the pickled BenchmarkSpec."""
    return os.path.join(vm_util.GetTempDir(), uid)

  def Pickle(self):
    """Pickles the spec so that it can be unpickled on a subsequent run."""
    with open(self._GetPickleFilename(self.uid), 'wb') as pickle_file:
      pickle.dump(self, pickle_file, 2)

  @classmethod
  def GetBenchmarkSpec(cls, benchmark_module, config, uid):
    """Unpickles or creates a BenchmarkSpec and returns it.

    Args:
      benchmark_module: The benchmark module object.
      config: BenchmarkConfigSpec. The configuration for the benchmark.
      uid: An identifier unique to this run of the benchmark even if the same
        benchmark is run multiple times with different configs.

    Returns:
      A BenchmarkSpec object.
    """
    if stages.PROVISION in FLAGS.run_stage:
      return cls(benchmark_module, config, uid)
    try:
      with open(cls._GetPickleFilename(uid), 'rb') as pickle_file:
        spec = pickle.load(pickle_file)
    except Exception as e:  # pylint: disable=broad-except
      logging.error('Unable to unpickle spec file for benchmark %s.',
                    benchmark_module.BENCHMARK_NAME)
      raise e
    # Always let the spec be deleted after being unpickled so that
    # it's possible to run cleanup even if cleanup has already run.
    spec.deleted = False
    spec.status = benchmark_status.SKIPPED
    context.SetThreadBenchmarkSpec(spec)
    return spec
| 37.557692 | 80 | 0.697901 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import copy
import datetime
import importlib
import logging
import os
import pickle
import threading
import uuid
from perfkitbenchmarker import benchmark_status
from perfkitbenchmarker import capacity_reservation
from perfkitbenchmarker import cloud_tpu
from perfkitbenchmarker import container_service
from perfkitbenchmarker import context
from perfkitbenchmarker import disk
from perfkitbenchmarker import dpb_service
from perfkitbenchmarker import edw_service
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import managed_relational_db
from perfkitbenchmarker import nfs_service
from perfkitbenchmarker import os_types
from perfkitbenchmarker import provider_info
from perfkitbenchmarker import providers
from perfkitbenchmarker import smb_service
from perfkitbenchmarker import spark_service
from perfkitbenchmarker import stages
from perfkitbenchmarker import static_virtual_machine as static_vm
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
import six
from six.moves import range
import six.moves._thread
import six.moves.copyreg
def PickleLock(lock):
  """Reducer for threading.Lock: persist only whether the lock is held."""
  was_locked = lock.locked()
  return UnPickleLock, (was_locked,)


def UnPickleLock(locked, *args):
  """Rebuild a threading.Lock, re-acquiring it if it was held when pickled.

  Raises:
    pickle.UnpicklingError: If the new lock cannot be acquired.
  """
  new_lock = threading.Lock()
  if not locked:
    return new_lock
  if new_lock.acquire(False):
    return new_lock
  raise pickle.UnpicklingError('Cannot acquire lock')
# Register the custom reducer so threading locks held by a BenchmarkSpec can
# round-trip through pickle (see PickleLock/UnPickleLock above).
six.moves.copyreg.pickle(six.moves._thread.LockType, PickleLock)
# Modes for --benchmark_compatibility_checking (defined below).
SUPPORTED = 'strict'
NOT_EXCLUDED = 'permissive'
SKIP_CHECK = 'none'
# Timestamp format used when tagging resources; restricted to characters
# accepted by cloud resource labels (e.g. GCP).
METADATA_TIME_FORMAT = '%Y%m%dt%H%M%Sz'
FLAGS = flags.FLAGS
flags.DEFINE_enum('cloud', providers.GCP, providers.VALID_CLOUDS,
                  'Name of the cloud to use.')
flags.DEFINE_string('scratch_dir', None,
                    'Base name for all scratch disk directories in the VM. '
                    'Upon creation, these directories will have numbers '
                    'appended to them (for example /scratch0, /scratch1, etc).')
flags.DEFINE_string('startup_script', None,
                    'Script to run right after vm boot.')
flags.DEFINE_string('postrun_script', None,
                    'Script to run right after run stage.')
flags.DEFINE_integer('create_and_boot_post_task_delay', None,
                     'Delay in seconds to delay in between boot tasks.')
flags.DEFINE_enum('benchmark_compatibility_checking', SUPPORTED,
                  [SUPPORTED, NOT_EXCLUDED, SKIP_CHECK],
                  'Method used to check compatibility between the benchmark '
                  ' and the cloud. ' + SUPPORTED + ' runs the benchmark only'
                  ' if the cloud provider has declared it supported. ' +
                  NOT_EXCLUDED + ' runs the benchmark unless it has been'
                  ' declared not supported by the cloud provider. ' + SKIP_CHECK
                  + ' does not do the compatibility'
                  ' check.')
class BenchmarkSpec(object):
total_benchmarks = 0
def __init__(self, benchmark_module, benchmark_config, benchmark_uid):
self.config = benchmark_config
self.name = benchmark_module.BENCHMARK_NAME
self.uid = benchmark_uid
self.status = benchmark_status.SKIPPED
self.failed_substatus = None
self.status_detail = None
BenchmarkSpec.total_benchmarks += 1
self.sequence_number = BenchmarkSpec.total_benchmarks
self.vms = []
self.networks = {}
self.firewalls = {}
self.networks_lock = threading.Lock()
self.firewalls_lock = threading.Lock()
self.vm_groups = {}
self.container_specs = benchmark_config.container_specs or {}
self.container_registry = None
self.deleted = False
self.uuid = '%s-%s' % (FLAGS.run_uri, uuid.uuid4())
self.always_call_cleanup = False
self.spark_service = None
self.dpb_service = None
self.container_cluster = None
self.managed_relational_db = None
self.tpus = []
self.tpu_groups = {}
self.edw_service = None
self.nfs_service = None
self.smb_service = None
self.app_groups = {}
self._zone_index = 0
self.capacity_reservations = []
# necessary to run the benchmark.
self.BenchmarkPrepare = benchmark_module.Prepare
self.BenchmarkRun = benchmark_module.Run
self.BenchmarkCleanup = benchmark_module.Cleanup
# Set the current thread's BenchmarkSpec object to this one.
context.SetThreadBenchmarkSpec(self)
def __repr__(self):
return '%s(%r)' % (self.__class__, self.__dict__)
def __str__(self):
return(
'Benchmark name: {0}\nFlags: {1}'
.format(self.name, self.config.flags))
@contextlib.contextmanager
def RedirectGlobalFlags(self):
with self.config.RedirectFlags(FLAGS):
yield
def ConstructContainerCluster(self):
if self.config.container_cluster is None:
return
cloud = self.config.container_cluster.cloud
cluster_type = self.config.container_cluster.type
providers.LoadProvider(cloud)
container_cluster_class = container_service.GetContainerClusterClass(
cloud, cluster_type)
self.container_cluster = container_cluster_class(
self.config.container_cluster)
def ConstructContainerRegistry(self):
if self.config.container_registry is None:
return
cloud = self.config.container_registry.cloud
providers.LoadProvider(cloud)
container_registry_class = container_service.GetContainerRegistryClass(
cloud)
self.container_registry = container_registry_class(
self.config.container_registry)
def ConstructDpbService(self):
if self.config.dpb_service is None:
return
providers.LoadProvider(self.config.dpb_service.worker_group.cloud)
dpb_service_class = dpb_service.GetDpbServiceClass(
self.config.dpb_service.service_type)
self.dpb_service = dpb_service_class(self.config.dpb_service)
def ConstructManagedRelationalDb(self):
if self.config.managed_relational_db is None:
return
cloud = self.config.managed_relational_db.cloud
providers.LoadProvider(cloud)
managed_relational_db_class = (
managed_relational_db.GetManagedRelationalDbClass(cloud))
self.managed_relational_db = managed_relational_db_class(
self.config.managed_relational_db)
def ConstructTpuGroup(self, group_spec):
if group_spec is None:
return
cloud = group_spec.cloud
providers.LoadProvider(cloud)
tpu_class = cloud_tpu.GetTpuClass(cloud)
return tpu_class(group_spec)
def ConstructTpu(self):
tpu_group_specs = self.config.tpu_groups
for group_name, group_spec in sorted(six.iteritems(tpu_group_specs)):
tpu = self.ConstructTpuGroup(group_spec)
self.tpu_groups[group_name] = tpu
self.tpus.append(tpu)
def ConstructEdwService(self):
if self.config.edw_service is None:
return
providers.LoadProvider(
edw_service.TYPE_2_PROVIDER.get(self.config.edw_service.type))
edw_service_module = importlib.import_module(edw_service.TYPE_2_MODULE.get(
self.config.edw_service.type))
edw_service_class = getattr(edw_service_module,
self.config.edw_service.type[0].upper() +
self.config.edw_service.type[1:])
self.edw_service = edw_service_class(self.config.edw_service)
def ConstructNfsService(self):
if self.nfs_service:
logging.info('NFS service already created: %s', self.nfs_service)
return
for group_spec in self.config.vm_groups.values():
if not group_spec.disk_spec or not group_spec.vm_count:
continue
disk_spec = group_spec.disk_spec
if disk_spec.disk_type != disk.NFS:
continue
if disk_spec.nfs_ip_address:
self.nfs_service = nfs_service.StaticNfsService(disk_spec)
else:
cloud = group_spec.cloud
providers.LoadProvider(cloud)
nfs_class = nfs_service.GetNfsServiceClass(cloud)
self.nfs_service = nfs_class(disk_spec, group_spec.vm_spec.zone)
logging.debug('NFS service %s', self.nfs_service)
break
def ConstructSmbService(self):
if self.smb_service:
logging.info('SMB service already created: %s', self.smb_service)
return
for group_spec in self.config.vm_groups.values():
if not group_spec.disk_spec or not group_spec.vm_count:
continue
disk_spec = group_spec.disk_spec
if disk_spec.disk_type != disk.SMB:
continue
cloud = group_spec.cloud
providers.LoadProvider(cloud)
smb_class = smb_service.GetSmbServiceClass(cloud)
self.smb_service = smb_class(disk_spec, group_spec.vm_spec.zone)
logging.debug('SMB service %s', self.smb_service)
break
def ConstructVirtualMachineGroup(self, group_name, group_spec):
vms = []
vm_count = group_spec.vm_count
disk_count = group_spec.disk_count
if group_spec.static_vms:
specs = [
spec for spec in group_spec.static_vms
if (FLAGS.static_vm_tags is None or spec.tag in FLAGS.static_vm_tags)
][:vm_count]
for vm_spec in specs:
static_vm_class = static_vm.GetStaticVmClass(vm_spec.os_type)
vms.append(static_vm_class(vm_spec))
os_type = group_spec.os_type
cloud = group_spec.cloud
self._CheckBenchmarkSupport(cloud)
if group_spec.disk_spec:
disk_spec = group_spec.disk_spec
disk_spec.disk_type = disk.WarnAndTranslateDiskTypes(
disk_spec.disk_type, cloud)
else:
disk_spec = None
for _ in range(vm_count - len(vms)):
if FLAGS.zones or FLAGS.extra_zones or FLAGS.zone:
zone_list = FLAGS.zones + FLAGS.extra_zones + FLAGS.zone
group_spec.vm_spec.zone = zone_list[self._zone_index]
self._zone_index = (self._zone_index + 1
if self._zone_index < len(zone_list) - 1 else 0)
vm = self._CreateVirtualMachine(group_spec.vm_spec, os_type, cloud)
if disk_spec and not vm.is_static:
if disk_spec.disk_type == disk.LOCAL and disk_count is None:
disk_count = vm.max_local_disks
vm.disk_specs = [copy.copy(disk_spec) for _ in range(disk_count)]
if (disk_count > 1 and disk_spec.mount_point):
for i, spec in enumerate(vm.disk_specs):
spec.mount_point += str(i)
vms.append(vm)
return vms
def ConstructCapacityReservations(self):
    """Create one CapacityReservation object per VM group.

    No-op unless --use_capacity_reservations is set.  The cloud is taken
    from the first VM of each group, so each group is assumed to be
    single-cloud.
    """
    if not FLAGS.use_capacity_reservations:
        return
    for vm_group in six.itervalues(self.vm_groups):
        cloud = vm_group[0].CLOUD
        providers.LoadProvider(cloud)
        capacity_reservation_class = capacity_reservation.GetResourceClass(
            cloud)
        self.capacity_reservations.append(
            capacity_reservation_class(vm_group))
def _CheckBenchmarkSupport(self, cloud):
    """Raise ValueError if this benchmark is unsupported on *cloud*.

    Honors --benchmark_compatibility_checking: SKIP_CHECK disables the
    check entirely; NOT_EXCLUDED treats an unknown (None) support status
    as supported.
    """
    if FLAGS.benchmark_compatibility_checking == SKIP_CHECK:
        return
    provider_info_class = provider_info.GetProviderInfoClass(cloud)
    benchmark_ok = provider_info_class.IsBenchmarkSupported(self.name)
    if FLAGS.benchmark_compatibility_checking == NOT_EXCLUDED:
        # Unknown support status counts as supported under NOT_EXCLUDED.
        if benchmark_ok is None:
            benchmark_ok = True
    if not benchmark_ok:
        raise ValueError('Provider {0} does not support {1}. Use '
                         '--benchmark_compatibility_checking=none '
                         'to override this check.'.format(
                             provider_info_class.CLOUD, self.name))
def _ConstructJujuController(self, group_spec):
    """Construct a single Juju controller VM modeled on *group_spec*.

    A shallow copy of the group spec is made with vm_count forced to 1,
    and the resulting VM is flagged as a controller.

    Args:
      group_spec: the VM group spec the controller should mirror.

    Returns:
      The controller VM object, or None if no VM could be constructed.
    """
    juju_spec = copy.copy(group_spec)
    juju_spec.vm_count = 1
    jujuvms = self.ConstructVirtualMachineGroup('juju', juju_spec)
    # Idiom fix: truthiness test instead of the non-idiomatic len() check.
    if jujuvms:
        jujuvm = jujuvms.pop()
        jujuvm.is_controller = True
        return jujuvm
    return None
def ConstructVirtualMachines(self):
    """Construct VMs for every group and register them on the spec.

    Groups are processed in sorted name order.  JUJU-os groups get a
    per-cloud Juju controller VM created (or reused) and each unit VM is
    pointed at it.  When a PKB-managed spark service exists, its
    master/worker VM lists are wired up at the end.
    """
    vm_group_specs = self.config.vm_groups
    # Maps cloud name -> Juju controller VM, so one controller is shared
    # by all JUJU groups on the same cloud.
    clouds = {}
    for group_name, group_spec in sorted(six.iteritems(vm_group_specs)):
        vms = self.ConstructVirtualMachineGroup(group_name, group_spec)
        if group_spec.os_type == os_types.JUJU:
            if group_spec.cloud in clouds:
                jujuvm = clouds[group_spec.cloud]
            else:
                jujuvm = self._ConstructJujuController(group_spec)
                clouds[group_spec.cloud] = jujuvm
            for vm in vms:
                vm.controller = clouds[group_spec.cloud]
                vm.vm_group = group_name
            # NOTE(review): if _ConstructJujuController returned None this
            # raises AttributeError before the None guard below -- confirm
            # intended upstream behavior.
            jujuvm.units.extend(vms)
            if jujuvm and jujuvm not in self.vms:
                self.vms.extend([jujuvm])
                self.vm_groups['%s_juju_controller' % group_spec.cloud] = [jujuvm]
        self.vm_groups[group_name] = vms
        self.vms.extend(vms)
    # A PKB-managed spark service needs direct access to the master and
    # worker VM groups constructed above.
    if (self.config.spark_service and
            self.config.spark_service.service_type == spark_service.PKB_MANAGED):
        for group_name in 'master_group', 'worker_group':
            self.spark_service.vms[group_name] = self.vm_groups[group_name]
def ConstructSparkService(self):
    """Create the spark service object described by the benchmark config.

    For PKB-managed spark services, also registers the master/worker VM
    group specs so their VMs are created with the rest of the benchmark.
    Raises a generic Exception when a PKB-managed spark service collides
    with an explicitly configured master_group/worker_group.
    """
    if self.config.spark_service is None:
        return
    spark_spec = self.config.spark_service
    # Worker group is required; when a master group exists its cloud wins.
    cloud = spark_spec.worker_group.cloud
    if spark_spec.master_group:
        cloud = spark_spec.master_group.cloud
    providers.LoadProvider(cloud)
    service_type = spark_spec.service_type
    spark_service_class = spark_service.GetSparkServiceClass(
        cloud, service_type)
    self.spark_service = spark_service_class(spark_spec)
    # PKB-managed services provision their own master/worker VM groups.
    if service_type == spark_service.PKB_MANAGED:
        for name, spec in [('master_group', spark_spec.master_group),
                           ('worker_group', spark_spec.worker_group)]:
            if name in self.config.vm_groups:
                raise Exception('Cannot have a vm group {0} with a {1} spark '
                                'service'.format(name, spark_service.PKB_MANAGED))
            self.config.vm_groups[name] = spec
def Prepare(self):
    """Prepare background workloads on every VM, one thread per VM."""
    tasks = [(vm.PrepareBackgroundWorkload, (), {}) for vm in self.vms]
    vm_util.RunParallelThreads(tasks, len(tasks))
def Provision(self):
    """Create every resource the benchmark needs, in dependency order.

    Order: capacity reservations, networks, container registry/cluster,
    file services (NFS/SMB), VMs (create+boot, then prepare), spark/dpb
    services, managed relational DB, TPUs, and finally the EDW service.
    """
    # Capacity reservations can mutate the VM objects (e.g. zone), so the
    # VMs may need to be updated as well -- create reservations first.
    if self.capacity_reservations:
        vm_util.RunThreaded(lambda res: res.Create(), self.capacity_reservations)
    # Sort networks into a guaranteed order of creation based on dict key.
    # There is a finite limit on the number of threads that are created to
    # provision networks. Until support is added to provision resources in an
    # order based on dependencies, this key ordering can be used to avoid
    # deadlock by placing dependent networks later and their dependencies
    # earlier. As an example, AWS stores both per-region and per-zone objects
    # in this dict, and each per-zone object depends on a corresponding
    # per-region object, so the per-region objects are given keys that come
    # first when sorted.
    networks = [self.networks[key]
                for key in sorted(six.iterkeys(self.networks))]
    vm_util.RunThreaded(lambda net: net.Create(), networks)
    if self.container_registry:
        self.container_registry.Create()
        # Build (or fetch) every non-static container image up front.
        for container_spec in six.itervalues(self.container_specs):
            if container_spec.static_image:
                continue
            container_spec.image = self.container_registry.GetOrBuild(
                container_spec.image)
    if self.container_cluster:
        self.container_cluster.Create()
    # File services: do after network setup but before VMs are created.
    if self.nfs_service:
        self.nfs_service.Create()
    if self.smb_service:
        self.smb_service.Create()
    if self.vms:
        # We separate out creating/booting and preparing the VMs into two
        # phases so that slow per-VM preparation does not delay creation.
        vm_util.RunThreaded(
            self.CreateAndBootVm,
            self.vms,
            post_task_delay=FLAGS.create_and_boot_post_task_delay)
        vm_util.RunThreaded(self.PrepareVmAfterBoot, self.vms)
        # Generate an SSH config for the non-Windows VMs only.
        sshable_vms = [
            vm for vm in self.vms if vm.OS_TYPE not in os_types.WINDOWS_OS_TYPES
        ]
        sshable_vm_groups = {}
        for group_name, group_vms in six.iteritems(self.vm_groups):
            sshable_vm_groups[group_name] = [
                vm for vm in group_vms
                if vm.OS_TYPE not in os_types.WINDOWS_OS_TYPES
            ]
        vm_util.GenerateSSHConfig(sshable_vms, sshable_vm_groups)
    if self.spark_service:
        self.spark_service.Create()
    if self.dpb_service:
        self.dpb_service.Create()
    if self.managed_relational_db:
        # The first VM doubles as the database client.
        self.managed_relational_db.client_vm = self.vms[0]
        self.managed_relational_db.Create()
    if self.tpus:
        vm_util.RunThreaded(lambda tpu: tpu.Create(), self.tpus)
    if self.edw_service:
        if not self.edw_service.user_managed:
            # Reuse the already provisioned virtual private cloud (vpc):
            # point the EDW service at the AWS network's subnet.
            for network in networks:
                if network.__class__.__name__ == 'AwsNetwork':
                    self.config.edw_service.subnet_id = network.subnet.id
        self.edw_service.Create()
def Delete(self):
    """Tear down every resource owned by this benchmark spec.

    Teardown is best-effort: per-resource failures are logged and the
    method continues so that as much as possible is cleaned up.
    Idempotent via the self.deleted guard.
    """
    if self.deleted:
        return
    if self.container_registry:
        self.container_registry.Delete()
    if self.spark_service:
        self.spark_service.Delete()
    if self.dpb_service:
        self.dpb_service.Delete()
    if self.managed_relational_db:
        self.managed_relational_db.Delete()
    if self.tpus:
        vm_util.RunThreaded(lambda tpu: tpu.Delete(), self.tpus)
    if self.edw_service:
        self.edw_service.Delete()
    if self.nfs_service:
        self.nfs_service.Delete()
    if self.smb_service:
        self.smb_service.Delete()
    # Note: It is ok to delete capacity reservations before deleting the VMs,
    # and will actually save money (mere seconds of usage).
    if self.capacity_reservations:
        try:
            vm_util.RunThreaded(lambda reservation: reservation.Delete(),
                                self.capacity_reservations)
        except Exception:  # pylint: disable=broad-except
            logging.exception('Got an exception deleting CapacityReservations. '
                              'Attempting to continue tearing down.')
    if self.vms:
        try:
            vm_util.RunThreaded(self.DeleteVm, self.vms)
        except Exception:
            logging.exception('Got an exception deleting VMs. '
                              'Attempting to continue tearing down.')
    for firewall in six.itervalues(self.firewalls):
        try:
            firewall.DisallowAllPorts()
        except Exception:
            logging.exception('Got an exception disabling firewalls. '
                              'Attempting to continue tearing down.')
    if self.container_cluster:
        # Services, then containers, then the cluster itself.
        self.container_cluster.DeleteServices()
        self.container_cluster.DeleteContainers()
        self.container_cluster.Delete()
    for net in six.itervalues(self.networks):
        try:
            net.Delete()
        except Exception:
            logging.exception('Got an exception deleting networks. '
                              'Attempting to continue tearing down.')
    self.deleted = True
def GetSamples(self):
    """Collect benchmark samples from the container resources, if any.

    Returns:
      A list of samples gathered from the container cluster and the
      container registry; empty when neither resource exists.
    """
    collected = []
    for resource in (self.container_cluster, self.container_registry):
        if resource:
            collected.extend(resource.GetSamples())
    return collected
def StartBackgroundWorkload(self):
    """Kick off the background workload on every VM, one thread per VM."""
    tasks = [(vm.StartBackgroundWorkload, (), {}) for vm in self.vms]
    vm_util.RunParallelThreads(tasks, len(tasks))
def StopBackgroundWorkload(self):
    """Stop the background workload on every VM, one thread per VM."""
    tasks = [(vm.StopBackgroundWorkload, (), {}) for vm in self.vms]
    vm_util.RunParallelThreads(tasks, len(tasks))
def _GetResourceDict(self, time_format, timeout_minutes=None):
    """Build the default tag dict applied to provisioned resources.

    Args:
      time_format: strftime format string used for the timestamp tags.
      timeout_minutes: optional override; any falsy value falls back to
        FLAGS.timeout_minutes.

    Returns:
      Dict with timeout/create-time timestamps, benchmark name, run uuid
      and owner.
    """
    now_utc = datetime.datetime.utcnow()
    if not timeout_minutes:
        timeout_minutes = FLAGS.timeout_minutes
    timeout_utc = (
        now_utc +
        datetime.timedelta(minutes=timeout_minutes))
    tags = {
        'timeout_utc': timeout_utc.strftime(time_format),
        'create_time_utc': now_utc.strftime(time_format),
        'benchmark': self.name,
        'perfkit_uuid': self.uuid,
        'owner': FLAGS.owner
    }
    return tags
def GetResourceTags(self, timeout_minutes=None):
    """Return default resource tags formatted with METADATA_TIME_FORMAT."""
    return self._GetResourceDict(METADATA_TIME_FORMAT, timeout_minutes)
def _CreateVirtualMachine(self, vm_spec, os_type, cloud):
    """Create a single virtual machine object, static VMs first.

    A pre-registered static VM is consumed when one is available;
    otherwise the (cloud, os_type) VM class is instantiated from vm_spec.

    Raises:
      errors.Error: if no VM class exists for (cloud, os_type).
    """
    vm = static_vm.StaticVirtualMachine.GetStaticVirtualMachine()
    if vm:
        return vm
    vm_class = virtual_machine.GetVmClass(cloud, os_type)
    if vm_class is None:
        # Bug fix: the message previously read 'type %s"' -- the opening
        # quote around the os_type placeholder was missing.
        raise errors.Error(
            'VMs of type "%s" are not currently supported on cloud "%s".' %
            (os_type, cloud))
    return vm_class(vm_spec)
def CreateAndBootVm(self, vm):
    """Create a VM, open its remote-access ports, and wait for boot."""
    vm.Create()
    logging.info('VM: %s', vm.ip_address)
    logging.info('Waiting for boot completion.')
    vm.AllowRemoteAccessPorts()
    vm.WaitForBootCompletion()
def PrepareVmAfterBoot(self, vm):
    """Attach metadata, run startup hooks, and create the VM's disks.

    --vm_metadata entries must be 'key:value' strings; a malformed entry
    raises a generic Exception.
    """
    vm_metadata = {
        'benchmark':
            self.name,
        'perfkit_uuid':
            self.uuid,
        'benchmark_uid':
            self.uid,
        'create_time_utc':
            datetime.datetime.utcfromtimestamp(vm.create_start_time),
        'owner':
            FLAGS.owner
    }
    # Merge user-supplied key:value metadata (value may itself contain ':').
    for item in FLAGS.vm_metadata:
        if ':' not in item:
            raise Exception('"%s" not in expected key:value format' % item)
        key, value = item.split(':', 1)
        vm_metadata[key] = value
    vm.AddMetadata(**vm_metadata)
    vm.OnStartup()
    # Local SSDs need per-VM setup before any scratch disk is created.
    if any((spec.disk_type == disk.LOCAL for spec in vm.disk_specs)):
        vm.SetupLocalDisks()
    for disk_spec in vm.disk_specs:
        if disk_spec.disk_type == disk.RAM:
            vm.CreateRamDisk(disk_spec)
        else:
            vm.CreateScratchDisk(disk_spec)
        # TODO(user): Simplify disk logic.
        if disk_spec.num_striped_disks > 1:
            # Striped scratch disks have already been created together, so
            # the remaining specs are covered by the first creation.
            break
    # This must come after scratch disk creation to support the
    # containerized VM case.
    vm.PrepareVMEnvironment()
def DeleteVm(self, vm):
    """Tear down one VM: optional package cleanup, delete, drop disks."""
    needs_package_cleanup = vm.is_static and vm.install_packages
    if needs_package_cleanup:
        vm.PackageCleanup()
    vm.Delete()
    vm.DeleteScratchDisks()
@staticmethod
def _GetPickleFilename(uid):
    """Return the temp-dir path used to pickle the spec for run *uid*."""
    return os.path.join(vm_util.GetTempDir(), uid)
def Pickle(self):
    """Serialize this spec to its per-uid temp file (pickle protocol 2)."""
    with open(self._GetPickleFilename(self.uid), 'wb') as pickle_file:
        pickle.dump(self, pickle_file, 2)
@classmethod
def GetBenchmarkSpec(cls, benchmark_module, config, uid):
    """Return a fresh or unpickled benchmark spec and register it.

    During the provision stage a new spec is constructed; in later stages
    the spec pickled by the provision stage is loaded so state (VMs,
    services, ...) carries across stage invocations.
    """
    if stages.PROVISION in FLAGS.run_stage:
        return cls(benchmark_module, config, uid)
    try:
        with open(cls._GetPickleFilename(uid), 'rb') as pickle_file:
            spec = pickle.load(pickle_file)
    except Exception as e:  # pylint: disable=broad-except
        logging.error('Unable to unpickle spec file for benchmark %s.',
                      benchmark_module.BENCHMARK_NAME)
        raise e
    # Always let the spec be deleted after being unpickled so that
    # it's possible to run cleanup even if cleanup has already run.
    spec.deleted = False
    spec.status = benchmark_status.SKIPPED
    context.SetThreadBenchmarkSpec(spec)
    return spec
| true | true |
f7301e9a6d3cfd1a5e7377df11cefe34a6c0eeb6 | 251 | py | Python | orio/validators/__init__.py | NIEHS/orio | bf996ebcf41d14b945cd5848460b023376b637ad | [
"MIT"
] | 6 | 2017-04-19T08:49:20.000Z | 2020-12-18T16:13:28.000Z | orio/validators/__init__.py | NIEHS/orio | bf996ebcf41d14b945cd5848460b023376b637ad | [
"MIT"
] | null | null | null | orio/validators/__init__.py | NIEHS/orio | bf996ebcf41d14b945cd5848460b023376b637ad | [
"MIT"
] | 1 | 2020-12-18T16:14:45.000Z | 2020-12-18T16:14:45.000Z | from .base import get_chromosome_size_path # noqa
from .analysis import AnalysisValidator # noqa
from .bigwig import BigWigValidator # noqa
from .feature_list import FeatureListValidator # noqa
from .sort_vector import SortVectorValidator # noqa
| 41.833333 | 54 | 0.820717 | from .base import get_chromosome_size_path
from .analysis import AnalysisValidator
from .bigwig import BigWigValidator
from .feature_list import FeatureListValidator
from .sort_vector import SortVectorValidator
| true | true |
f7301f02b847c99bc61b46703e6c682dfcb86ed7 | 2,745 | py | Python | bidobe/astunit.py | pbrus/binary-doppler-beaming | fb7b8e58d36da41759d643a58270a76f61bd5c90 | [
"MIT"
] | 1 | 2018-06-19T18:35:55.000Z | 2018-06-19T18:35:55.000Z | bidobe/astunit.py | pbrus/binary-doppler-beaming | fb7b8e58d36da41759d643a58270a76f61bd5c90 | [
"MIT"
] | null | null | null | bidobe/astunit.py | pbrus/binary-doppler-beaming | fb7b8e58d36da41759d643a58270a76f61bd5c90 | [
"MIT"
] | null | null | null | """
Store physical constants and calculate astronomical units
from and to the International System of Units.
"""
class UnitsConverter:
    """Convert quantities between astronomical units and SI units.

    All class-level constants are expressed in SI units, so every
    conversion is a single multiplication or division by a constant.
    """

    # Fundamental physical constants (SI units).
    G = 6.67408e-11
    LIGHT_SPEED = 2.99792458e8
    PLANCK_CONSTANT = 6.62606979e-34
    BOLTZMANN_CONSTANT = 1.38064852e-23
    STEFAN_BOLTZMANN_CONSTANT = 5.670367e-8
    # Astronomical reference values (SI units).
    SUN_MASS = 1.9884e30
    SUN_RADIUS = 6.957e8
    AU = 1.49597e11
    PARSEC = 3.086e16
    # Time conversion factors, in seconds.
    DAY = 86400
    MINUTE = 60

    # --- mass ---

    def convert_sun_mass_to_kg(self, mass):
        """Return *mass*, given in solar masses, in kilograms."""
        return self.SUN_MASS * mass

    def convert_kg_to_sun_mass(self, mass):
        """Return *mass*, given in kilograms, in solar masses."""
        return mass / self.SUN_MASS

    # --- time ---

    def convert_days_to_sec(self, days):
        """Return *days* in seconds."""
        return self.DAY * days

    def convert_sec_to_days(self, seconds):
        """Return *seconds* in days."""
        return seconds / self.DAY

    def convert_min_to_sec(self, minutes):
        """Return *minutes* in seconds."""
        return minutes * self.MINUTE

    def convert_sec_to_min(self, seconds):
        """Return *seconds* in minutes."""
        return seconds / self.MINUTE

    def convert_hours_to_sec(self, minutes):
        """Return a time given in hours in seconds.

        NOTE: the public parameter is (mis)named ``minutes``; it is
        interpreted as hours (the name is kept for call compatibility).
        """
        return (self.MINUTE ** 2) * minutes

    def convert_sec_to_hours(self, seconds):
        """Return *seconds* in hours."""
        return seconds / (self.MINUTE ** 2)

    # --- length ---

    def convert_au_to_m(self, au):
        """Return *au* (Astronomical Units) in meters."""
        return self.AU * au

    def convert_m_to_au(self, meters):
        """Return *meters* in Astronomical Units."""
        return meters / self.AU

    def convert_m_to_sun_radius(self, meters):
        """Return *meters* in solar radii."""
        return meters / self.SUN_RADIUS

    def convert_sun_radius_to_m(self, radii):
        """Return *radii* (solar radii) in meters."""
        return self.SUN_RADIUS * radii

    def convert_m_to_parsec(self, meters):
        """Return *meters* in parsecs."""
        return meters / self.PARSEC

    def convert_parsec_to_m(self, parsecs):
        """Return *parsecs* in meters."""
        return self.PARSEC * parsecs

    # --- speed ---

    def convert_kmps_to_mps(self, speed):
        """Return *speed* (km/s) in m/s."""
        return speed * 1000.0

    def convert_mps_to_kmps(self, speed):
        """Return *speed* (m/s) in km/s."""
        return speed / 1000.0
| 30.5 | 74 | 0.660109 |
class UnitsConverter:
    """Convert between astronomical units and SI units.

    Duplicate (comment-stripped) copy of the UnitsConverter above; all
    class constants are in SI units.
    """

    # Physical and astronomical constants (SI units).
    G = 6.67408e-11
    LIGHT_SPEED = 2.99792458e8
    PLANCK_CONSTANT = 6.62606979e-34
    BOLTZMANN_CONSTANT = 1.38064852e-23
    STEFAN_BOLTZMANN_CONSTANT = 5.670367e-8
    SUN_MASS = 1.9884e30
    SUN_RADIUS = 6.957e8
    AU = 1.49597e11
    PARSEC = 3.086e16
    DAY = 86400  # seconds per day
    MINUTE = 60  # seconds per minute

    def convert_sun_mass_to_kg(self, mass):
        # Solar masses -> kilograms.
        return mass * self.SUN_MASS

    def convert_kg_to_sun_mass(self, mass):
        # Kilograms -> solar masses.
        return mass / self.SUN_MASS

    def convert_days_to_sec(self, days):
        # Days -> seconds.
        return days * self.DAY

    def convert_sec_to_days(self, seconds):
        # Seconds -> days.
        return seconds / self.DAY

    def convert_min_to_sec(self, minutes):
        # Minutes -> seconds.
        return self.MINUTE * minutes

    def convert_sec_to_min(self, seconds):
        # Seconds -> minutes.
        return seconds / self.MINUTE

    def convert_hours_to_sec(self, minutes):
        # Hours -> seconds; the parameter is misnamed 'minutes' but is
        # interpreted as hours (60**2 == 3600 seconds per hour).
        return (self.MINUTE ** 2) * minutes

    def convert_sec_to_hours(self, seconds):
        # Seconds -> hours.
        return seconds / (self.MINUTE ** 2)

    def convert_au_to_m(self, au):
        # Astronomical Units -> meters.
        return au * self.AU

    def convert_m_to_au(self, meters):
        # Meters -> Astronomical Units.
        return meters / self.AU

    def convert_kmps_to_mps(self, speed):
        # km/s -> m/s.
        return 1000.0 * speed

    def convert_mps_to_kmps(self, speed):
        # m/s -> km/s.
        return speed / 1000.0

    def convert_m_to_sun_radius(self, meters):
        # Meters -> solar radii.
        return meters / self.SUN_RADIUS

    def convert_sun_radius_to_m(self, radii):
        # Solar radii -> meters.
        return self.SUN_RADIUS * radii

    def convert_m_to_parsec(self, meters):
        # Meters -> parsecs.
        return meters / self.PARSEC

    def convert_parsec_to_m(self, parsecs):
        # Parsecs -> meters.
        return parsecs * self.PARSEC
| true | true |
f7301f8356df84dbab174e8675eb84f92ee3ede7 | 13,286 | py | Python | DaisyXMusic/modules/song.py | skypar/NITIN_VC__MUSIC | d09f303018ab51bd1b7266ba339e3325520fe429 | [
"Unlicense"
] | null | null | null | DaisyXMusic/modules/song.py | skypar/NITIN_VC__MUSIC | d09f303018ab51bd1b7266ba339e3325520fe429 | [
"Unlicense"
] | null | null | null | DaisyXMusic/modules/song.py | skypar/NITIN_VC__MUSIC | d09f303018ab51bd1b7266ba339e3325520fe429 | [
"Unlicense"
] | null | null | null | # Daisyxmusic (Telegram bot project )
# Copyright (C) 2021 Inukaasith
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import asyncio
import math
import os
import time
from random import randint
from urllib.parse import urlparse
import aiofiles
import aiohttp
import requests
import wget
import youtube_dl
from pyrogram import Client, filters
from pyrogram.errors import FloodWait, MessageNotModified
from pyrogram.types import Message
from youtube_search import YoutubeSearch
from youtubesearchpython import SearchVideos
from DaisyXMusic.config import DURATION_LIMIT
from DaisyXMusic.modules.play import arq
@Client.on_message(filters.command("song") & ~filters.channel)
def song(client, message):
    """Handle /song <query>: search YouTube and reply with the audio.

    Fixes over the original: removed unused locals (user_id/user_name/rpk)
    and two discarded subscript expressions, closed the thumbnail file via
    a context manager (was a leaked file handle), and corrected the
    'keywork' typo in the user-facing error message.
    """
    query = ""
    for i in message.command[1:]:
        query += " " + str(i)
    print(query)
    m = message.reply("🔎 Finding the song...")
    # Local options; intentionally shadows the module-level ydl_opts.
    ydl_opts = {"format": "bestaudio[ext=m4a]"}
    try:
        results = YoutubeSearch(query, max_results=1).to_dict()
        link = f"https://youtube.com{results[0]['url_suffix']}"
        title = results[0]["title"][:40]
        thumbnail = results[0]["thumbnails"][0]
        thumb_name = f"thumb{title}.jpg"
        thumb = requests.get(thumbnail, allow_redirects=True)
        with open(thumb_name, "wb") as thumb_file:
            thumb_file.write(thumb.content)
        duration = results[0]["duration"]
    except Exception as e:
        m.edit("❌ Found Nothing.\n\nTry another keyword or maybe spell it properly.")
        print(str(e))
        return
    m.edit("Downloading the song ")
    try:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            info_dict = ydl.extract_info(link, download=False)
            audio_file = ydl.prepare_filename(info_dict)
            ydl.process_info(info_dict)
        rep = "**🎵 Uploaded by DaisyXMusic**"
        # Convert "H:MM:SS"/"M:SS" into total seconds.
        secmul, dur, dur_arr = 1, 0, duration.split(":")
        for i in range(len(dur_arr) - 1, -1, -1):
            dur += int(dur_arr[i]) * secmul
            secmul *= 60
        message.reply_audio(
            audio_file,
            caption=rep,
            thumb=thumb_name,
            parse_mode="md",
            title=title,
            duration=dur,
        )
        m.delete()
    except Exception as e:
        m.edit("❌ Error")
        print(e)
    # Best-effort cleanup of the downloaded audio and thumbnail.
    try:
        os.remove(audio_file)
        os.remove(thumb_name)
    except Exception as e:
        print(e)
def get_text(message) -> "str | None":
    """Return the argument part of a command message, or None.

    Splits '/cmd rest of text' on the first whitespace run and returns
    'rest of text'.  Returns None when the message has no text, contains
    no space, or has nothing after the command.

    Fix: the original return annotation was the list literal ``[None, str]``,
    which is not a valid type; it is now a (string) Optional[str] annotation.
    The unused ``Message`` parameter annotation was dropped so the function
    is importable in isolation; runtime behavior is unchanged.
    """
    text = message.text
    if text is None:
        return None
    if " " not in text:
        return None
    try:
        return text.split(None, 1)[1]
    except IndexError:
        # e.g. '/cmd   ' -- whitespace present but no second token.
        return None
def humanbytes(size):
    """Format a byte count as a human-readable binary-unit string.

    Returns '' for falsy input (0, None).  Values are divided by 1024
    until they no longer exceed it, then rounded to two decimals, so
    e.g. 2048 -> '2.0 KiB' and 1023 -> '1023 B'.
    """
    if not size:
        return ""
    step = 2 ** 10
    units = ("", "Ki", "Mi", "Gi", "Ti")
    exponent = 0
    while size > step:
        size /= step
        exponent += 1
    return "{} {}B".format(round(size, 2), units[exponent])
async def progress(current, total, message, start, type_of_ps, file_name=None):
    """Pyrogram upload/download progress callback: edit a status message.

    Updates roughly every 10 seconds (and on completion) with a 10-segment
    progress bar, transferred/total sizes, and an ETA.  FloodWait errors
    back off for the requested time; MessageNotModified is ignored.

    Args:
      current/total: bytes transferred / total bytes.
      message: the status message to edit.
      start: transfer start time (time.time()).
      type_of_ps: header line describing the operation.
      file_name: optional file name to include in the status text.
    """
    now = time.time()
    diff = now - start
    # Throttle edits: only when ~10s have elapsed or the transfer is done.
    if round(diff % 10.00) == 0 or current == total:
        percentage = current * 100 / total
        speed = current / diff
        elapsed_time = round(diff) * 1000
        if elapsed_time == 0:
            return
        time_to_completion = round((total - current) / speed) * 1000
        estimated_total_time = elapsed_time + time_to_completion
        # One filled segment per 10% completed.
        progress_str = "{0}{1} {2}%\n".format(
            "".join(["🔴" for i in range(math.floor(percentage / 10))]),
            "".join(["🔘" for i in range(10 - math.floor(percentage / 10))]),
            round(percentage, 2),
        )
        tmp = progress_str + "{0} of {1}\nETA: {2}".format(
            humanbytes(current), humanbytes(total), time_formatter(estimated_total_time)
        )
        if file_name:
            try:
                await message.edit(
                    "{}\n**File Name:** `{}`\n{}".format(type_of_ps, file_name, tmp)
                )
            except FloodWait as e:
                await asyncio.sleep(e.x)
            except MessageNotModified:
                pass
        else:
            try:
                await message.edit("{}\n{}".format(type_of_ps, tmp))
            except FloodWait as e:
                await asyncio.sleep(e.x)
            except MessageNotModified:
                pass
def get_user(message: Message, text: str) -> "tuple[int | str | None, str | None]":
    """Extract a target user and optional reason from a command.

    Priority: a replied-to message wins (user = reply author's id, reason =
    *text*); otherwise the first token of *text* is the user (int when all
    digits, else the raw string, e.g. a @username) and the remainder is the
    reason.  Returns (None, None) when nothing usable is present.
    """
    if text is None:
        asplit = None
    else:
        asplit = text.split(" ", 1)
    user_s = None
    reason_ = None
    if message.reply_to_message:
        user_s = message.reply_to_message.from_user.id
        reason_ = text if text else None
    elif asplit is None:
        return None, None
    elif len(asplit[0]) > 0:
        # Numeric first token -> user id; otherwise keep it as a string.
        user_s = int(asplit[0]) if asplit[0].isdigit() else asplit[0]
        if len(asplit) == 2:
            reason_ = asplit[1]
    return user_s, reason_
def get_readable_time(seconds: int) -> str:
    """Render a duration in seconds as e.g. '1days, 1h:1m:1s'.

    Smaller units come first internally ('s', 'm', 'h', 'days'); the list
    is reversed for display and the optional days component is prefixed
    with ', '.  Returns '' for zero.

    Fix: the return annotation said ``int`` but the function has always
    returned a string; it now reads ``str``.  The algorithm is unchanged.
    """
    count = 0
    ping_time = ""
    time_list = []
    time_suffix_list = ["s", "m", "h", "days"]
    while count < 4:
        count += 1
        # Seconds/minutes use base 60; hours->days uses base 24.
        if count < 3:
            remainder, result = divmod(seconds, 60)
        else:
            remainder, result = divmod(seconds, 24)
        if seconds == 0 and remainder == 0:
            break
        time_list.append(int(result))
        seconds = int(remainder)
    for x in range(len(time_list)):
        time_list[x] = str(time_list[x]) + time_suffix_list[x]
    if len(time_list) == 4:
        ping_time += time_list.pop() + ", "
    time_list.reverse()
    ping_time += ":".join(time_list)
    return ping_time
def time_formatter(milliseconds: int) -> str:
    """Render a millisecond duration as '1 hour(s), 2 minute(s), ...'.

    Zero-valued components are omitted; zero total yields ''.
    """
    seconds, milliseconds = divmod(int(milliseconds), 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    components = ((days, "day"), (hours, "hour"), (minutes, "minute"),
                  (seconds, "second"), (milliseconds, "millisecond"))
    parts = ["{} {}(s)".format(amount, unit)
             for amount, unit in components if amount]
    return ", ".join(parts)
# Default youtube-dl options: fetch the best audio stream, write the
# thumbnail alongside, and post-process into a 192 kbps MP3 via ffmpeg.
ydl_opts = {
    "format": "bestaudio/best",
    "writethumbnail": True,
    "postprocessors": [
        {
            "key": "FFmpegExtractAudio",
            "preferredcodec": "mp3",
            "preferredquality": "192",
        }
    ],
}
def get_file_extension_from_url(url):
    """Return the extension (text after the last dot) of a URL's path.

    Query strings and fragments are ignored; a path with no dot yields
    the whole basename.
    """
    path = urlparse(url).path
    filename = os.path.basename(path)
    return filename.rsplit(".", 1)[-1]
# Function to download a song from a direct URL to a local MP3 file.
async def download_song(url):
    """Fetch *url* and save it as '<random>.mp3'; return that filename.

    NOTE(review): the filename is returned even when the HTTP status is
    not 200, in which case no file was written -- callers appear to
    assume success; confirm upstream handling.  Random names in
    [6969, 6999] can also collide across concurrent downloads.
    """
    song_name = f"{randint(6969, 6999)}.mp3"
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            if resp.status == 200:
                f = await aiofiles.open(song_name, mode="wb")
                await f.write(await resp.read())
                await f.close()
    return song_name
is_downloading = False
def time_to_seconds(time):
    """Convert a 'H:MM:SS' / 'M:SS' style value into total seconds.

    The argument is coerced via str(), so plain numbers pass through
    (e.g. 45 -> 45).  Note the parameter shadows the ``time`` module
    inside this function only; the name is kept for call compatibility.
    """
    total = 0
    # Horner-style accumulation: each colon-separated field is base 60.
    for part in str(time).split(":"):
        total = total * 60 + int(part)
    return total
@Client.on_message(filters.command("saavn") & ~filters.edited)
async def jssong(_, message):
    """Handle /saavn <query>: search JioSaavn and reply with the audio.

    Bug fix: the original returned from the ``not songs.ok`` branch
    without clearing the global ``is_downloading`` flag, permanently
    blocking all future downloads.  The flag is now released in a
    ``finally`` block on every exit path.
    """
    global is_downloading
    if len(message.command) < 2:
        await message.reply_text("/saavn requires an argument.")
        return
    if is_downloading:
        await message.reply_text(
            "Another download is in progress, try again after sometime."
        )
        return
    is_downloading = True
    text = message.text.split(None, 1)[1]
    query = text.replace(" ", "%20")
    m = await message.reply_text("Searching...")
    try:
        songs = await arq.saavn(query)
        if not songs.ok:
            await message.reply_text(songs.result)
            return
        sname = songs.result[0].song
        slink = songs.result[0].media_url
        ssingers = songs.result[0].singers
        await m.edit("Downloading")
        song = await download_song(slink)
        await m.edit("Uploading")
        await message.reply_audio(audio=song, title=sname, performer=ssingers)
        os.remove(song)
        await m.delete()
    except Exception as e:
        # Surface the error text in the status message.
        await m.edit(str(e))
    finally:
        # Always release the download lock, whatever path we exit on.
        is_downloading = False
# Deezer Music
@Client.on_message(filters.command("deezer") & ~filters.edited)
async def deezsong(_, message):
    """Handle /deezer <query>: search Deezer and reply with the audio.

    Bug fix: like the original jssong, the ``not songs.ok`` branch
    returned without clearing the global ``is_downloading`` flag,
    permanently blocking all future downloads.  The flag is now released
    in a ``finally`` block on every exit path.
    """
    global is_downloading
    if len(message.command) < 2:
        await message.reply_text("/deezer requires an argument.")
        return
    if is_downloading:
        await message.reply_text(
            "Another download is in progress, try again after sometime."
        )
        return
    is_downloading = True
    text = message.text.split(None, 1)[1]
    query = text.replace(" ", "%20")
    m = await message.reply_text("Searching...")
    try:
        songs = await arq.deezer(query, 1)
        if not songs.ok:
            await message.reply_text(songs.result)
            return
        title = songs.result[0].title
        url = songs.result[0].url
        artist = songs.result[0].artist
        await m.edit("Downloading")
        song = await download_song(url)
        await m.edit("Uploading")
        await message.reply_audio(audio=song, title=title, performer=artist)
        os.remove(song)
        await m.delete()
    except Exception as e:
        # Surface the error text in the status message.
        await m.edit(str(e))
    finally:
        # Always release the download lock, whatever path we exit on.
        is_downloading = False
@Client.on_message(filters.command(["vsong", "video"]))
async def ytmusic(client, message: Message):
    """Handle /vsong or /video: download a YouTube video and reply with it.

    Searches YouTube for the given text, downloads the best MP4 (subject
    to DURATION_LIMIT minutes), uploads it with a progress callback, then
    removes the local files.  Uses the module-level is_downloading flag as
    a crude single-download lock.
    """
    global is_downloading
    if is_downloading:
        await message.reply_text(
            "Another download is in progress, try again after sometime."
        )
        return
    urlissed = get_text(message)
    pablo = await client.send_message(
        message.chat.id, f"`Getting {urlissed} From Youtube Servers. Please Wait.`"
    )
    if not urlissed:
        await pablo.edit("Invalid Command Syntax, Please Check Help Menu To Know More!")
        return
    search = SearchVideos(f"{urlissed}", offset=1, mode="dict", max_results=1)
    mi = search.result()
    mio = mi["search_result"]
    mo = mio[0]["link"]
    thum = mio[0]["title"]
    fridayz = mio[0]["id"]
    thums = mio[0]["channel"]
    # YouTube's high-quality thumbnail for the found video id.
    kekme = f"https://img.youtube.com/vi/{fridayz}/hqdefault.jpg"
    await asyncio.sleep(0.6)
    url = mo
    # NOTE(review): wget.download blocks the event loop; the thumbnail is
    # also not removed on the early-error paths below -- confirm upstream.
    sedlyf = wget.download(kekme)
    opts = {
        "format": "best",
        "addmetadata": True,
        "key": "FFmpegMetadata",
        "prefer_ffmpeg": True,
        "geo_bypass": True,
        "nocheckcertificate": True,
        "postprocessors": [{"key": "FFmpegVideoConvertor", "preferedformat": "mp4"}],
        "outtmpl": "%(id)s.mp4",
        "logtostderr": False,
        "quiet": True,
    }
    try:
        is_downloading = True
        with youtube_dl.YoutubeDL(opts) as ytdl:
            # First pass (no download) just to check the duration limit.
            infoo = ytdl.extract_info(url, False)
            duration = round(infoo["duration"] / 60)
            if duration > DURATION_LIMIT:
                await pablo.edit(
                    f"❌ Videos longer than {DURATION_LIMIT} minute(s) aren't allowed, the provided video is {duration} minute(s)"
                )
                is_downloading = False
                return
            ytdl_data = ytdl.extract_info(url, download=True)
    except Exception:
        # Failures are silently swallowed (status edit left disabled below);
        # the lock is still released before returning.
        # await pablo.edit(event, f"**Failed To Download** \n**Error :** `{str(e)}`")
        is_downloading = False
        return
    c_time = time.time()
    file_stark = f"{ytdl_data['id']}.mp4"
    capy = f"**Video Name ➠** `{thum}` \n**Requested For :** `{urlissed}` \n**Channel :** `{thums}` \n**Link :** `{mo}`"
    await client.send_video(
        message.chat.id,
        video=open(file_stark, "rb"),
        duration=int(ytdl_data["duration"]),
        file_name=str(ytdl_data["title"]),
        thumb=sedlyf,
        caption=capy,
        supports_streaming=True,
        progress=progress,
        progress_args=(
            pablo,
            c_time,
            f"`Uploading {urlissed} Song From YouTube Music!`",
            file_stark,
        ),
    )
    await pablo.delete()
    is_downloading = False
    # Clean up the thumbnail and the downloaded video.
    for files in (sedlyf, file_stark):
        if files and os.path.exists(files):
            os.remove(files)
| 31.187793 | 129 | 0.594084 |
from __future__ import unicode_literals
import asyncio
import math
import os
import time
from random import randint
from urllib.parse import urlparse
import aiofiles
import aiohttp
import requests
import wget
import youtube_dl
from pyrogram import Client, filters
from pyrogram.errors import FloodWait, MessageNotModified
from pyrogram.types import Message
from youtube_search import YoutubeSearch
from youtubesearchpython import SearchVideos
from DaisyXMusic.config import DURATION_LIMIT
from DaisyXMusic.modules.play import arq
@Client.on_message(filters.command("song") & ~filters.channel)
def song(client, message):
    """Duplicate copy of the /song handler: search YouTube, reply with audio.

    NOTE(review): user_id/user_name/rpk are computed but never used; the
    two bare subscript expressions after ``duration`` are discarded; and
    the thumbnail file handle opened inline is never explicitly closed.
    """
    user_id = message.from_user.id
    user_name = message.from_user.first_name
    rpk = "[" + user_name + "](tg://user?id=" + str(user_id) + ")"
    query = ""
    for i in message.command[1:]:
        query += " " + str(i)
    print(query)
    m = message.reply("🔎 Finding the song...")
    # Local options; intentionally shadows the module-level ydl_opts.
    ydl_opts = {"format": "bestaudio[ext=m4a]"}
    try:
        results = YoutubeSearch(query, max_results=1).to_dict()
        link = f"https://youtube.com{results[0]['url_suffix']}"
        title = results[0]["title"][:40]
        thumbnail = results[0]["thumbnails"][0]
        thumb_name = f"thumb{title}.jpg"
        thumb = requests.get(thumbnail, allow_redirects=True)
        open(thumb_name, "wb").write(thumb.content)
        duration = results[0]["duration"]
        results[0]["url_suffix"]
        results[0]["views"]
    except Exception as e:
        m.edit("❌ Found Nothing.\n\nTry another keywork or maybe spell it properly.")
        print(str(e))
        return
    m.edit("Downloading the song ")
    try:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            info_dict = ydl.extract_info(link, download=False)
            audio_file = ydl.prepare_filename(info_dict)
            ydl.process_info(info_dict)
        rep = "**🎵 Uploaded by DaisyXMusic**"
        # Convert "H:MM:SS"/"M:SS" into total seconds.
        secmul, dur, dur_arr = 1, 0, duration.split(":")
        for i in range(len(dur_arr) - 1, -1, -1):
            dur += int(dur_arr[i]) * secmul
            secmul *= 60
        message.reply_audio(
            audio_file,
            caption=rep,
            thumb=thumb_name,
            parse_mode="md",
            title=title,
            duration=dur,
        )
        m.delete()
    except Exception as e:
        m.edit("❌ Error")
        print(e)
    # Best-effort cleanup of the downloaded audio and thumbnail.
    try:
        os.remove(audio_file)
        os.remove(thumb_name)
    except Exception as e:
        print(e)
def get_text(message: Message) -> "str | None":
    """Return the argument part of a command message, or None.

    Duplicate copy; splits '/cmd rest' on the first whitespace run and
    returns 'rest', or None when absent.
    """
    text_to_return = message.text
    if message.text is None:
        return None
    if " " in text_to_return:
        try:
            return message.text.split(None, 1)[1]
        except IndexError:
            return None
    else:
        return None
def humanbytes(size):
    """Format a byte count as a human-readable binary-unit string.

    Duplicate copy; returns '' for falsy input, otherwise divides by 1024
    until the value fits and appends the matching Ki/Mi/Gi/Ti suffix.
    """
    if not size:
        return ""
    power = 2 ** 10
    raised_to_pow = 0
    dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
    while size > power:
        size /= power
        raised_to_pow += 1
    return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
async def progress(current, total, message, start, type_of_ps, file_name=None):
    """Duplicate copy of the Pyrogram progress callback.

    Edits *message* roughly every 10 seconds (and on completion) with a
    progress bar, transferred/total sizes, and an ETA; handles FloodWait
    by sleeping and ignores MessageNotModified.
    """
    now = time.time()
    diff = now - start
    if round(diff % 10.00) == 0 or current == total:
        percentage = current * 100 / total
        speed = current / diff
        elapsed_time = round(diff) * 1000
        if elapsed_time == 0:
            return
        time_to_completion = round((total - current) / speed) * 1000
        estimated_total_time = elapsed_time + time_to_completion
        progress_str = "{0}{1} {2}%\n".format(
            "".join(["🔴" for i in range(math.floor(percentage / 10))]),
            "".join(["🔘" for i in range(10 - math.floor(percentage / 10))]),
            round(percentage, 2),
        )
        tmp = progress_str + "{0} of {1}\nETA: {2}".format(
            humanbytes(current), humanbytes(total), time_formatter(estimated_total_time)
        )
        if file_name:
            try:
                await message.edit(
                    "{}\n**File Name:** `{}`\n{}".format(type_of_ps, file_name, tmp)
                )
            except FloodWait as e:
                await asyncio.sleep(e.x)
            except MessageNotModified:
                pass
        else:
            try:
                await message.edit("{}\n{}".format(type_of_ps, tmp))
            except FloodWait as e:
                await asyncio.sleep(e.x)
            except MessageNotModified:
                pass
def get_user(message: Message, text: str) -> "tuple[int | str | None, str | None]":
    """Duplicate copy: extract (user, reason) from a command message.

    A replied-to message wins; otherwise the first token of *text* is the
    user (int when all digits) and the remainder is the reason.
    """
    if text is None:
        asplit = None
    else:
        asplit = text.split(" ", 1)
    user_s = None
    reason_ = None
    if message.reply_to_message:
        user_s = message.reply_to_message.from_user.id
        reason_ = text if text else None
    elif asplit is None:
        return None, None
    elif len(asplit[0]) > 0:
        user_s = int(asplit[0]) if asplit[0].isdigit() else asplit[0]
        if len(asplit) == 2:
            reason_ = asplit[1]
    return user_s, reason_
def get_readable_time(seconds: int) -> str:
    """Duplicate copy: render seconds as e.g. '1days, 1h:1m:1s'.

    (Annotation corrected from int: the function returns a string.)
    """
    count = 0
    ping_time = ""
    time_list = []
    time_suffix_list = ["s", "m", "h", "days"]
    while count < 4:
        count += 1
        # Seconds/minutes use base 60; hours->days uses base 24.
        if count < 3:
            remainder, result = divmod(seconds, 60)
        else:
            remainder, result = divmod(seconds, 24)
        if seconds == 0 and remainder == 0:
            break
        time_list.append(int(result))
        seconds = int(remainder)
    for x in range(len(time_list)):
        time_list[x] = str(time_list[x]) + time_suffix_list[x]
    if len(time_list) == 4:
        ping_time += time_list.pop() + ", "
    time_list.reverse()
    ping_time += ":".join(time_list)
    return ping_time
def time_formatter(milliseconds: int) -> str:
    """Render a millisecond duration as e.g. ``'1 hour(s), 2 minute(s)'``.

    Zero-valued units are omitted; 0 ms yields the empty string.
    """
    seconds, milliseconds = divmod(int(milliseconds), 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    units = (
        (days, "day(s)"),
        (hours, "hour(s)"),
        (minutes, "minute(s)"),
        (seconds, "second(s)"),
        (milliseconds, "millisecond(s)"),
    )
    return ", ".join(f"{amount} {label}" for amount, label in units if amount)
# youtube-dl option set used for audio rips: download the best audio stream,
# also write the thumbnail file, and have ffmpeg extract a 192 kbps MP3.
ydl_opts = {
    "format": "bestaudio/best",
    "writethumbnail": True,
    "postprocessors": [
        {
            "key": "FFmpegExtractAudio",
            "preferredcodec": "mp3",
            "preferredquality": "192",
        }
    ],
}
def get_file_extension_from_url(url):
    """Return the extension (text after the last dot) of the file in *url*'s path.

    Query strings and fragments are ignored; a dot-less name is returned whole.
    """
    path_part = urlparse(url).path
    return os.path.basename(path_part).rsplit(".", 1)[-1]
async def download_song(url):
    """Download *url* into a randomly named local ``<n>.mp3`` file.

    Returns the generated file name.

    NOTE(review): on a non-200 response nothing is written, yet the generated
    name is still returned and points at a file that does not exist -- callers
    appear to assume success; confirm this is intended.
    """
    song_name = f"{randint(6969, 6999)}.mp3"
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            if resp.status == 200:
                f = await aiofiles.open(song_name, mode="wb")
                await f.write(await resp.read())
                await f.close()
    return song_name
# Module-wide lock flag: True while any handler in this file is downloading,
# so the /saavn, /deezer and /vsong commands refuse to run concurrently.
is_downloading = False
def time_to_seconds(time):
    """Convert a ``'HH:MM:SS'`` / ``'MM:SS'`` / ``'SS'`` value to total seconds."""
    total = 0
    for part in str(time).split(":"):
        total = total * 60 + int(part)
    return total
@Client.on_message(filters.command("saavn") & ~filters.edited)
async def jssong(_, message):
    """Handle /saavn <query>: search JioSaavn via ARQ, download the first hit
    and reply with the audio file.

    Only one download runs at a time (module-level ``is_downloading``).
    Bug fix: the original left ``is_downloading`` stuck at True when the
    search failed (``not songs.ok``), permanently blocking later downloads;
    the flag is now always released in a ``finally`` block.
    """
    global is_downloading
    if len(message.command) < 2:
        await message.reply_text("/saavn requires an argument.")
        return
    if is_downloading:
        await message.reply_text(
            "Another download is in progress, try again after sometime."
        )
        return
    is_downloading = True
    text = message.text.split(None, 1)[1]
    query = text.replace(" ", "%20")
    m = await message.reply_text("Searching...")
    try:
        songs = await arq.saavn(query)
        if not songs.ok:
            await message.reply_text(songs.result)
            return
        sname = songs.result[0].song
        slink = songs.result[0].media_url
        ssingers = songs.result[0].singers
        await m.edit("Downloading")
        song = await download_song(slink)
        await m.edit("Uploading")
        await message.reply_audio(audio=song, title=sname, performer=ssingers)
        os.remove(song)
        await m.delete()
    except Exception as e:
        # Surface the failure to the user in the status message.
        await m.edit(str(e))
    finally:
        # Always release the download lock, whatever path we exited through.
        is_downloading = False
@Client.on_message(filters.command("deezer") & ~filters.edited)
async def deezsong(_, message):
    """Handle /deezer <query>: search Deezer via ARQ, download the first hit
    and reply with the audio file.

    Only one download runs at a time (module-level ``is_downloading``).
    Bug fix: the original left ``is_downloading`` stuck at True when the
    search failed (``not songs.ok``), permanently blocking later downloads;
    the flag is now always released in a ``finally`` block.
    """
    global is_downloading
    if len(message.command) < 2:
        await message.reply_text("/deezer requires an argument.")
        return
    if is_downloading:
        await message.reply_text(
            "Another download is in progress, try again after sometime."
        )
        return
    is_downloading = True
    text = message.text.split(None, 1)[1]
    query = text.replace(" ", "%20")
    m = await message.reply_text("Searching...")
    try:
        songs = await arq.deezer(query, 1)
        if not songs.ok:
            await message.reply_text(songs.result)
            return
        title = songs.result[0].title
        url = songs.result[0].url
        artist = songs.result[0].artist
        await m.edit("Downloading")
        song = await download_song(url)
        await m.edit("Uploading")
        await message.reply_audio(audio=song, title=title, performer=artist)
        os.remove(song)
        await m.delete()
    except Exception as e:
        # Surface the failure to the user in the status message.
        await m.edit(str(e))
    finally:
        # Always release the download lock, whatever path we exited through.
        is_downloading = False
@Client.on_message(filters.command(["vsong", "video"]))
async def ytmusic(client, message: Message):
    """Handle /vsong and /video: search YouTube for the given text, download
    the first result with youtube-dl (mp4, duration-limited) and upload it.

    Uses the module-level ``is_downloading`` flag to serialize downloads.
    NOTE(review): if ``send_video`` raises, the flag stays True and the
    downloaded files are not cleaned up -- confirm whether that is acceptable.
    """
    global is_downloading
    if is_downloading:
        await message.reply_text(
            "Another download is in progress, try again after sometime."
        )
        return
    urlissed = get_text(message)
    pablo = await client.send_message(
        message.chat.id, f"`Getting {urlissed} From Youtube Servers. Please Wait.`"
    )
    if not urlissed:
        await pablo.edit("Invalid Command Syntax, Please Check Help Menu To Know More!")
        return
    # First matching video only.
    search = SearchVideos(f"{urlissed}", offset=1, mode="dict", max_results=1)
    mi = search.result()
    mio = mi["search_result"]
    mo = mio[0]["link"]
    thum = mio[0]["title"]
    fridayz = mio[0]["id"]
    thums = mio[0]["channel"]
    # High-quality thumbnail served by YouTube for this video id.
    kekme = f"https://img.youtube.com/vi/{fridayz}/hqdefault.jpg"
    await asyncio.sleep(0.6)
    url = mo
    sedlyf = wget.download(kekme)
    # youtube-dl options: best single stream, remuxed to mp4 named <id>.mp4.
    opts = {
        "format": "best",
        "addmetadata": True,
        "key": "FFmpegMetadata",
        "prefer_ffmpeg": True,
        "geo_bypass": True,
        "nocheckcertificate": True,
        "postprocessors": [{"key": "FFmpegVideoConvertor", "preferedformat": "mp4"}],
        "outtmpl": "%(id)s.mp4",
        "logtostderr": False,
        "quiet": True,
    }
    try:
        is_downloading = True
        with youtube_dl.YoutubeDL(opts) as ytdl:
            # Probe metadata first (download=False) to enforce the limit
            # before spending bandwidth on the actual download.
            infoo = ytdl.extract_info(url, False)
            duration = round(infoo["duration"] / 60)
            if duration > DURATION_LIMIT:
                await pablo.edit(
                    f"❌ Videos longer than {DURATION_LIMIT} minute(s) aren't allowed, the provided video is {duration} minute(s)"
                )
                is_downloading = False
                return
            ytdl_data = ytdl.extract_info(url, download=True)
    except Exception:
        # await pablo.edit(event, f"**Failed To Download** \n**Error :** `{str(e)}`")
        # Silently abort on any download error, but release the lock.
        is_downloading = False
        return
    c_time = time.time()
    file_stark = f"{ytdl_data['id']}.mp4"
    capy = f"**Video Name ➠** `{thum}` \n**Requested For :** `{urlissed}` \n**Channel :** `{thums}` \n**Link :** `{mo}`"
    await client.send_video(
        message.chat.id,
        video=open(file_stark, "rb"),
        duration=int(ytdl_data["duration"]),
        file_name=str(ytdl_data["title"]),
        thumb=sedlyf,
        caption=capy,
        supports_streaming=True,
        progress=progress,
        progress_args=(
            pablo,
            c_time,
            f"`Uploading {urlissed} Song From YouTube Music!`",
            file_stark,
        ),
    )
    await pablo.delete()
    is_downloading = False
    # Remove the downloaded thumbnail and video from disk.
    for files in (sedlyf, file_stark):
        if files and os.path.exists(files):
            os.remove(files)
| true | true |
f7301f8cc870b348e9ccc730cb09c52bb390f47c | 2,406 | py | Python | src/ast_toolbox/mcts/tree_plot.py | hdelecki/AdaptiveStressTestingToolbox | 184d7d7f1b4acb65eecb749e3c3a78cbcfc3c4ed | [
"MIT"
] | 29 | 2019-01-09T23:56:35.000Z | 2022-03-18T03:41:10.000Z | src/ast_toolbox/mcts/tree_plot.py | hdelecki/AdaptiveStressTestingToolbox | 184d7d7f1b4acb65eecb749e3c3a78cbcfc3c4ed | [
"MIT"
] | 39 | 2019-01-10T00:32:26.000Z | 2022-03-12T00:29:05.000Z | src/ast_toolbox/mcts/tree_plot.py | hdelecki/AdaptiveStressTestingToolbox | 184d7d7f1b4acb65eecb749e3c3a78cbcfc3c4ed | [
"MIT"
] | 11 | 2019-01-10T08:11:47.000Z | 2021-12-28T15:56:02.000Z | import uuid
import pydot
def get_root(tree):
    """Get the root node of the tree.

    Parameters
    ----------
    tree : dict
        The tree.

    Returns
    ----------
    s : :py:class:`ast_toolbox.mcts.AdaptiveStressTesting.ASTState`
        The root state, i.e. the key whose ``parent`` is None (None if absent).
    """
    return next((state for state in tree.keys() if state.parent is None), None)
def s2node(s, tree):
    """Transfer the AST state to a pydot node.

    Parameters
    ----------
    s : :py:class:`ast_toolbox.mcts.AdaptiveStressTesting.ASTState`
        The AST state.
    tree : dict
        The tree.

    Returns
    ----------
    node : :py:class:`pydot.Node`
        A node labelled with the visit count of *s*, or None when *s* is not
        part of *tree*.
    """
    if s not in tree:
        return None
    visits = tree[s].n
    return pydot.Node(str(uuid.uuid4()), label='n=' + str(visits))
def add_children(s, s_node, tree, graph, d):
    """Add successors of s into the graph, recursing down to depth *d*.

    Parameters
    ----------
    s : :py:class:`ast_toolbox.mcts.AdaptiveStressTesting.ASTState`
        The AST state.
    s_node : :py:class:`pydot.Node`
        The pydot node corresponding to s.
    tree : dict
        The tree.
    graph : :py:class:`pydot.Dot`
        The pydot graph.
    d : int
        The remaining depth; nothing is added once it reaches zero.
    """
    if d <= 0:
        return
    for a, stats in tree[s].a.items():
        n = stats.n
        q = stats.q
        # One successor per action: transitions are deterministic here.
        assert len(stats.s.keys()) == 1
        for ns in stats.s.keys():
            ns_node = s2node(ns, tree)
            if ns_node is None:
                continue
            graph.add_node(ns_node)
            edge_label = "n=" + str(n) + " a=" + str(a.get()) + " q=" + str(q)
            graph.add_edge(pydot.Edge(s_node, ns_node, label=edge_label))
            add_children(ns, ns_node, tree, graph, d - 1)
def plot_tree(tree, d, path, format="svg"):
    """Plot the tree.

    Parameters
    ----------
    tree : dict
        The tree.
    d : int
        The depth.
    path : str
        The plotting path (extension is appended from *format*).
    format : str
        The plotting format, "svg" or "png"; other values write nothing.
    """
    graph = pydot.Dot(graph_type='digraph')
    root = get_root(tree)
    root_node = s2node(root, tree)
    graph.add_node(root_node)
    add_children(root, root_node, tree, graph, d)
    filename = path + "." + format
    # Bug fix: pydot's Dot.write() defaults to format='raw' (DOT text), so the
    # original wrote plain DOT source into .svg/.png files.  Pass the requested
    # format so Graphviz actually renders SVG/PNG output.
    if format in ("svg", "png"):
        graph.write(filename, format=format)
| 24.30303 | 124 | 0.525769 | import uuid
import pydot
def get_root(tree):
    """Return the state in *tree* whose ``parent`` is None (the root), or None."""
    for s in tree.keys():
        if s.parent is None:
            return s
def s2node(s, tree):
    """Return a pydot node labelled with the visit count of *s*, or None if *s* is not in *tree*."""
    if s in tree.keys():
        return pydot.Node(str(uuid.uuid4()), label='n=' + str(tree[s].n))
    else:
        return None
def add_children(s, s_node, tree, graph, d):
    """Recursively add the successors of *s* (down to depth *d*) to *graph*."""
    if d > 0:
        for a in tree[s].a.keys():
            n = tree[s].a[a].n
            q = tree[s].a[a].q
            # One successor per action: transitions are deterministic here.
            assert len(tree[s].a[a].s.keys()) == 1
            for ns in tree[s].a[a].s.keys():
                ns_node = s2node(ns, tree)
                if ns_node is not None:
                    graph.add_node(ns_node)
                    graph.add_edge(pydot.Edge(s_node, ns_node, label="n=" + str(n) + " a=" + str(a.get()) + " q=" + str(q)))
                    add_children(ns, ns_node, tree, graph, d - 1)
def plot_tree(tree, d, path, format="svg"):
    """Render the tree down to depth *d* into ``path.format``.

    NOTE(review): both branches call ``graph.write(filename)``, which in pydot
    emits raw DOT text regardless of the file extension -- confirm whether
    format-specific rendering (write_svg/write_png) was intended.
    """
    graph = pydot.Dot(graph_type='digraph')
    root = get_root(tree)
    root_node = s2node(root, tree)
    graph.add_node(root_node)
    add_children(root, root_node, tree, graph, d)
    filename = path + "." + format
    if format == "svg":
        graph.write(filename)
    elif format == "png":
        graph.write(filename)
| true | true |
f7302001cebe3ed8119fa24d93d0e279303b50c9 | 1,169 | py | Python | soctrack/utils.py | kcsry/soctrack | b8cfa8aefab98f8daeea0cafa10932f67bcda9dc | [
"MIT"
] | null | null | null | soctrack/utils.py | kcsry/soctrack | b8cfa8aefab98f8daeea0cafa10932f67bcda9dc | [
"MIT"
] | 1 | 2022-02-21T19:16:21.000Z | 2022-02-21T19:16:21.000Z | soctrack/utils.py | kcsry/soctrack | b8cfa8aefab98f8daeea0cafa10932f67bcda9dc | [
"MIT"
] | null | null | null | import re
import time
from django.conf import settings
from django.utils.timezone import make_aware, make_naive, utc
# Runs of characters outside the Basic Multilingual Plane (and the surrogate
# range), which the storage backend may not accept.
_NON_BMP_RUN = '[^\u0000-\uD7FF\uE000-\uFFFF]+'
re_pattern = re.compile(_NON_BMP_RUN, re.UNICODE)


def sanitize_unicode(u):
    """Replace every run of non-BMP characters in *u* with a single space.

    We may not be able to store all special characters thanks to MySQL's
    limitations, so accept the minor loss of fidelity in the cached data
    fields.
    """
    return re_pattern.sub(' ', u)
def could_be_utc(dt):
    """Normalize *dt* to the project's timezone convention.

    With ``USE_TZ`` enabled, return an aware datetime in UTC; otherwise
    return a naive datetime (converting aware inputs to naive UTC).
    """
    if settings.USE_TZ:
        return make_aware(dt, utc)
    else:
        if dt.tzinfo:
            return make_naive(dt, utc)
        else:
            return dt
class RetryError(Exception):
    """Raised by ``retry_with_backoff`` when every attempt has failed.

    The individual exceptions from each attempt are kept on ``self.exceptions``.
    """

    def __init__(self, fn, tries, exceptions):
        fn_name = getattr(fn, '__name__', None) or str(fn)
        message = '%s failed after %d tries' % (fn_name, tries)
        super().__init__(message)
        self.exceptions = exceptions
def retry_with_backoff(fn, tries=10, wait=0.5, exception_classes=(Exception,)):
    """Call *fn* until it succeeds, retrying with exponential backoff.

    Sleeps ``wait * 1.5**attempt`` seconds between attempts.  Raises
    ``RetryError`` wrapping every caught exception if all *tries* fail.

    Bug fix: the original also slept the backoff delay *after* the final
    failed attempt, pointlessly delaying the raise.
    """
    exceptions = []
    for t in range(tries):
        try:
            return fn()
        except exception_classes as e:
            exceptions.append(e)
            if t + 1 < tries:
                # No point sleeping after the last attempt; we're about to raise.
                time.sleep(wait * (1.5**t))
    raise RetryError(fn, tries, exceptions)
| 27.186047 | 79 | 0.640719 | import re
import time
from django.conf import settings
from django.utils.timezone import make_aware, make_naive, utc
# Runs of characters outside the Basic Multilingual Plane (and the surrogate
# range), which the storage backend may not accept.
re_pattern = re.compile('[^\u0000-\uD7FF\uE000-\uFFFF]+', re.UNICODE)
def sanitize_unicode(u):
    """Replace every run of non-BMP characters in *u* with a single space,
    so the value is safe for the cached data fields."""
    return re_pattern.sub(' ', u)
def could_be_utc(dt):
    """Normalize *dt* to the project's timezone convention.

    With ``USE_TZ`` enabled, return an aware datetime in UTC; otherwise
    return a naive datetime (converting aware inputs to naive UTC).
    """
    if settings.USE_TZ:
        return make_aware(dt, utc)
    else:
        if dt.tzinfo:
            return make_naive(dt, utc)
        else:
            return dt
class RetryError(Exception):
    """Raised by ``retry_with_backoff`` when every attempt has failed.

    The individual exceptions from each attempt are kept on ``self.exceptions``.
    """
    def __init__(self, fn, tries, exceptions):
        name = getattr(fn, '__name__', None) or str(fn)
        super().__init__('%s failed after %d tries' % (name, tries))
        self.exceptions = exceptions
def retry_with_backoff(fn, tries=10, wait=0.5, exception_classes=(Exception,)):
    """Call *fn* up to *tries* times, sleeping ``wait * 1.5**t`` between tries.

    Raises ``RetryError`` carrying every caught exception if all attempts fail.
    NOTE(review): the backoff sleep also runs after the final failed attempt,
    delaying the raise for no benefit.
    """
    exceptions = []
    for t in range(tries):
        try:
            return fn()
        except exception_classes as e:
            exceptions.append(e)
            time.sleep(wait * (1.5**t))
    raise RetryError(fn, tries, exceptions)
| true | true |
f73021df6f3092bb9478c485785dab873f73a920 | 7,244 | py | Python | pynodegl-utils/pynodegl_utils/misc.py | gopro/gopro-lib-node.gl | 60d163cf65385b772f1d83d125f0bfe09db5352b | [
"Apache-2.0"
] | 45 | 2017-02-07T13:13:52.000Z | 2022-03-18T07:12:39.000Z | pynodegl-utils/pynodegl_utils/misc.py | mrobertseidowsky-gpsw/gopro-lib-node.gl | fbe427e4ea108468a63cde5920cf6f6ce03478bc | [
"Apache-2.0"
] | 148 | 2017-02-02T18:35:32.000Z | 2022-03-28T13:53:22.000Z | pynodegl-utils/pynodegl_utils/misc.py | mrobertseidowsky-gpsw/gopro-lib-node.gl | fbe427e4ea108468a63cde5920cf6f6ce03478bc | [
"Apache-2.0"
] | 28 | 2017-02-01T10:06:47.000Z | 2022-03-18T07:12:26.000Z | #
# Copyright 2016 GoPro Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import os.path as op
import tempfile
import platform
import math
import inspect
import json
import subprocess
import random
import pynodegl as ngl
from collections import namedtuple
def scene(**controls):
    """Decorator factory turning a function into a UI-registered scene builder.

    Each keyword maps a parameter name of the decorated function to a widget
    control descriptor (``scene.Range``, ``scene.Color``, ...) describing how
    the UI edits that parameter.
    """
    def real_decorator(scene_func):
        def func_wrapper(idict=None, **extra_args):
            # *idict* may be a plain dict of SceneCfg fields or a SceneCfg.
            if idict is None:
                idict = {}
            elif isinstance(idict, SceneCfg):
                idict = idict.as_dict()
            scene_cfg = SceneCfg(**idict)
            scene = scene_func(scene_cfg, **extra_args)
            # Return the (possibly updated) configuration with the built
            # scene graph attached under the 'scene' key.
            odict = scene_cfg.as_dict()
            odict['scene'] = scene
            return odict
        # Construct widgets specs: one (name, default, control id, control
        # data) tuple per optional parameter that has a matching control.
        widgets_specs = []
        func_specs = inspect.getfullargspec(scene_func)
        if func_specs.defaults:
            nb_optionnals = len(func_specs.defaults)
            for i, key in enumerate(func_specs.args[-nb_optionnals:]):
                # Set controller defaults according to the function prototype
                control = controls.get(key)
                if control is not None:
                    default = func_specs.defaults[i]
                    ctl_id = control.__class__.__name__
                    ctl_data = control._asdict()
                    widgets_specs.append((key, default, ctl_id, ctl_data))
        # Transfers the widget specs to the UI.
        # We could use the return value but it's better if the user can still
        # call its decorated scene function transparently inside his own code
        # without getting garbage along the return value.
        func_wrapper.widgets_specs = widgets_specs
        # Flag the scene as a scene function so it's registered in the UI.
        func_wrapper.iam_a_ngl_scene_func = True
        # Inherit doc from original function
        func_wrapper.__doc__ = scene_func.__doc__
        return func_wrapper
    return real_decorator
# Widget-control descriptors: instances of these are passed as keyword
# arguments to @scene() to describe how the UI should expose the matching
# scene parameter (slider range, vector bounds, file filter, choice list, ...).
scene.Range = namedtuple('Range', 'range unit_base', defaults=([0, 1], 1))
scene.Vector = namedtuple('Vector', 'n minv maxv', defaults=(None, None))
scene.Color = namedtuple('Color', '')
scene.Bool = namedtuple('Bool', '')
scene.File = namedtuple('File', 'filter', defaults=('',))
scene.List = namedtuple('List', 'choices')
scene.Text = namedtuple('Text', '')
class Media:
    """Probe a media file with ffprobe and expose its basic properties."""

    def __init__(self, filename):
        self._filename = filename
        self._set_media_dimensions()

    def _set_media_dimensions(self):
        # Query ffprobe for the first video stream plus container-level data,
        # in JSON form, and cache what we need.
        cmd = ['ffprobe', '-v', '0',
               '-select_streams', 'v:0',
               '-of', 'json',
               '-show_streams', '-show_format',
               self._filename]
        probe = json.loads(subprocess.check_output(cmd))
        stream = probe['streams'][0]
        self._dimensions = (stream['width'], stream['height'])
        # Containers without a duration field default to 1 second.
        self._dur = float(probe['format'].get('duration', 1))
        self._framerate = tuple(int(part) for part in stream['avg_frame_rate'].split('/'))
        self._duration = self._dur

    @property
    def filename(self):
        """Path of the probed media file."""
        return self._filename

    @property
    def width(self):
        """Video width in pixels."""
        return self._dimensions[0]

    @property
    def height(self):
        """Video height in pixels."""
        return self._dimensions[1]

    @property
    def dimensions(self):
        """(width, height) tuple in pixels."""
        return self._dimensions

    @property
    def duration(self):
        """Duration in seconds (float)."""
        return self._duration

    @property
    def framerate(self):
        """Average frame rate as a (numerator, denominator) tuple."""
        return self._framerate

    @property
    def framerate_float(self):
        """Average frame rate as a single float."""
        num, den = self._framerate
        return num / float(den)
def get_nodegl_tempdir():
    """Return a 'nodegl' scratch directory under the system temp dir, creating it if needed."""
    path = op.join(tempfile.gettempdir(), 'nodegl')
    os.makedirs(path, exist_ok=True)
    return path
class SceneCfg:
    """Scene configuration: rendering parameters plus default media/shaders.

    Unknown keyword arguments are ignored; every field in ``_DEFAULT_FIELDS``
    becomes an attribute.  If no media is supplied, a synthetic test video is
    generated once with ffmpeg and cached in the nodegl temp directory.
    """
    # Generated-once synthetic media used when the caller provides none.
    _DEFAULT_MEDIA_FILE = op.join(get_nodegl_tempdir(), 'ngl-media.mp4')
    # Field name -> default value; drives both __init__ and as_dict().
    _DEFAULT_FIELDS = {
        'aspect_ratio': (16, 9),
        'duration': 30.0,
        'framerate': (60, 1),
        'backend': 'opengl',
        'samples': 0,
        'system': platform.system(),
        'files': [],
        'medias': None,
        'clear_color': (0.0, 0.0, 0.0, 1.0),
    }
    def __init__(self, **kwargs):
        for field, def_val in self._DEFAULT_FIELDS.items():
            val = kwargs.get(field, def_val)
            setattr(self, field, val)
        if self.medias is None:
            media_file = self._DEFAULT_MEDIA_FILE
            if not op.exists(self._DEFAULT_MEDIA_FILE):
                # Generate a testsrc2 clip matching the configured duration
                # and frame rate.
                ret = subprocess.call(['ffmpeg', '-nostdin', '-nostats', '-f', 'lavfi', '-i',
                                       'testsrc2=d=%d:r=%d/%d' % (int(math.ceil(self.duration)),
                                                                  self.framerate[0], self.framerate[1]),
                                       media_file])
                if ret:
                    raise Exception('Unable to create a media file using ffmpeg (ret=%d)' % ret)
            self.medias = [Media(media_file)]
        # Predictible random number generator
        self.rng = random.Random(0)
    @property
    def aspect_ratio_float(self):
        """Aspect ratio as a single float (width / height)."""
        return self.aspect_ratio[0] / float(self.aspect_ratio[1])
    def as_dict(self):
        """Return the configuration fields as a plain dict."""
        odict = {}
        for field in self._DEFAULT_FIELDS.keys():
            odict[field] = getattr(self, field)
        return odict
    def _get_shader(self, name, stype, shader_path):
        # Read '<name>.<stype>' from shader_path (default: bundled examples).
        filename = f'{name}.{stype}'
        if shader_path is None:
            shader_path = op.join(op.dirname(__file__), 'examples', 'shaders')
        with open(op.join(shader_path, filename)) as f:
            return f.read()
    def get_frag(self, name, shader_path=None):
        """Return the source of fragment shader *name*."""
        return self._get_shader(name, 'frag', shader_path)
    def get_vert(self, name, shader_path=None):
        """Return the source of vertex shader *name*."""
        return self._get_shader(name, 'vert', shader_path)
    def get_comp(self, name, shader_path=None):
        """Return the source of compute shader *name*."""
        return self._get_shader(name, 'comp', shader_path)
def get_viewport(width, height, aspect_ratio):
    """Fit the largest *aspect_ratio*-shaped rectangle inside width x height.

    Returns a centered ``(x, y, view_width, view_height)`` viewport
    (letterboxed or pillarboxed as needed).
    """
    num, den = aspect_ratio
    view_width = width
    view_height = width * den / num
    if view_height > height:
        # Too tall for the window: constrain by height instead.
        view_width = height * num / den
        view_height = height
    offset_x = (width - view_width) // 2
    offset_y = (height - view_height) // 2
    return (offset_x, offset_y, view_width, view_height)
def get_backend(backend):
    """Map a backend name ('opengl' or 'opengles') to its pynodegl constant.

    Raises KeyError for any other name.
    """
    if backend == 'opengl':
        return ngl.BACKEND_OPENGL
    if backend == 'opengles':
        return ngl.BACKEND_OPENGLES
    raise KeyError(backend)
| 33.077626 | 104 | 0.609194 |
import os
import os.path as op
import tempfile
import platform
import math
import inspect
import json
import subprocess
import random
import pynodegl as ngl
from collections import namedtuple
def scene(**controls):
def real_decorator(scene_func):
def func_wrapper(idict=None, **extra_args):
if idict is None:
idict = {}
elif isinstance(idict, SceneCfg):
idict = idict.as_dict()
scene_cfg = SceneCfg(**idict)
scene = scene_func(scene_cfg, **extra_args)
odict = scene_cfg.as_dict()
odict['scene'] = scene
return odict
widgets_specs = []
func_specs = inspect.getfullargspec(scene_func)
if func_specs.defaults:
nb_optionnals = len(func_specs.defaults)
for i, key in enumerate(func_specs.args[-nb_optionnals:]):
control = controls.get(key)
if control is not None:
default = func_specs.defaults[i]
ctl_id = control.__class__.__name__
ctl_data = control._asdict()
widgets_specs.append((key, default, ctl_id, ctl_data))
# call its decorated scene function transparently inside his own code
# without getting garbage along the return value.
func_wrapper.widgets_specs = widgets_specs
# Flag the scene as a scene function so it's registered in the UI.
func_wrapper.iam_a_ngl_scene_func = True
func_wrapper.__doc__ = scene_func.__doc__
return func_wrapper
return real_decorator
scene.Range = namedtuple('Range', 'range unit_base', defaults=([0, 1], 1))
scene.Vector = namedtuple('Vector', 'n minv maxv', defaults=(None, None))
scene.Color = namedtuple('Color', '')
scene.Bool = namedtuple('Bool', '')
scene.File = namedtuple('File', 'filter', defaults=('',))
scene.List = namedtuple('List', 'choices')
scene.Text = namedtuple('Text', '')
class Media:
def __init__(self, filename):
self._filename = filename
self._set_media_dimensions()
def _set_media_dimensions(self):
data = subprocess.check_output(['ffprobe', '-v', '0',
'-select_streams', 'v:0',
'-of', 'json',
'-show_streams', '-show_format',
self._filename])
data = json.loads(data)
st = data['streams'][0]
self._dimensions = (st['width'], st['height'])
self._duration = float(data['format'].get('duration', 1))
self._framerate = tuple(int(x) for x in st['avg_frame_rate'].split('/'))
@property
def filename(self):
return self._filename
@property
def width(self):
return self._dimensions[0]
@property
def height(self):
return self._dimensions[1]
@property
def dimensions(self):
return self._dimensions
@property
def duration(self):
return self._duration
@property
def framerate(self):
return self._framerate
@property
def framerate_float(self):
return self._framerate[0] / float(self._framerate[1])
def get_nodegl_tempdir():
    """Return a 'nodegl' scratch directory under the system temp dir, creating it if needed."""
    tmpdir = op.join(tempfile.gettempdir(), 'nodegl')
    os.makedirs(tmpdir, exist_ok=True)
    return tmpdir
class SceneCfg:
_DEFAULT_MEDIA_FILE = op.join(get_nodegl_tempdir(), 'ngl-media.mp4')
_DEFAULT_FIELDS = {
'aspect_ratio': (16, 9),
'duration': 30.0,
'framerate': (60, 1),
'backend': 'opengl',
'samples': 0,
'system': platform.system(),
'files': [],
'medias': None,
'clear_color': (0.0, 0.0, 0.0, 1.0),
}
def __init__(self, **kwargs):
for field, def_val in self._DEFAULT_FIELDS.items():
val = kwargs.get(field, def_val)
setattr(self, field, val)
if self.medias is None:
media_file = self._DEFAULT_MEDIA_FILE
if not op.exists(self._DEFAULT_MEDIA_FILE):
ret = subprocess.call(['ffmpeg', '-nostdin', '-nostats', '-f', 'lavfi', '-i',
'testsrc2=d=%d:r=%d/%d' % (int(math.ceil(self.duration)),
self.framerate[0], self.framerate[1]),
media_file])
if ret:
raise Exception('Unable to create a media file using ffmpeg (ret=%d)' % ret)
self.medias = [Media(media_file)]
self.rng = random.Random(0)
@property
def aspect_ratio_float(self):
return self.aspect_ratio[0] / float(self.aspect_ratio[1])
def as_dict(self):
odict = {}
for field in self._DEFAULT_FIELDS.keys():
odict[field] = getattr(self, field)
return odict
def _get_shader(self, name, stype, shader_path):
filename = f'{name}.{stype}'
if shader_path is None:
shader_path = op.join(op.dirname(__file__), 'examples', 'shaders')
with open(op.join(shader_path, filename)) as f:
return f.read()
def get_frag(self, name, shader_path=None):
return self._get_shader(name, 'frag', shader_path)
def get_vert(self, name, shader_path=None):
return self._get_shader(name, 'vert', shader_path)
def get_comp(self, name, shader_path=None):
return self._get_shader(name, 'comp', shader_path)
def get_viewport(width, height, aspect_ratio):
    """Return a centered (x, y, w, h) viewport of *aspect_ratio* fitting width x height."""
    view_width = width
    view_height = width * aspect_ratio[1] / aspect_ratio[0]
    if view_height > height:
        # Too tall for the window: constrain by height instead.
        view_height = height
        view_width = height * aspect_ratio[0] / aspect_ratio[1]
    view_x = (width - view_width) // 2
    view_y = (height - view_height) // 2
    return (view_x, view_y, view_width, view_height)
def get_backend(backend):
    """Map a backend name ('opengl' or 'opengles') to its pynodegl constant.

    Raises KeyError for any other name.
    """
    backend_map = {
        'opengl': ngl.BACKEND_OPENGL,
        'opengles': ngl.BACKEND_OPENGLES,
    }
    return backend_map[backend]
| true | true |
f73022031f62389d37c109bce546ac2a6294bc50 | 6,377 | py | Python | main_pretrain.py | xwyzsn/solo-learn | 16d021d8053439a3de205337ab2a11d191500b09 | [
"MIT"
] | 693 | 2021-05-31T15:48:32.000Z | 2022-03-31T17:12:46.000Z | main_pretrain.py | xwyzsn/solo-learn | 16d021d8053439a3de205337ab2a11d191500b09 | [
"MIT"
] | 151 | 2021-06-15T00:22:57.000Z | 2022-03-27T15:17:02.000Z | main_pretrain.py | xwyzsn/solo-learn | 16d021d8053439a3de205337ab2a11d191500b09 | [
"MIT"
] | 79 | 2021-06-02T10:31:15.000Z | 2022-03-25T01:25:09.000Z | # Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
from pprint import pprint
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.loggers import WandbLogger
from solo.args.setup import parse_args_pretrain
from solo.methods import METHODS
from solo.utils.auto_resumer import AutoResumer
try:
from solo.methods.dali import PretrainABC
except ImportError as e:
print(e)
_dali_avaliable = False
else:
_dali_avaliable = True
try:
from solo.utils.auto_umap import AutoUMAP
except ImportError:
_umap_available = False
else:
_umap_available = True
import types
from solo.utils.checkpointer import Checkpointer
from solo.utils.classification_dataloader import prepare_data as prepare_data_classification
from solo.utils.pretrain_dataloader import (
prepare_dataloader,
prepare_datasets,
prepare_n_crop_transform,
prepare_transform,
)
def main():
    """Entry point for self-supervised pretraining.

    Parses CLI arguments, builds the requested method (optionally DALI-backed),
    prepares the pretraining (and optional validation) dataloaders, wires up
    logging/checkpoint/UMAP callbacks, resolves checkpoint resumption and runs
    the PyTorch Lightning trainer.
    """
    seed_everything(5)
    args = parse_args_pretrain()
    assert args.method in METHODS, f"Choose from {METHODS.keys()}"
    if args.num_large_crops != 2:
        # Only W-MSE supports a number of large crops different from 2 here.
        assert args.method == "wmse"
    MethodClass = METHODS[args.method]
    if args.dali:
        assert (
            _dali_avaliable
        ), "Dali is not currently avaiable, please install it first with [dali]."
        # Mix in the DALI pretrain behaviour dynamically.
        MethodClass = types.new_class(f"Dali{MethodClass.__name__}", (PretrainABC, MethodClass))
    model = MethodClass(**args.__dict__)
    # pretrain dataloader (DALI builds its own pipeline, so skip in that case)
    if not args.dali:
        # asymmetric augmentations
        if args.unique_augs > 1:
            transform = [
                prepare_transform(args.dataset, **kwargs) for kwargs in args.transform_kwargs
            ]
        else:
            transform = [prepare_transform(args.dataset, **args.transform_kwargs)]
        transform = prepare_n_crop_transform(transform, num_crops_per_aug=args.num_crops_per_aug)
        if args.debug_augmentations:
            print("Transforms:")
            pprint(transform)
        train_dataset = prepare_datasets(
            args.dataset,
            transform,
            data_dir=args.data_dir,
            train_dir=args.train_dir,
            no_labels=args.no_labels,
        )
        train_loader = prepare_dataloader(
            train_dataset, batch_size=args.batch_size, num_workers=args.num_workers
        )
    # normal dataloader for when it is available
    if args.dataset == "custom" and (args.no_labels or args.val_dir is None):
        val_loader = None
    elif args.dataset in ["imagenet100", "imagenet"] and args.val_dir is None:
        val_loader = None
    else:
        _, val_loader = prepare_data_classification(
            args.dataset,
            data_dir=args.data_dir,
            train_dir=args.train_dir,
            val_dir=args.val_dir,
            batch_size=args.batch_size,
            num_workers=args.num_workers,
        )
    callbacks = []
    # wandb logging
    if args.wandb:
        wandb_logger = WandbLogger(
            name=args.name,
            project=args.project,
            entity=args.entity,
            offline=args.offline,
        )
        wandb_logger.watch(model, log="gradients", log_freq=100)
        wandb_logger.log_hyperparams(args)
        # lr logging
        lr_monitor = LearningRateMonitor(logging_interval="epoch")
        callbacks.append(lr_monitor)
    if args.save_checkpoint:
        # save checkpoint on last epoch only
        ckpt = Checkpointer(
            args,
            logdir=os.path.join(args.checkpoint_dir, args.method),
            frequency=args.checkpoint_frequency,
        )
        callbacks.append(ckpt)
    if args.auto_umap:
        assert (
            _umap_available
        ), "UMAP is not currently avaiable, please install it first with [umap]."
        auto_umap = AutoUMAP(
            args,
            logdir=os.path.join(args.auto_umap_dir, args.method),
            frequency=args.auto_umap_frequency,
        )
        callbacks.append(auto_umap)
    # 1.7 will deprecate resume_from_checkpoint, but for the moment
    # the argument is the same, but we need to pass it as ckpt_path to trainer.fit
    ckpt_path = None
    if args.auto_resume and args.resume_from_checkpoint is None:
        auto_resumer = AutoResumer(
            checkpoint_dir=os.path.join(args.checkpoint_dir, args.method),
            max_hours=args.auto_resumer_max_hours,
        )
        resume_from_checkpoint = auto_resumer.find_checkpoint(args)
        if resume_from_checkpoint is not None:
            print(
                "Resuming from previous checkpoint that matches specifications:",
                f"'{resume_from_checkpoint}'",
            )
            ckpt_path = resume_from_checkpoint
    elif args.resume_from_checkpoint is not None:
        ckpt_path = args.resume_from_checkpoint
        # Remove it so Trainer.from_argparse_args doesn't also receive it.
        del args.resume_from_checkpoint
    trainer = Trainer.from_argparse_args(
        args,
        logger=wandb_logger if args.wandb else None,
        callbacks=callbacks,
        enable_checkpointing=False,
    )
    if args.dali:
        # DALI supplies the train data internally.
        trainer.fit(model, val_dataloaders=val_loader, ckpt_path=ckpt_path)
    else:
        trainer.fit(model, train_loader, val_loader, ckpt_path=ckpt_path)
if __name__ == "__main__":
    main()
| 33.740741 | 97 | 0.682923 |
import os
from pprint import pprint
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.loggers import WandbLogger
from solo.args.setup import parse_args_pretrain
from solo.methods import METHODS
from solo.utils.auto_resumer import AutoResumer
try:
from solo.methods.dali import PretrainABC
except ImportError as e:
print(e)
_dali_avaliable = False
else:
_dali_avaliable = True
try:
from solo.utils.auto_umap import AutoUMAP
except ImportError:
_umap_available = False
else:
_umap_available = True
import types
from solo.utils.checkpointer import Checkpointer
from solo.utils.classification_dataloader import prepare_data as prepare_data_classification
from solo.utils.pretrain_dataloader import (
prepare_dataloader,
prepare_datasets,
prepare_n_crop_transform,
prepare_transform,
)
def main():
seed_everything(5)
args = parse_args_pretrain()
assert args.method in METHODS, f"Choose from {METHODS.keys()}"
if args.num_large_crops != 2:
assert args.method == "wmse"
MethodClass = METHODS[args.method]
if args.dali:
assert (
_dali_avaliable
), "Dali is not currently avaiable, please install it first with [dali]."
MethodClass = types.new_class(f"Dali{MethodClass.__name__}", (PretrainABC, MethodClass))
model = MethodClass(**args.__dict__)
if not args.dali:
if args.unique_augs > 1:
transform = [
prepare_transform(args.dataset, **kwargs) for kwargs in args.transform_kwargs
]
else:
transform = [prepare_transform(args.dataset, **args.transform_kwargs)]
transform = prepare_n_crop_transform(transform, num_crops_per_aug=args.num_crops_per_aug)
if args.debug_augmentations:
print("Transforms:")
pprint(transform)
train_dataset = prepare_datasets(
args.dataset,
transform,
data_dir=args.data_dir,
train_dir=args.train_dir,
no_labels=args.no_labels,
)
train_loader = prepare_dataloader(
train_dataset, batch_size=args.batch_size, num_workers=args.num_workers
)
if args.dataset == "custom" and (args.no_labels or args.val_dir is None):
val_loader = None
elif args.dataset in ["imagenet100", "imagenet"] and args.val_dir is None:
val_loader = None
else:
_, val_loader = prepare_data_classification(
args.dataset,
data_dir=args.data_dir,
train_dir=args.train_dir,
val_dir=args.val_dir,
batch_size=args.batch_size,
num_workers=args.num_workers,
)
callbacks = []
if args.wandb:
wandb_logger = WandbLogger(
name=args.name,
project=args.project,
entity=args.entity,
offline=args.offline,
)
wandb_logger.watch(model, log="gradients", log_freq=100)
wandb_logger.log_hyperparams(args)
lr_monitor = LearningRateMonitor(logging_interval="epoch")
callbacks.append(lr_monitor)
if args.save_checkpoint:
ckpt = Checkpointer(
args,
logdir=os.path.join(args.checkpoint_dir, args.method),
frequency=args.checkpoint_frequency,
)
callbacks.append(ckpt)
if args.auto_umap:
assert (
_umap_available
), "UMAP is not currently avaiable, please install it first with [umap]."
auto_umap = AutoUMAP(
args,
logdir=os.path.join(args.auto_umap_dir, args.method),
frequency=args.auto_umap_frequency,
)
callbacks.append(auto_umap)
ckpt_path = None
if args.auto_resume and args.resume_from_checkpoint is None:
auto_resumer = AutoResumer(
checkpoint_dir=os.path.join(args.checkpoint_dir, args.method),
max_hours=args.auto_resumer_max_hours,
)
resume_from_checkpoint = auto_resumer.find_checkpoint(args)
if resume_from_checkpoint is not None:
print(
"Resuming from previous checkpoint that matches specifications:",
f"'{resume_from_checkpoint}'",
)
ckpt_path = resume_from_checkpoint
elif args.resume_from_checkpoint is not None:
ckpt_path = args.resume_from_checkpoint
del args.resume_from_checkpoint
trainer = Trainer.from_argparse_args(
args,
logger=wandb_logger if args.wandb else None,
callbacks=callbacks,
enable_checkpointing=False,
)
if args.dali:
trainer.fit(model, val_dataloaders=val_loader, ckpt_path=ckpt_path)
else:
trainer.fit(model, train_loader, val_loader, ckpt_path=ckpt_path)
if __name__ == "__main__":
main()
| true | true |
f730226ebacc7664173975d612d2e800b7ac3472 | 20,723 | py | Python | Blackjack/blackjack.py | nairoukh-code/Python_Projects | 9a0e2adb6e352b301ed9e542be9c9f1cd16b95b0 | [
"MIT"
] | null | null | null | Blackjack/blackjack.py | nairoukh-code/Python_Projects | 9a0e2adb6e352b301ed9e542be9c9f1cd16b95b0 | [
"MIT"
] | null | null | null | Blackjack/blackjack.py | nairoukh-code/Python_Projects | 9a0e2adb6e352b301ed9e542be9c9f1cd16b95b0 | [
"MIT"
] | null | null | null | # took some ideas from this source ( https://dev.to/nexttech/build-a-blackjack-command-line-game-3o4b )
import random
from enum import Enum
from time import time
class Game_Status(Enum):
    """Round outcome from the player's point of view."""
    WIN = 1
    LOSE = 2
    PUSH = 3
class Card:
    """A single playing card identified by suit and face value."""

    def __init__(self, suit, value):
        """Store the suit (e.g. "Spades") and value (e.g. "A", "7", "10")."""
        self.suit = suit
        self.value = value

    def __repr__(self):
        """Render the card as e.g. "A of Spades"."""
        return f"{self.value} of {self.suit}"
class Deck:
    """A six-deck blackjack shoe.

    Face cards (J/Q/K) are represented by the string "10", so each suit of
    each deck contributes one ace and four ten-valued cards among its 13
    entries; the full shoe holds 13 * 4 * 6 = 312 cards.
    """

    def __init__(self):
        # Build the 312-card shoe and shuffle it once.
        self.cards = [Card(s, v) for s in ["Spades", "Clubs", "Hearts",
                                           "Diamonds"] for v in ["A", "2", "3", "4", "5", "6",
                                                                 "7", "8", "9", "10", "10", "10", "10"]] * 6
        random.shuffle(self.cards)

    def deal(self):
        """Deal the next card, rebuilding the shoe only when it is empty.

        Bug fix: the previous guard (``len(self.cards) > 1``) reshuffled a
        fresh shoe while one card was still left, silently discarding it;
        every card is now dealt before the shoe is rebuilt.
        """
        if not self.cards:
            self.__init__()
        return self.cards.pop(0)
class Hand:
def __init__(self, dealer=False):
self.dealer = dealer
self.cards = []
self.value = 0
def add_card(self, card):
self.cards.append(card)
def calculate_value(self):
self.value = 0
number_of_aces = 0
for card in self.cards:
if card.value.isnumeric():
self.value += int(card.value)
else:
if card.value == "A":
number_of_aces += 1
self.value += 11
else:
self.value += 10
while 12 > number_of_aces > 0 and self.value > 21:
self.value -= 10
number_of_aces -= 1
return self.value
def get_value(self):
self.calculate_value()
return self.value
def display(self):
if self.dealer:
print("hidden")
print(self.cards[1])
else:
for card in self.cards:
print(card)
print("Value:", self.get_value())
def final_display(self):
for card in self.cards:
print(card)
print("Value:", self.get_value())
def is_busted(self): # check if value is > 21
return self.get_value() > 21
def can_split(self):
return self.cards[0].value == self.cards[1].value
def can_not_split(self):
return self.cards[0].value != self.cards[1].value
def is_push(self, other):
return self.get_value() == other.get_value()
def player_win(self, other):
return self.get_value() > other.get_value()
def player_loss(self, other):
return self.get_value() < other.get_value()
def check_for_blackjack(self):
return self.get_value() == 21 and len(self.cards) == 2
class Game:
    """Interactive console blackjack: hit, stand, double down and split."""

    def print_status(self, status: Game_Status):
        """Print the message matching the round outcome *status*."""
        if status == Game_Status.WIN:
            print(" you win ! ")
        elif status == Game_Status.LOSE:
            print(" you lose !")
        elif status == Game_Status.PUSH:
            print(" push !")

    def play(self):
        """Run interactive rounds until the player declines to continue.

        Fixes relative to the original implementation:
        * "doubledown" and "split" were compared against the padded
          literals " doubledown " / " split ", so typing the full word
          silently did nothing (only "d"/"p" worked);
        * busting the second split hand cleared the *first* hand's loop
          flag, leaving the second-hand prompt looping forever;
        * the second split hand's bust message referred to the first hand.
        """
        playing = True
        while playing:
            # fresh shoe and hands every round
            self.deck = Deck()
            self.player_hand = Hand()
            self.dealer_hand = Hand(dealer=True)
            self.first_hand = Hand()
            self.second_hand = Hand()
            for i in range(2):
                self.player_hand.add_card(self.deck.deal())
                self.dealer_hand.add_card(self.deck.deal())
            print("Your hand is:")
            self.player_hand.display()
            print()
            print("Dealer's hand is:")
            self.dealer_hand.display()
            game_over = False
            # doubling down is only allowed before the player has hit
            can_play_double_down = True
            while not game_over:
                player_has_blackjack = self.player_hand.check_for_blackjack()
                dealer_has_blackjack = self.dealer_hand.check_for_blackjack()
                if player_has_blackjack or dealer_has_blackjack:
                    self.show_blackjack_results(player_has_blackjack, dealer_has_blackjack)
                    break
                choice = input("Please choose [Hit / Stand / DoubleDown/ Split] by typing the option").lower()
                while choice not in ["h", "s", "d", "p", "hit", "stand", "doubledown", "split"]:
                    choice = input("Please enter 'hit' or 'stand' or 'doubledown' or 'split' (or H/S/D/p) ").lower()
                if choice in ['hit', 'h']:
                    self.player_hand.add_card(self.deck.deal())
                    self.player_hand.display()
                    can_play_double_down = False
                    if self.player_hand.is_busted():
                        print("You have lost!")
                        game_over = True
                elif choice in ["stand", "s"]:
                    # dealer draws to 17 or more, then the hands are compared
                    while self.dealer_hand.get_value() < 17:
                        self.dealer_hand.add_card(self.deck.deal())
                    print("Final Results")
                    print("Your hand:", self.player_hand.get_value())
                    print("Dealer's hand:", self.dealer_hand.get_value())
                    if self.player_hand.is_busted():
                        self.print_status(Game_Status.LOSE)
                    elif self.dealer_hand.is_busted():
                        self.print_status(Game_Status.WIN)
                    elif self.player_hand.player_win(self.dealer_hand):
                        self.print_status(Game_Status.WIN)
                    elif self.player_hand.is_push(self.dealer_hand):
                        self.print_status(Game_Status.PUSH)
                    elif self.player_hand.player_loss(self.dealer_hand):
                        self.print_status(Game_Status.LOSE)
                    self.display_result()
                    game_over = True
                elif choice in ["doubledown", "d"] and can_play_double_down:
                    # exactly one extra card, then the round resolves
                    self.player_hand.add_card(self.deck.deal())
                    while self.dealer_hand.get_value() < 17:
                        self.dealer_hand.add_card(self.deck.deal())
                    if self.player_hand.is_busted():
                        self.print_status(Game_Status.LOSE)
                    elif self.dealer_hand.is_busted():
                        self.print_status(Game_Status.WIN)
                    elif self.player_hand.player_win(self.dealer_hand):
                        self.print_status(Game_Status.WIN)
                    elif self.player_hand.player_loss(self.dealer_hand):
                        self.print_status(Game_Status.LOSE)
                    elif self.player_hand.is_push(self.dealer_hand):
                        self.print_status(Game_Status.PUSH)
                    self.display_result()
                    game_over = True
                elif choice in ["doubledown", "d"] and not can_play_double_down:
                    print("you can not play double down")
                elif choice in ["split", "p"] and self.player_hand.can_split():
                    # split the pair, deal each new hand a second card,
                    # then play the hands one after the other
                    first_card = Card(self.player_hand.cards[0].suit, self.player_hand.cards[0].value)
                    second_card = Card(self.player_hand.cards[1].suit, self.player_hand.cards[1].value)
                    self.first_hand.add_card(first_card)
                    self.second_hand.add_card(second_card)
                    self.first_hand.add_card(self.deck.deal())
                    self.second_hand.add_card(self.deck.deal())
                    print("your first hand : ")
                    self.first_hand.final_display()
                    print("your second hand : ")
                    self.second_hand.final_display()
                    not_finish_first_loop = True
                    while not_finish_first_loop:
                        first_choice = input("Please choose [Hit / stand] for your first hand ").lower()
                        while first_choice not in ["h", "s", "hit", "stand"]:
                            first_choice = input("Please enter 'hit' or 'stand' (or H/S) for the first hand ").lower()
                        if first_choice in ['hit', 'h']:
                            self.first_hand.add_card(self.deck.deal())
                            self.first_hand.display()
                            if self.first_hand.is_busted():
                                print("You have lost in your first hand!")
                                not_finish_first_loop = False
                        else:
                            not_finish_first_loop = False
                    not_finish_second_loop = True
                    while not_finish_second_loop:
                        second_choice = input("Please choose [Hit / stand] for your second hand ").lower()
                        while second_choice not in ["h", "s", "hit", "stand"]:
                            second_choice = input("Please enter 'hit' or 'stand' (or H/S) for the second hand ").lower()
                        if second_choice in ['hit', 'h']:
                            self.second_hand.add_card(self.deck.deal())
                            self.second_hand.display()
                            if self.second_hand.is_busted():
                                print("You have lost in your second hand!")
                                # fixed: end the *second* hand's loop, not the first's
                                not_finish_second_loop = False
                        else:
                            not_finish_second_loop = False
                    if not not_finish_first_loop and not not_finish_second_loop:
                        while self.dealer_hand.get_value() < 17:
                            self.dealer_hand.add_card(self.deck.deal())
                        if self.dealer_hand.is_busted():
                            # NOTE(review): a busted split hand still "wins" here when
                            # the dealer busts — this matches the original behavior.
                            print("Final Results")
                            self.first_hand.final_display()
                            self.second_hand.final_display()
                            self.dealer_hand.final_display()
                            print(" you win in both hands")
                            game_over = True
                        else:
                            print("Final Results")
                            print("Your first hand:", self.first_hand.get_value())
                            print("Your second hand:", self.second_hand.get_value())
                            print("Dealer's hand:", self.dealer_hand.get_value())
                            if self.first_hand.is_busted():
                                print("you lost your first hand , your hand is over 21")
                            elif self.first_hand.player_win(self.dealer_hand):
                                print("You Win in your first hand!")
                            elif self.first_hand.player_loss(self.dealer_hand):
                                print("you lost your first hand ")
                            elif self.first_hand.is_push(self.dealer_hand):
                                print("push in the first hand!")
                            if self.second_hand.is_busted():
                                print("you lost your second hand , your hand is over 21")
                            elif self.second_hand.player_loss(self.dealer_hand):
                                print("you lost your second hand ")
                            elif self.second_hand.player_win(self.dealer_hand):
                                print("You Win in your second hand!")
                            elif self.second_hand.is_push(self.dealer_hand):
                                print("push in the second hand!")
                            game_over = True
                elif choice in ["split", "p"] and self.player_hand.can_not_split():
                    print(" no you can not split")
            again = input("Play Again? [Y/N] ")
            while again.lower() not in ["y", "n"]:
                again = input("Please enter Y or N ")
            if again.lower() == "n":
                print("Thanks for playing!")
                playing = False
            else:
                playing = True

    def display_result(self):
        """Print both final hands after a round resolves."""
        print("player hand")
        self.player_hand.final_display()
        print("dealer hand")
        self.dealer_hand.final_display()

    def show_blackjack_results(self, player_has_blackjack, dealer_has_blackjack):
        """Announce the outcome when at least one side was dealt a natural 21."""
        if player_has_blackjack and dealer_has_blackjack:
            print("Both players have blackjack! Draw!")
        elif player_has_blackjack:
            print("You have blackjack! You win!")
        elif dealer_has_blackjack:
            print("Dealer has blackjack! Dealer wins!")
class Result:
    """Outcome tally for one (dealer up-card, player hand value) bucket,
    split by the action taken (hit vs stand)."""

    def __init__(self, dealer_card, player_hand_value):
        self.dealer_card = dealer_card
        self.player_hand_value = player_hand_value
        # all six win/loss/draw counters start at zero
        self.hit_win_count = self.hit_loss_count = self.hit_draw_count = 0
        self.stand_win_count = self.stand_loss_count = self.stand_draw_count = 0
class Simulation:
    """Monte-Carlo driver: plays rounds with a coin-flip hit/stand policy and
    tallies the outcomes per (player hand value, dealer up-card) bucket."""

    def __init__(self):
        self.results = []
        self.deck = Deck()

    def simulation_rounds(self, num_of_rounds):
        """Simulate *num_of_rounds* rounds, then print the aggregated table."""
        self.start = time()
        for round in range(num_of_rounds):
            self.player_hand = Hand()
            self.dealer_hand = Hand(dealer=True)
            for i in range(2):
                self.player_hand.add_card(self.deck.deal())
                self.dealer_hand.add_card(self.deck.deal())
            player_hand_value = self.player_hand.get_value()
            # draw until the hand is worth at least 11 before choosing an action
            while self.player_hand.get_value() < 11:
                self.player_hand.add_card(self.deck.deal())
                player_hand_value = self.player_hand.get_value()
            # dealer always plays the house rule: hit to 17+
            while self.dealer_hand.get_value() < 17:
                self.dealer_hand.add_card(self.deck.deal())
            dealer_up_card = self.dealer_hand.cards[0].value
            # pick hit or stand uniformly at random (never hit a 21)
            actions = ["h", "s"]
            random.shuffle(actions)
            choice = actions.pop(0)
            if choice in ['h'] and player_hand_value != 21:
                self.player_hand.add_card(self.deck.deal())
                self.calculateResult('h', dealer_up_card, player_hand_value)
            else:
                self.calculateResult('s', dealer_up_card, player_hand_value)
        self.display_result()

    def calculateResult(self, action, dealer_up_card, player_hand_value):
        """Record this round's outcome in the bucket for (up-card, hand value),
        creating the bucket on first use."""
        result = self.if_there(dealer_up_card, player_hand_value)
        if result is None:
            result = Result(dealer_up_card, player_hand_value)
            self.results.append(result)
        if self.player_hand.is_busted():
            if action == 'h':
                result.hit_loss_count += 1
            else:
                result.stand_loss_count += 1
        elif self.dealer_hand.is_busted():
            if action == 'h':
                result.hit_win_count += 1
            else:
                result.stand_win_count += 1
        elif self.player_hand.check_for_blackjack():
            result.stand_win_count += 1
        elif self.player_hand.player_win(self.dealer_hand):
            if action == 'h':
                result.hit_win_count += 1
            else:
                result.stand_win_count += 1
        elif self.player_hand.is_push(self.dealer_hand):
            if action == 'h':
                result.hit_draw_count += 1
            else:
                result.stand_draw_count += 1
        elif self.player_hand.player_loss(self.dealer_hand):
            if action == 'h':
                result.hit_loss_count += 1
            else:
                result.stand_loss_count += 1

    def if_there(self, dealer_up_card, player_hand_value):
        """Return the existing Result bucket for this key, or None."""
        for result in self.results:
            if result.dealer_card == dealer_up_card and result.player_hand_value == player_hand_value:
                return result
        return None

    def display_result(self):
        """Print the per-bucket table, the aggregate totals and elapsed time.

        Bug fix: the stand-push total previously accumulated
        ``result.hit_draw_count``, double-counting hit pushes and never
        counting stand pushes; it now sums ``stand_draw_count``.
        """
        # stable two-pass sort: by dealer card within each player hand value
        self.results.sort(key=lambda x: x.dealer_card)
        self.results.sort(key=lambda x: x.player_hand_value)
        total_wins = 0
        total_loss = 0
        total_push = 0
        total_hit_win = 0
        total_hit_loss = 0
        total_hit_push = 0
        total_stand_win = 0
        total_stand_loss = 0
        total_stand_push = 0
        counter = 1
        dash = '-' * 118
        print(dash)
        print('{:<12s}{:>12s}{:>19s}{:>12s}{:>12s}{:>9s}{:>13s}{:>14s}{:>8}'.format("Counter", "Player Card Value",
                                                                                    "Dealer Up Card", "Hit Win",
                                                                                    "Hit Lose",
                                                                                    "Push", "Stand win",
                                                                                    "Stand Loss", "Push"))
        print(dash)
        for result in self.results:
            print('{:>1}{:>20}{:>20}{:>15}{:>12}{:>12}{:>10}{:>13}{:>12}'.format(counter, result.player_hand_value,
                                                                                 result.dealer_card,
                                                                                 result.hit_win_count,
                                                                                 result.hit_loss_count,
                                                                                 result.hit_draw_count,
                                                                                 result.stand_win_count,
                                                                                 result.stand_loss_count,
                                                                                 result.stand_draw_count))
            counter += 1
            total_wins += result.hit_win_count + result.stand_win_count
            total_loss += result.hit_loss_count + result.stand_loss_count
            total_push += result.hit_draw_count + result.stand_draw_count
            total_hit_win += result.hit_win_count
            total_hit_loss += result.hit_loss_count
            total_hit_push += result.hit_draw_count
            total_stand_win += result.stand_win_count
            total_stand_loss += result.stand_loss_count
            total_stand_push += result.stand_draw_count
        total = total_wins + total_loss + total_push
        print("total wins :", total_wins)
        print("total loss :", total_loss)
        print("total push :", total_push)
        print("total :", total)
        print()
        print("----------- details ------------")
        print("total hit wins :", total_hit_win)
        print("total hit loss :", total_hit_loss)
        print("total hit push :", total_hit_push)
        print("total stand wins :", total_stand_win)
        print("total stand loss :", total_stand_loss)
        print("total stand push :", total_stand_push)
        self.end = time()
        print("time " + str(self.end - self.start) )
class OurStrategy(Simulation):
    """Simulation variant whose single action is decided by a fixed lookup
    table over (player hand value, dealer up-card) instead of a coin flip."""

    # player hand value -> dealer up-cards against which we hit once
    _HIT_TABLE = {
        11: {"2", "4", "5", "6", "7", "8", "9", "10"},
        12: {"2", "7", "8", "9", "10", "A"},
        13: {"5", "7", "8", "9"},
        14: {"9"},
        15: {"A"},
    }

    def __init__(self):
        super().__init__()
        self.results = []
        self.deck = Deck()

    def simulation_rounds(self, num_of_rounds):
        """Simulate *num_of_rounds* rounds, consulting the hit table, then
        print the aggregated table."""
        self.start = time()
        for _ in range(num_of_rounds):
            self.player_hand = Hand()
            self.dealer_hand = Hand(dealer=True)
            for _ in range(2):
                self.player_hand.add_card(self.deck.deal())
                self.dealer_hand.add_card(self.deck.deal())
            player_hand_value = self.player_hand.get_value()
            # draw until the hand is worth at least 11 before deciding
            while self.player_hand.get_value() < 11:
                self.player_hand.add_card(self.deck.deal())
                player_hand_value = self.player_hand.get_value()
            # dealer plays the house rule: hit to 17+
            while self.dealer_hand.get_value() < 17:
                self.dealer_hand.add_card(self.deck.deal())
            dealer_up_card = self.dealer_hand.cards[0].value
            if dealer_up_card in self._HIT_TABLE.get(player_hand_value, ()):
                self.player_hand.add_card(self.deck.deal())
                self.calculateResult('h', dealer_up_card, player_hand_value)
            else:
                self.calculateResult('s', dealer_up_card, player_hand_value)
        self.display_result()
# Compare the random-action baseline with the fixed strategy, one million rounds each.
if __name__ == "__main__":
    x = Simulation()
    x.simulation_rounds(1000000)
    s = OurStrategy()
s.simulation_rounds(1000000) | 39.623327 | 120 | 0.509917 |
import random
from enum import Enum
from time import time
class Game_Status(Enum):
WIN = 1
LOSE = 2
PUSH = 3
class Card:
def __init__(self, suit, value):
self.suit = suit
self.value = value
def __repr__(self):
return " of ".join((self.value, self.suit))
class Deck:
def __init__(self):
self.cards = [Card(s, v) for s in ["Spades", "Clubs", "Hearts",
"Diamonds"] for v in ["A", "2", "3", "4", "5", "6",
"7", "8", "9", "10", "10", "10", "10"]] * 6
random.shuffle(self.cards)
def deal(self):
if len(self.cards) > 1:
return self.cards.pop(0)
else:
self.__init__()
return self.cards.pop(0)
class Hand:
def __init__(self, dealer=False):
self.dealer = dealer
self.cards = []
self.value = 0
def add_card(self, card):
self.cards.append(card)
def calculate_value(self):
self.value = 0
number_of_aces = 0
for card in self.cards:
if card.value.isnumeric():
self.value += int(card.value)
else:
if card.value == "A":
number_of_aces += 1
self.value += 11
else:
self.value += 10
while 12 > number_of_aces > 0 and self.value > 21:
self.value -= 10
number_of_aces -= 1
return self.value
def get_value(self):
self.calculate_value()
return self.value
def display(self):
if self.dealer:
print("hidden")
print(self.cards[1])
else:
for card in self.cards:
print(card)
print("Value:", self.get_value())
def final_display(self):
for card in self.cards:
print(card)
print("Value:", self.get_value())
def is_busted(self):
return self.get_value() > 21
def can_split(self):
return self.cards[0].value == self.cards[1].value
def can_not_split(self):
return self.cards[0].value != self.cards[1].value
def is_push(self, other):
return self.get_value() == other.get_value()
def player_win(self, other):
return self.get_value() > other.get_value()
def player_loss(self, other):
return self.get_value() < other.get_value()
def check_for_blackjack(self):
return self.get_value() == 21 and len(self.cards) == 2
class Game:
def print_status(self, status: Game_Status):
if status == Game_Status.WIN:
print(" you win ! ")
elif status == Game_Status.LOSE:
print(" you lose !")
elif status == Game_Status.PUSH:
print(" push !")
def play(self):
playing = True
while playing:
self.deck = Deck()
self.player_hand = Hand()
self.dealer_hand = Hand(dealer=True)
self.first_hand = Hand()
self.second_hand = Hand()
for i in range(2):
self.player_hand.add_card(self.deck.deal())
self.dealer_hand.add_card(self.deck.deal())
print("Your hand is:")
self.player_hand.display()
print()
print("Dealer's hand is:")
self.dealer_hand.display()
game_over = False
can_play_double_down = True
while not game_over:
player_has_blackjack = self.player_hand.check_for_blackjack()
dealer_has_blackjack = self.dealer_hand.check_for_blackjack()
if player_has_blackjack or dealer_has_blackjack:
self.show_blackjack_results(player_has_blackjack, dealer_has_blackjack)
break
choice = input("Please choose [Hit / Stand / DoubleDown/ Split] by typing the option").lower()
while choice not in ["h", "s", "d", "p", "hit", "stand", "doubledown", "split"]:
choice = input("Please enter 'hit' or 'stand' or 'doubledown' or 'split' (or H/S/D/p) ").lower()
if choice in ['hit', 'h']:
self.player_hand.add_card(self.deck.deal())
self.player_hand.display()
can_play_double_down = False
if self.player_hand.is_busted():
print("You have lost!")
game_over = True
elif choice in ["stand", "s"]:
while self.dealer_hand.get_value() < 17:
self.dealer_hand.add_card(self.deck.deal())
print("Final Results")
print("Your hand:", self.player_hand.get_value())
print("Dealer's hand:", self.dealer_hand.get_value())
if self.player_hand.is_busted():
self.print_status(Game_Status.LOSE)
elif self.dealer_hand.is_busted():
self.print_status(Game_Status.WIN)
elif self.player_hand.player_win(self.dealer_hand):
self.print_status(Game_Status.WIN)
elif self.player_hand.is_push(self.dealer_hand):
self.print_status(Game_Status.PUSH)
elif self.player_hand.player_loss(self.dealer_hand):
self.print_status(Game_Status.LOSE)
self.display_result()
game_over = True
elif choice in [" doubledown ", "d"] and can_play_double_down:
self.player_hand.add_card(self.deck.deal())
while self.dealer_hand.get_value() < 17:
self.dealer_hand.add_card(self.deck.deal())
if self.player_hand.is_busted():
self.print_status(Game_Status.LOSE)
elif self.dealer_hand.is_busted():
self.print_status(Game_Status.WIN)
elif self.player_hand.player_win(self.dealer_hand):
self.print_status(Game_Status.WIN)
elif self.player_hand.player_loss(self.dealer_hand):
self.print_status(Game_Status.LOSE)
elif self.player_hand.is_push(self.dealer_hand):
self.print_status(Game_Status.PUSH)
self.display_result()
game_over = True
elif choice in [" doubledown ", "d"] and not can_play_double_down:
print("you can not play double down")
elif choice in [" split ", "p"] and self.player_hand.can_split():
first_card = Card(self.player_hand.cards[0].suit, self.player_hand.cards[0].value)
second_card = Card(self.player_hand.cards[1].suit, self.player_hand.cards[1].value)
self.first_hand.add_card(first_card)
self.second_hand.add_card(second_card)
self.first_hand.add_card(self.deck.deal())
self.second_hand.add_card(self.deck.deal())
print("your first hand : ")
self.first_hand.final_display()
print("your second hand : ")
self.second_hand.final_display()
not_finish_first_loop = True
while not_finish_first_loop:
first_choice = input("Please choose [Hit / stand] for your first hand ").lower()
while first_choice not in ["h", "s", "hit", "stand"]:
first_choice = input("Please enter 'hit' or 'stand' (or H/S) for the first hand ").lower()
if first_choice in ['hit', 'h']:
self.first_hand.add_card(self.deck.deal())
self.first_hand.display()
if self.first_hand.is_busted():
print("You have lost in your first hand!")
not_finish_first_loop = False
else:
not_finish_first_loop = False
not_finish_second_loop = True
while not_finish_second_loop:
second_choice = input("Please choose [Hit / stand] for your second hand ").lower()
while second_choice not in ["h", "s", "hit", "stand"]:
second_choice = input("Please enter 'hit' or 'stand' (or H/S) for the second hand ").lower()
if second_choice in ['hit', 'h']:
self.second_hand.add_card(self.deck.deal())
self.second_hand.display()
if self.second_hand.is_busted():
print("You have lost in your second hand!")
not_finish_first_loop = False
else:
not_finish_second_loop = False
if not not_finish_first_loop and not not_finish_second_loop:
while self.dealer_hand.get_value() < 17:
self.dealer_hand.add_card(self.deck.deal())
if self.dealer_hand.is_busted():
print("Final Results")
self.first_hand.final_display()
self.second_hand.final_display()
self.dealer_hand.final_display()
print(" you win in both hands")
game_over = True
else:
print("Final Results")
print("Your first hand:", self.first_hand.get_value())
print("Your second hand:", self.second_hand.get_value())
print("Dealer's hand:", self.dealer_hand.get_value())
if self.first_hand.is_busted():
print("you lost your first hand , your hand is over 21")
elif self.first_hand.player_win(self.dealer_hand):
print("You Win in your first hand!")
elif self.first_hand.player_loss(self.dealer_hand):
print("you lost your first hand ")
elif self.first_hand.is_push(self.dealer_hand):
print("push in the first hand!")
if self.second_hand.is_busted():
print("you lost your first hand , your hand is over 21")
elif self.second_hand.player_loss(self.dealer_hand):
print("you lost your second hand ")
elif self.second_hand.player_win(self.dealer_hand):
print("You Win in your second hand!")
elif self.second_hand.is_push(self.dealer_hand):
print("push in the second hand!")
game_over = True
elif choice in [" split ", "p"] and self.player_hand.can_not_split():
print(" no you can not splet")
again = input("Play Again? [Y/N] ")
while again.lower() not in ["y", "n"]:
again = input("Please enter Y or N ")
if again.lower() == "n":
print("Thanks for playing!")
playing = False
else:
playing = True
def display_result(self):
print("player hand")
self.player_hand.final_display()
print("dealer hand")
self.dealer_hand.final_display()
def show_blackjack_results(self, player_has_blackjack, dealer_has_blackjack):
if player_has_blackjack and dealer_has_blackjack:
print("Both players have blackjack! Draw!")
elif player_has_blackjack:
print("You have blackjack! You win!")
elif dealer_has_blackjack:
print("Dealer has blackjack! Dealer wins!")
class Result:
def __init__(self, dealer_card, player_hand_value):
self.dealer_card = dealer_card
self.player_hand_value = player_hand_value
self.hit_win_count = 0
self.hit_loss_count = 0
self.hit_draw_count = 0
self.stand_win_count = 0
self.stand_loss_count = 0
self.stand_draw_count = 0
class Simulation:
def __init__(self):
self.results = []
self.deck = Deck()
def simulation_rounds(self, num_of_rounds):
self.start = time()
for round in range(num_of_rounds):
self.player_hand = Hand()
self.dealer_hand = Hand(dealer=True)
for i in range(2):
self.player_hand.add_card(self.deck.deal())
self.dealer_hand.add_card(self.deck.deal())
player_hand_value = self.player_hand.get_value()
while self.player_hand.get_value() < 11:
self.player_hand.add_card(self.deck.deal())
player_hand_value = self.player_hand.get_value()
while self.dealer_hand.get_value() < 17:
self.dealer_hand.add_card(self.deck.deal())
dealer_up_card = self.dealer_hand.cards[0].value
actions = ["h", "s"]
random.shuffle(actions)
choice = actions.pop(0)
if choice in ['h'] and player_hand_value != 21:
self.player_hand.add_card(self.deck.deal())
self.calculateResult('h', dealer_up_card, player_hand_value)
else:
self.calculateResult('s', dealer_up_card, player_hand_value)
self.display_result()
def calculateResult(self, action, dealer_up_card, player_hand_value):
result = self.if_there(dealer_up_card, player_hand_value)
if result is None:
result = Result(dealer_up_card, player_hand_value)
self.results.append(result)
if self.player_hand.is_busted():
if action == 'h':
result.hit_loss_count += 1
else:
result.stand_loss_count += 1
elif self.dealer_hand.is_busted():
if action == 'h':
result.hit_win_count += 1
else:
result.stand_win_count += 1
elif self.player_hand.check_for_blackjack():
result.stand_win_count += 1
elif self.player_hand.player_win(self.dealer_hand):
if action == 'h':
result.hit_win_count += 1
else:
result.stand_win_count += 1
elif self.player_hand.is_push(self.dealer_hand):
if action == 'h':
result.hit_draw_count += 1
else:
result.stand_draw_count += 1
elif self.player_hand.player_loss(self.dealer_hand):
if action == 'h':
result.hit_loss_count += 1
else:
result.stand_loss_count += 1
def if_there(self, dealer_up_card, player_hand_value):
if len(self.results) > 0:
for result in self.results:
if result.dealer_card == dealer_up_card and result.player_hand_value == player_hand_value:
return result
return None
def display_result(self):
self.results.sort(key=lambda x: x.dealer_card)
self.results.sort(key=lambda x: x.player_hand_value)
total_wins = 0
total_loss = 0
total_push = 0
total_hit_win = 0
total_hit_loss = 0
total_hit_push = 0
total_stand_win = 0
total_stand_loss = 0
total_stand_push = 0
counter = 1
dash = '-' * 118
print(dash)
print('{:<12s}{:>12s}{:>19s}{:>12s}{:>12s}{:>9s}{:>13s}{:>14s}{:>8}'.format("Counter", "Player Card Value",
"Dealer Up Card", "Hit Win",
"Hit Lose",
"Push", "Stand win",
"Stand Loss", "Push"))
print(dash)
for result in self.results:
print('{:>1}{:>20}{:>20}{:>15}{:>12}{:>12}{:>10}{:>13}{:>12}'.format(counter, result.player_hand_value,
result.dealer_card,
result.hit_win_count,
result.hit_loss_count,
result.hit_draw_count,
result.stand_win_count,
result.stand_loss_count,
result.stand_draw_count))
counter += 1
total_wins += result.hit_win_count + result.stand_win_count
total_loss += result.hit_loss_count + result.stand_loss_count
total_push += result.hit_draw_count + result.stand_draw_count
total_hit_win += result.hit_win_count
total_hit_loss += result.hit_loss_count
total_hit_push += result.hit_draw_count
total_stand_win += result.stand_win_count
total_stand_loss += result.stand_loss_count
total_stand_push += result.hit_draw_count
total = total_wins + total_loss + total_push
print("total wins :", total_wins)
print("total loss :", total_loss)
print("total push :", total_push)
print("total :", total)
print()
print("----------- details ------------")
print("total hit wis :", total_hit_win)
print("total hit loss :", total_hit_loss)
print("total hit push :", total_hit_push)
print("total stand wis :", total_stand_win)
print("total stand loss :", total_stand_loss)
print("total stand push :", total_stand_push)
self.end = time()
print("time " + str(self.end - self.start) )
class OurStrategy(Simulation):
def __init__(self):
super().__init__()
self.results = []
self.deck = Deck()
def simulation_rounds(self, num_of_rounds):
self.start = time()
for round in range(num_of_rounds):
self.player_hand = Hand()
self.dealer_hand = Hand(dealer=True)
for i in range(2):
self.player_hand.add_card(self.deck.deal())
self.dealer_hand.add_card(self.deck.deal())
player_hand_value = self.player_hand.get_value()
while self.player_hand.get_value() < 11:
self.player_hand.add_card(self.deck.deal())
player_hand_value = self.player_hand.get_value()
while self.dealer_hand.get_value() < 17:
self.dealer_hand.add_card(self.deck.deal())
dealer_up_card = self.dealer_hand.cards[0].value
if (player_hand_value == 11 and dealer_up_card in ["2", "4", "5", "6", "7", "8", "9", "10"]) or \
(player_hand_value == 12 and dealer_up_card in ["2", "7", "8", "9", "10", "A"]) or \
(player_hand_value == 13 and dealer_up_card in ["5", "7", "8", "9"]) or \
(player_hand_value == 14 and dealer_up_card == "9") or \
(player_hand_value == 15 and dealer_up_card == "A"):
self.player_hand.add_card(self.deck.deal())
self.calculateResult('h', dealer_up_card, player_hand_value)
else:
self.calculateResult('s', dealer_up_card, player_hand_value)
self.display_result()
if __name__ == "__main__":
x = Simulation()
x.simulation_rounds(1000000)
s = OurStrategy()
s.simulation_rounds(1000000) | true | true |
f73023b517d0845ea1d7905c59222d02f1e64c12 | 362 | py | Python | transcription_folder_to_sclite_hyp.py | c0louri/kaldi-grpc-server | d0f6881099423e6d08df74dc4217ddf3f43621a2 | [
"Apache-2.0"
] | null | null | null | transcription_folder_to_sclite_hyp.py | c0louri/kaldi-grpc-server | d0f6881099423e6d08df74dc4217ddf3f43621a2 | [
"Apache-2.0"
] | null | null | null | transcription_folder_to_sclite_hyp.py | c0louri/kaldi-grpc-server | d0f6881099423e6d08df74dc4217ddf3f43621a2 | [
"Apache-2.0"
] | null | null | null | import fileinput
import os
def to_sclite_line(trans):
    """Convert one transcription file into an sclite hypothesis line.

    Reads the hypothesis text from the file at path *trans* and appends the
    utterance id (the file's basename without extension) in parentheses,
    e.g. "hello world (utt1)".

    Bug fix: the raw file contents previously kept their trailing newline,
    which landed in the middle of the emitted line; the text is now stripped.
    """
    with open(trans, "r") as fd:
        hyp = fd.read().strip()
    _id, _ = os.path.splitext(os.path.basename(trans))
    return f"{hyp} ({_id})"
def main():
    """Read transcription file paths (one per line, from stdin or argv files)
    and print the corresponding sclite hypothesis lines."""
    with fileinput.input() as paths:
        hyp_lines = (to_sclite_line(p.strip()) for p in paths)
        for line in hyp_lines:
            print(line)
# Script entry point.
if __name__ == "__main__":
    main()
| 16.454545 | 54 | 0.596685 | import fileinput
import os
def to_sclite_line(trans):
with open(trans, "r") as fd:
hyp = fd.read()
_id, _ = os.path.splitext(os.path.basename(trans))
return f"{hyp} ({_id})"
def main():
with fileinput.input() as finput:
for ln in finput:
print(to_sclite_line(ln.strip()))
if __name__ == "__main__":
main()
| true | true |
f73024ea71ed1bb25989143787154d6617e53ae0 | 18,051 | py | Python | sdk/python/pulumi_azure_native/recoveryservices/v20210601/protection_container.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/recoveryservices/v20210601/protection_container.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/recoveryservices/v20210601/protection_container.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ProtectionContainerArgs', 'ProtectionContainer']
@pulumi.input_type
class ProtectionContainerArgs:
def __init__(__self__, *,
fabric_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
vault_name: pulumi.Input[str],
container_name: Optional[pulumi.Input[str]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union['AzureBackupServerContainerArgs', 'AzureIaaSClassicComputeVMContainerArgs', 'AzureIaaSComputeVMContainerArgs', 'AzureSQLAGWorkloadContainerProtectionContainerArgs', 'AzureSqlContainerArgs', 'AzureStorageContainerArgs', 'AzureVMAppContainerProtectionContainerArgs', 'AzureWorkloadContainerArgs', 'DpmContainerArgs', 'GenericContainerArgs', 'IaaSVMContainerArgs', 'MabContainerArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ProtectionContainer resource.
:param pulumi.Input[str] fabric_name: Fabric name associated with the container.
:param pulumi.Input[str] resource_group_name: The name of the resource group where the recovery services vault is present.
:param pulumi.Input[str] vault_name: The name of the recovery services vault.
:param pulumi.Input[str] container_name: Name of the container to be registered.
:param pulumi.Input[str] e_tag: Optional ETag.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Union['AzureBackupServerContainerArgs', 'AzureIaaSClassicComputeVMContainerArgs', 'AzureIaaSComputeVMContainerArgs', 'AzureSQLAGWorkloadContainerProtectionContainerArgs', 'AzureSqlContainerArgs', 'AzureStorageContainerArgs', 'AzureVMAppContainerProtectionContainerArgs', 'AzureWorkloadContainerArgs', 'DpmContainerArgs', 'GenericContainerArgs', 'IaaSVMContainerArgs', 'MabContainerArgs']] properties: ProtectionContainerResource properties
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "fabric_name", fabric_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "vault_name", vault_name)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if e_tag is not None:
pulumi.set(__self__, "e_tag", e_tag)
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="fabricName")
def fabric_name(self) -> pulumi.Input[str]:
"""
Fabric name associated with the container.
"""
return pulumi.get(self, "fabric_name")
@fabric_name.setter
def fabric_name(self, value: pulumi.Input[str]):
pulumi.set(self, "fabric_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group where the recovery services vault is present.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="vaultName")
def vault_name(self) -> pulumi.Input[str]:
"""
The name of the recovery services vault.
"""
return pulumi.get(self, "vault_name")
@vault_name.setter
def vault_name(self, value: pulumi.Input[str]):
pulumi.set(self, "vault_name", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the container to be registered.
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[pulumi.Input[str]]:
"""
Optional ETag.
"""
return pulumi.get(self, "e_tag")
@e_tag.setter
def e_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "e_tag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input[Union['AzureBackupServerContainerArgs', 'AzureIaaSClassicComputeVMContainerArgs', 'AzureIaaSComputeVMContainerArgs', 'AzureSQLAGWorkloadContainerProtectionContainerArgs', 'AzureSqlContainerArgs', 'AzureStorageContainerArgs', 'AzureVMAppContainerProtectionContainerArgs', 'AzureWorkloadContainerArgs', 'DpmContainerArgs', 'GenericContainerArgs', 'IaaSVMContainerArgs', 'MabContainerArgs']]]:
"""
ProtectionContainerResource properties
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input[Union['AzureBackupServerContainerArgs', 'AzureIaaSClassicComputeVMContainerArgs', 'AzureIaaSComputeVMContainerArgs', 'AzureSQLAGWorkloadContainerProtectionContainerArgs', 'AzureSqlContainerArgs', 'AzureStorageContainerArgs', 'AzureVMAppContainerProtectionContainerArgs', 'AzureWorkloadContainerArgs', 'DpmContainerArgs', 'GenericContainerArgs', 'IaaSVMContainerArgs', 'MabContainerArgs']]]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class ProtectionContainer(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
container_name: Optional[pulumi.Input[str]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
fabric_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['AzureBackupServerContainerArgs'], pulumi.InputType['AzureIaaSClassicComputeVMContainerArgs'], pulumi.InputType['AzureIaaSComputeVMContainerArgs'], pulumi.InputType['AzureSQLAGWorkloadContainerProtectionContainerArgs'], pulumi.InputType['AzureSqlContainerArgs'], pulumi.InputType['AzureStorageContainerArgs'], pulumi.InputType['AzureVMAppContainerProtectionContainerArgs'], pulumi.InputType['AzureWorkloadContainerArgs'], pulumi.InputType['DpmContainerArgs'], pulumi.InputType['GenericContainerArgs'], pulumi.InputType['IaaSVMContainerArgs'], pulumi.InputType['MabContainerArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Base class for container with backup items. Containers with specific workloads are derived from this class.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] container_name: Name of the container to be registered.
:param pulumi.Input[str] e_tag: Optional ETag.
:param pulumi.Input[str] fabric_name: Fabric name associated with the container.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Union[pulumi.InputType['AzureBackupServerContainerArgs'], pulumi.InputType['AzureIaaSClassicComputeVMContainerArgs'], pulumi.InputType['AzureIaaSComputeVMContainerArgs'], pulumi.InputType['AzureSQLAGWorkloadContainerProtectionContainerArgs'], pulumi.InputType['AzureSqlContainerArgs'], pulumi.InputType['AzureStorageContainerArgs'], pulumi.InputType['AzureVMAppContainerProtectionContainerArgs'], pulumi.InputType['AzureWorkloadContainerArgs'], pulumi.InputType['DpmContainerArgs'], pulumi.InputType['GenericContainerArgs'], pulumi.InputType['IaaSVMContainerArgs'], pulumi.InputType['MabContainerArgs']]] properties: ProtectionContainerResource properties
:param pulumi.Input[str] resource_group_name: The name of the resource group where the recovery services vault is present.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] vault_name: The name of the recovery services vault.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ProtectionContainerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Base class for container with backup items. Containers with specific workloads are derived from this class.
:param str resource_name: The name of the resource.
:param ProtectionContainerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ProtectionContainerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
container_name: Optional[pulumi.Input[str]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
fabric_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['AzureBackupServerContainerArgs'], pulumi.InputType['AzureIaaSClassicComputeVMContainerArgs'], pulumi.InputType['AzureIaaSComputeVMContainerArgs'], pulumi.InputType['AzureSQLAGWorkloadContainerProtectionContainerArgs'], pulumi.InputType['AzureSqlContainerArgs'], pulumi.InputType['AzureStorageContainerArgs'], pulumi.InputType['AzureVMAppContainerProtectionContainerArgs'], pulumi.InputType['AzureWorkloadContainerArgs'], pulumi.InputType['DpmContainerArgs'], pulumi.InputType['GenericContainerArgs'], pulumi.InputType['IaaSVMContainerArgs'], pulumi.InputType['MabContainerArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ProtectionContainerArgs.__new__(ProtectionContainerArgs)
__props__.__dict__["container_name"] = container_name
__props__.__dict__["e_tag"] = e_tag
if fabric_name is None and not opts.urn:
raise TypeError("Missing required property 'fabric_name'")
__props__.__dict__["fabric_name"] = fabric_name
__props__.__dict__["location"] = location
__props__.__dict__["properties"] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
if vault_name is None and not opts.urn:
raise TypeError("Missing required property 'vault_name'")
__props__.__dict__["vault_name"] = vault_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210601:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20161201:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20161201:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20201001:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20201001:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20201201:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20201201:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20210101:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210101:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20210201:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210201:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20210201preview:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210201preview:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20210210:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210210:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20210301:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210301:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20210401:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210401:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20210701:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210701:ProtectionContainer")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ProtectionContainer, __self__).__init__(
'azure-native:recoveryservices/v20210601:ProtectionContainer',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ProtectionContainer':
"""
Get an existing ProtectionContainer resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ProtectionContainerArgs.__new__(ProtectionContainerArgs)
__props__.__dict__["e_tag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return ProtectionContainer(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> pulumi.Output[Optional[str]]:
"""
Optional ETag.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name associated with the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Any]:
"""
ProtectionContainerResource properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
"""
return pulumi.get(self, "type")
| 56.943218 | 1,968 | 0.70046 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ProtectionContainerArgs', 'ProtectionContainer']
@pulumi.input_type
class ProtectionContainerArgs:
def __init__(__self__, *,
fabric_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
vault_name: pulumi.Input[str],
container_name: Optional[pulumi.Input[str]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union['AzureBackupServerContainerArgs', 'AzureIaaSClassicComputeVMContainerArgs', 'AzureIaaSComputeVMContainerArgs', 'AzureSQLAGWorkloadContainerProtectionContainerArgs', 'AzureSqlContainerArgs', 'AzureStorageContainerArgs', 'AzureVMAppContainerProtectionContainerArgs', 'AzureWorkloadContainerArgs', 'DpmContainerArgs', 'GenericContainerArgs', 'IaaSVMContainerArgs', 'MabContainerArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
pulumi.set(__self__, "fabric_name", fabric_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "vault_name", vault_name)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if e_tag is not None:
pulumi.set(__self__, "e_tag", e_tag)
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="fabricName")
def fabric_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "fabric_name")
@fabric_name.setter
def fabric_name(self, value: pulumi.Input[str]):
pulumi.set(self, "fabric_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="vaultName")
def vault_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "vault_name")
@vault_name.setter
def vault_name(self, value: pulumi.Input[str]):
pulumi.set(self, "vault_name", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "e_tag")
@e_tag.setter
def e_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "e_tag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input[Union['AzureBackupServerContainerArgs', 'AzureIaaSClassicComputeVMContainerArgs', 'AzureIaaSComputeVMContainerArgs', 'AzureSQLAGWorkloadContainerProtectionContainerArgs', 'AzureSqlContainerArgs', 'AzureStorageContainerArgs', 'AzureVMAppContainerProtectionContainerArgs', 'AzureWorkloadContainerArgs', 'DpmContainerArgs', 'GenericContainerArgs', 'IaaSVMContainerArgs', 'MabContainerArgs']]]:
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input[Union['AzureBackupServerContainerArgs', 'AzureIaaSClassicComputeVMContainerArgs', 'AzureIaaSComputeVMContainerArgs', 'AzureSQLAGWorkloadContainerProtectionContainerArgs', 'AzureSqlContainerArgs', 'AzureStorageContainerArgs', 'AzureVMAppContainerProtectionContainerArgs', 'AzureWorkloadContainerArgs', 'DpmContainerArgs', 'GenericContainerArgs', 'IaaSVMContainerArgs', 'MabContainerArgs']]]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class ProtectionContainer(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
container_name: Optional[pulumi.Input[str]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
fabric_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['AzureBackupServerContainerArgs'], pulumi.InputType['AzureIaaSClassicComputeVMContainerArgs'], pulumi.InputType['AzureIaaSComputeVMContainerArgs'], pulumi.InputType['AzureSQLAGWorkloadContainerProtectionContainerArgs'], pulumi.InputType['AzureSqlContainerArgs'], pulumi.InputType['AzureStorageContainerArgs'], pulumi.InputType['AzureVMAppContainerProtectionContainerArgs'], pulumi.InputType['AzureWorkloadContainerArgs'], pulumi.InputType['DpmContainerArgs'], pulumi.InputType['GenericContainerArgs'], pulumi.InputType['IaaSVMContainerArgs'], pulumi.InputType['MabContainerArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: ProtectionContainerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ProtectionContainerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
container_name: Optional[pulumi.Input[str]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
fabric_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['AzureBackupServerContainerArgs'], pulumi.InputType['AzureIaaSClassicComputeVMContainerArgs'], pulumi.InputType['AzureIaaSComputeVMContainerArgs'], pulumi.InputType['AzureSQLAGWorkloadContainerProtectionContainerArgs'], pulumi.InputType['AzureSqlContainerArgs'], pulumi.InputType['AzureStorageContainerArgs'], pulumi.InputType['AzureVMAppContainerProtectionContainerArgs'], pulumi.InputType['AzureWorkloadContainerArgs'], pulumi.InputType['DpmContainerArgs'], pulumi.InputType['GenericContainerArgs'], pulumi.InputType['IaaSVMContainerArgs'], pulumi.InputType['MabContainerArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ProtectionContainerArgs.__new__(ProtectionContainerArgs)
__props__.__dict__["container_name"] = container_name
__props__.__dict__["e_tag"] = e_tag
if fabric_name is None and not opts.urn:
raise TypeError("Missing required property 'fabric_name'")
__props__.__dict__["fabric_name"] = fabric_name
__props__.__dict__["location"] = location
__props__.__dict__["properties"] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
if vault_name is None and not opts.urn:
raise TypeError("Missing required property 'vault_name'")
__props__.__dict__["vault_name"] = vault_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210601:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20161201:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20161201:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20201001:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20201001:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20201201:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20201201:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20210101:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210101:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20210201:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210201:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20210201preview:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210201preview:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20210210:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210210:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20210301:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210301:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20210401:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210401:ProtectionContainer"), pulumi.Alias(type_="azure-native:recoveryservices/v20210701:ProtectionContainer"), pulumi.Alias(type_="azure-nextgen:recoveryservices/v20210701:ProtectionContainer")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ProtectionContainer, __self__).__init__(
'azure-native:recoveryservices/v20210601:ProtectionContainer',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ProtectionContainer':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ProtectionContainerArgs.__new__(ProtectionContainerArgs)
__props__.__dict__["e_tag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return ProtectionContainer(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Any]:
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
| true | true |
f73025866ffd701b1513e28d9ce8005b3fbbd1c8 | 12,554 | py | Python | compbio/gff.py | Open-Technology/Computational-Biology | f7628900f2d1d9ade60d7ad94f6b3d1022c92cb7 | [
"MIT"
] | 30 | 2015-05-08T19:21:15.000Z | 2022-03-11T21:30:33.000Z | compbio/gff.py | Open-Technology/Computational-Biology | f7628900f2d1d9ade60d7ad94f6b3d1022c92cb7 | [
"MIT"
] | null | null | null | compbio/gff.py | Open-Technology/Computational-Biology | f7628900f2d1d9ade60d7ad94f6b3d1022c92cb7 | [
"MIT"
] | 8 | 2015-05-08T02:02:33.000Z | 2021-06-10T17:51:03.000Z |
"""
GTF file format
http://genes.cse.wustl.edu/GTF22.html
<seqname> <source> <feature> <start> <end> <score> <strand> <frame> [attributes] [comments]
The following feature types are required: "CDS", "start_codon", "stop_codon".
The features "5UTR", "3UTR", "inter", "inter_CNS", "intron_CNS" and "exon" are
optional. All other features will be ignored. The types must have the correct
capitalization shown here.
<start> <end>
Integer start and end coordinates of the feature relative to the
beginning of the sequence named in <seqname>. <start> must be less than or equal
to <end>. Sequence numbering starts at 1. Values of <start> and <end> that extend
outside the reference sequence are technically acceptable, but they are
discouraged.
<score>
The score field indicates a degree of confidence in the feature's
existence and coordinates. The value of this field has no global scale but may
have relative significance when the <source> field indicates the prediction
program used to create this annotation. It may be a floating point number or
integer, and not necessary and may be replaced with a dot.
<frame>
0 indicates that the feature begins with a whole codon at the 5' most
base. 1 means that there is one extra base (the third base of a codon) before the
first whole codon and 2 means that there are two extra bases (the second and
third bases of the codon) before the first codon. Note that for reverse strand
features, the 5' most base is the <end> coordinate.
GFF3 File Format
http://song.sourceforge.net/gff3.shtml
this format says a feature can have multiple parents
"""
# python imports
import sys
# rasmus imports
from rasmus import util
# compbio imports
from compbio import regionlib
#=============================================================================
# Generic GFF fileformat
#
class Gff (object):
"""Most generic GFF format. Do not assume any format for attributes field"""
def __init__(self):
self.nondata = set(["comment", "source", "score", "frame", "species"])
def format_data(self, region, ignore=set()):
# unparsed attribute
return region.data.get(None, "")
def parse_data(self, text, ignore=set()):
return {None: text}
def read_region(self, line, region=None):
if region == None:
region = regionlib.Region()
# parse comment
pos = line.find("#")
if pos > -1:
region.data["comment"] = line[pos+1:]
line = line[:pos]
# split into columns
tokens = line.split("\t")
assert len(tokens) == 9, Exception("line does not have 9 columns")
# parse fields
region.seqname = tokens[0]
region.feature = tokens[2]
region.start = int(tokens[3])
region.end = int(tokens[4])
# parse strand
strand = tokens[6]
if strand == "+" or strand == "1":
region.strand = 1
elif strand == "-" or strand == "-1":
region.strand = -1
else:
region.strand = 0
# parse source
if tokens[1] != ".":
region.data["source"] = tokens[1]
# parse score
if tokens[5] != ".":
region.data["score"] = float(tokens[5])
# parse frame
if tokens[7] != ".":
region.data["frame"] = int(tokens[7])
# parse attributes
region.data.update(self.parse_data(tokens[8]))
# parse species
region.species = region.data.get("species", "")
return region
def write_region(self, region, out=sys.stdout):
score = str(region.data.get("score", "."))
source = str(region.data.get("source", "."))
if region.strand == 0:
strand = "."
elif region.strand == 1:
strand = "+"
else:
strand = "-"
frame = str(region.data.get("frame", "."))
attr = self.format_data(region)
if "comment" in region.data:
comment = " #%s" % region.data["comment"]
else:
comment = ""
out.write("%s\t%s\t%s\t%d\t%d\t%s\t%s\t%s\t%s%s\n" % \
(region.seqname,
source,
region.feature,
region.start,
region.end,
score,
strand,
frame,
attr,
comment))
def build_hierarchy(self, regions):
"""
Produces a hierachy from a list of regions
Returns list of regions with no parents (roots).
This base class function does nothing. See GFF3
"""
# do nothing
return []
# singleton
GFF = Gff()
#=============================================================================
# GTF fileformat
#
class Gtf (Gff):
def format_data(self, region):
lst = []
if region.species != "":
lst.append('species "%s";' % region.species)
for key, val in region.data.iteritems():
if key not in self.nondata:
lst.append('%s "%s";' % (key, str(val)))
return " ".join(lst)
def parse_data(self, text):
"""Parses an attribute field into a dict of key/value pairs"""
tokens = text.split(";")
data = {}
for attr in tokens[:-1]:
attr = attr.strip()
pos = attr.find(" ")
if pos == -1:
continue
key = attr[:pos]
val = attr[pos+1:].split("\"")[1]
data[key] = val
return data
def build_hierarchy(self, regions):
"""GTF has its own heirarchy system
It is currently not implemented"""
return []
GTF = Gtf()
#=============================================================================
# GFF3 fileformat
#
class Gff3 (Gff):
def format_data(self, region):
lst = []
if region.species != "":
lst.append("species=%s;" % region.species)
for key, val in region.data.iteritems():
if key not in self.nondata:
lst.append('%s=%s;' % (key, str(val)))
return "".join(lst)
def parse_data(self, text):
"""Parses an attribute field into a dict of key/value pairs"""
tokens = text.split(";")
data = {}
for attr in tokens:
attr = attr.strip()
if len(attr) == 0:
continue
pos = attr.index("=")
key = attr[:pos]
val = attr[pos+1:]
data[key] = val
return data
def build_hierarchy(self, regions):
"""
Produces a hierachy from a list of regions
Returns list of regions with no parents (roots).
Assumes ID and Parent attributes are present.
"""
# make a list of regions in case regions is not a list
if not isinstance(regions, list):
regions = list(regions)
# make lookup by id
roots = set()
lookup = {}
for region in regions:
if "ID" in region.data:
lookup[region.data["ID"]] = region
roots.add(region)
# build hierarchy
for region in regions:
if "Parent" in region.data:
parents = region.data["Parent"].split(",")
for parent in parents:
lookup[parent].add_child(region)
roots.remove(region)
# create roots list (regions in same order as they were passed)
regions2 = [x for x in regions if x in roots]
return regions2
GFF3 = Gff3()
#=============================================================================
# Gff Input/Output
#
def read_gff(filename, format=GFF3,
lineFilter=lambda x: True,
regionFilter=lambda x: True):
"""
Read all regions in a GFF file
"""
infile = iterGff(filename,
format,
lineFilter,
regionFilter)
return list(infile)
readGff = read_gff
def write_gff(filename, regions, format=GFF3):
"""
Write regions to a file stream
filename - a filename or file stream
regions - a list of Region objects
"""
out = util.open_stream(filename, "w")
for region in regions:
format.write_region(region, out=out)
writeGff = write_gff
def iter_gff(filename, format=GFF3,
             line_filter=lambda x: True,
             region_filter=lambda x: True,
             # backcompat
             lineFilter=None,
             regionFilter=None):
    """
    Iterate over the regions in a GFF file

    filename      - filename or open stream readable by util.open_stream
    format        - Gff instance that parses each line (default GFF3)
    line_filter   - predicate on the raw line; false lines are skipped
    region_filter - predicate on the parsed region; false regions skipped
    lineFilter / regionFilter - deprecated camelCase aliases; when given
                    they override the snake_case arguments
    """
    if lineFilter is not None:
        line_filter = lineFilter
    if regionFilter is not None:
        region_filter = regionFilter

    infile = util.open_stream(filename)
    lineno = 0
    for line in infile:
        lineno += 1
        line = line.rstrip("\n")

        # only continue processing if line is not comment and passes filter
        if len(line) == 0 or line[0] == "#" or not line_filter(line):
            continue

        # parse region
        try:
            region = format.read_region(line)
        except Exception as e:
            # Fix: 'except Exception, e' was Python-2-only syntax; the
            # 'as' form works on both Python 2.6+ and Python 3.
            raise Exception("%s\nError on line %d: %s" % (e, lineno, line))

        # only return region if region passes filter
        if region_filter(region):
            yield region
iterGff = iter_gff
#
# testing
#
if __name__ == "__main__":
    # Ad-hoc smoke test for the parsers above (Python-2 script: uses
    # print statements and helpers from rasmus.common such as strStream/pc).
    from rasmus.common import *
    import re
    # Minimal GTF sample: two tab-separated 9-column lines.
    TEST_GTF = \
"""
140\tTwinscan\tinter\t5141\t8522\t.\t-\t.\tgene_id ""; transcript_id "";
140\tTwinscan\tinter_CNS\t8523\t9711\t.\t-\t.\tgene_id ""; transcript_id "";
"""
    # Minimal GFF3 sample: one mRNA with three child exons.
    TEST_GFF3 = \
"""
chr2\tTwinscan\tmRNA\t5141\t8522\t.\t-\t.\tID=gene1;
chr2\tTwinscan\texon\t8523\t9711\t.\t-\t.\tID=exon1; Parent=gene1;
chr2\tTwinscan\texon\t8523\t9711\t.\t-\t.\tID=exon2; Parent=gene1;
chr2\tTwinscan\texon\t8523\t9711\t.\t-\t.\tID=exon3; Parent=gene1;
"""
    # The canonical EDEN gene example from the GFF3 spec; written with
    # spaces for readability and converted to tabs by the re.sub below.
    TEST_GFF3_2 = \
    re.sub(" +", "\t", """
##gff-version 3
##sequence-region ctg123 1 1497228
ctg123 . gene 1000 9000 . + . ID=gene00001;Name=EDEN
ctg123 . TF_binding_site 1000 1012 . + . ID=tfbs00001;Parent=gene00001
ctg123 . mRNA 1050 9000 . + . ID=mRNA00001;Parent=gene00001;Name=EDEN.1
ctg123 . mRNA 1050 9000 . + . ID=mRNA00002;Parent=gene00001;Name=EDEN.2
ctg123 . mRNA 1300 9000 . + . ID=mRNA00003;Parent=gene00001;Name=EDEN.3
ctg123 . exon 1300 1500 . + . ID=exon00001;Parent=mRNA00003
ctg123 . exon 1050 1500 . + . ID=exon00002;Parent=mRNA00001,mRNA00002
ctg123 . exon 3000 3902 . + . ID=exon00003;Parent=mRNA00001,mRNA00003
ctg123 . exon 5000 5500 . + . ID=exon00004;Parent=mRNA00001,mRNA00002,mRNA00003
ctg123 . exon 7000 9000 . + . ID=exon00005;Parent=mRNA00001,mRNA00002,mRNA00003
ctg123 . CDS 1201 1500 . + 0 ID=cds000011;Parent=mRNA00001;Name=edenprotein.1
ctg123 . CDS 3000 3902 . + 0 ID=cds000012;Parent=mRNA00001;Name=edenprotein.1
ctg123 . CDS 5000 5500 . + 0 ID=cds000013;Parent=mRNA00001;Name=edenprotein.1
ctg123 . CDS 7000 7600 . + 0 ID=cds000014;Parent=mRNA00001;Name=edenprotein.1
ctg123 . CDS 1201 1500 . + 0 ID=cds000021;Parent=mRNA00002;Name=edenprotein.2
ctg123 . CDS 5000 5500 . + 0 ID=cds000022;Parent=mRNA00002;Name=edenprotein.2
ctg123 . CDS 7000 7600 . + 0 ID=cds000023;Parent=mRNA00002;Name=edenprotein.2
ctg123 . CDS 3301 3902 . + 0 ID=cds000031;Parent=mRNA00003;Name=edenprotein.3
ctg123 . CDS 5000 5500 . + 2 ID=cds000032;Parent=mRNA00003;Name=edenprotein.3
ctg123 . CDS 7000 7600 . + 2 ID=cds000033;Parent=mRNA00003;Name=edenprotein.3
ctg123 . CDS 3391 3902 . + 0 ID=cds000041;Parent=mRNA00003;Name=edenprotein.4
ctg123 . CDS 5000 5500 . + 2 ID=cds000042;Parent=mRNA00003;Name=edenprotein.4
Ctg123 . CDS 7000 7600 . + 2 ID=cds000043;Parent=mRNA00003;Name=edenprotein.4
""")
    # Parse the spec example, rebuild the ID/Parent hierarchy, and dump it.
    regions = read_gff(strStream(TEST_GFF3_2), format=GFF3)
    regions2 = GFF3.build_hierarchy(regions)
    print regions2
    print regions2[0]
    pc(read_gff(strStream(TEST_GTF)))
| 28.926267 | 95 | 0.558945 |
"""
GTF file format
http://genes.cse.wustl.edu/GTF22.html
<seqname> <source> <feature> <start> <end> <score> <strand> <frame> [attributes] [comments]
The following feature types are required: "CDS", "start_codon", "stop_codon".
The features "5UTR", "3UTR", "inter", "inter_CNS", "intron_CNS" and "exon" are
optional. All other features will be ignored. The types must have the correct
capitalization shown here.
<start> <end>
Integer start and end coordinates of the feature relative to the
beginning of the sequence named in <seqname>. <start> must be less than or equal
to <end>. Sequence numbering starts at 1. Values of <start> and <end> that extend
outside the reference sequence are technically acceptable, but they are
discouraged.
<score>
The score field indicates a degree of confidence in the feature's
existence and coordinates. The value of this field has no global scale but may
have relative significance when the <source> field indicates the prediction
program used to create this annotation. It may be a floating point number or
integer, and not necessary and may be replaced with a dot.
<frame>
0 indicates that the feature begins with a whole codon at the 5' most
base. 1 means that there is one extra base (the third base of a codon) before the
first whole codon and 2 means that there are two extra bases (the second and
third bases of the codon) before the first codon. Note that for reverse strand
features, the 5' most base is the <end> coordinate.
GFF3 File Format
http://song.sourceforge.net/gff3.shtml
this format says a feature can have multiple parents
"""
# python imports
import sys
# rasmus imports
from rasmus import util
# compbio imports
from compbio import regionlib
#=============================================================================
# Generic GFF fileformat
#
class Gff (object):
    """Most generic GFF format. Do not assume any format for attributes field"""
    def __init__(self):
        # keys of region.data that are bookkeeping, not GFF attributes
        self.nondata = set(["comment", "source", "score", "frame", "species"])
    def format_data(self, region, ignore=set()):
        """Return the raw, unparsed attribute text for region (9th column)."""
        # 'ignore' is unused; kept (read-only) for interface compatibility
        # unparsed attribute
        return region.data.get(None, "")
    def parse_data(self, text, ignore=set()):
        """Keep the attribute column unparsed, stored under the None key."""
        return {None: text}
    def read_region(self, line, region=None):
        """Parse one tab-separated GFF line into 'region' (a new
        regionlib.Region when None) and return it.  Raises Exception
        when the line does not have exactly 9 columns."""
        if region is None:  # fix: identity check, not '== None'
            region = regionlib.Region()
        # parse comment
        pos = line.find("#")
        if pos > -1:
            region.data["comment"] = line[pos+1:]
            line = line[:pos]
        # split into columns
        tokens = line.split("\t")
        # Fix: was 'assert len(tokens) == 9, ...', which is silently
        # stripped under 'python -O'; raise explicitly instead (still an
        # Exception, so iter_gff's error wrapping is unchanged).
        if len(tokens) != 9:
            raise Exception("line does not have 9 columns")
        # parse fields
        region.seqname = tokens[0]
        region.feature = tokens[2]
        region.start = int(tokens[3])
        region.end = int(tokens[4])
        # parse strand ('+'/'1' -> 1, '-'/'-1' -> -1, anything else -> 0)
        strand = tokens[6]
        if strand == "+" or strand == "1":
            region.strand = 1
        elif strand == "-" or strand == "-1":
            region.strand = -1
        else:
            region.strand = 0
        # parse source ('.' means absent)
        if tokens[1] != ".":
            region.data["source"] = tokens[1]
        # parse score
        if tokens[5] != ".":
            region.data["score"] = float(tokens[5])
        # parse frame
        if tokens[7] != ".":
            region.data["frame"] = int(tokens[7])
        # parse attributes (dialect-specific; see parse_data overrides)
        region.data.update(self.parse_data(tokens[8]))
        # parse species
        region.species = region.data.get("species", "")
        return region
    def write_region(self, region, out=sys.stdout):
        """Write 'region' to stream 'out' as one 9-column GFF line,
        re-appending any stored '#comment'."""
        score = str(region.data.get("score", "."))
        source = str(region.data.get("source", "."))
        if region.strand == 0:
            strand = "."
        elif region.strand == 1:
            strand = "+"
        else:
            strand = "-"
        frame = str(region.data.get("frame", "."))
        attr = self.format_data(region)
        if "comment" in region.data:
            comment = " #%s" % region.data["comment"]
        else:
            comment = ""
        out.write("%s\t%s\t%s\t%d\t%d\t%s\t%s\t%s\t%s%s\n" % \
            (region.seqname,
             source,
             region.feature,
             region.start,
             region.end,
             score,
             strand,
             frame,
             attr,
             comment))
    def build_hierarchy(self, regions):
        """
        Produces a hierachy from a list of regions
        Returns list of regions with no parents (roots).
        This base class function does nothing. See GFF3
        """
        # do nothing
        return []
# singleton
GFF = Gff()
#=============================================================================
# GTF fileformat
#
class Gtf (Gff):
    """GTF dialect of Gff: attributes are 'key "value";' pairs."""

    def format_data(self, region):
        """Serialize region.species plus region.data as GTF attributes."""
        lst = []
        if region.species != "":
            lst.append('species "%s";' % region.species)
        # .items() instead of .iteritems() so this works on Python 2 and 3
        for key, val in region.data.items():
            if key not in self.nondata:
                lst.append('%s "%s";' % (key, str(val)))
        return " ".join(lst)
    def parse_data(self, text):
        """Parses an attribute field into a dict of key/value pairs"""
        tokens = text.split(";")
        data = {}
        for attr in tokens[:-1]:
            attr = attr.strip()
            pos = attr.find(" ")
            if pos == -1:
                continue
            key = attr[:pos]
            # Robustness fix: an attribute whose value is not quoted used
            # to crash with IndexError on split('"')[1]; skip it instead.
            quoted = attr[pos+1:].split("\"")
            if len(quoted) < 2:
                continue
            data[key] = quoted[1]
        return data
    def build_hierarchy(self, regions):
        """GTF has its own heirarchy system
        It is currently not implemented"""
        return []
GTF = Gtf()
#=============================================================================
# GFF3 fileformat
#
class Gff3 (Gff):
def format_data(self, region):
lst = []
if region.species != "":
lst.append("species=%s;" % region.species)
for key, val in region.data.iteritems():
if key not in self.nondata:
lst.append('%s=%s;' % (key, str(val)))
return "".join(lst)
def parse_data(self, text):
"""Parses an attribute field into a dict of key/value pairs"""
tokens = text.split(";")
data = {}
for attr in tokens:
attr = attr.strip()
if len(attr) == 0:
continue
pos = attr.index("=")
key = attr[:pos]
val = attr[pos+1:]
data[key] = val
return data
def build_hierarchy(self, regions):
"""
Produces a hierachy from a list of regions
Returns list of regions with no parents (roots).
Assumes ID and Parent attributes are present.
"""
# make a list of regions in case regions is not a list
if not isinstance(regions, list):
regions = list(regions)
# make lookup by id
roots = set()
lookup = {}
for region in regions:
if "ID" in region.data:
lookup[region.data["ID"]] = region
roots.add(region)
# build hierarchy
for region in regions:
if "Parent" in region.data:
parents = region.data["Parent"].split(",")
for parent in parents:
lookup[parent].add_child(region)
roots.remove(region)
# create roots list (regions in same order as they were passed)
regions2 = [x for x in regions if x in roots]
return regions2
GFF3 = Gff3()
#=============================================================================
# Gff Input/Output
#
def read_gff(filename, format=GFF3,
lineFilter=lambda x: True,
regionFilter=lambda x: True):
"""
Read all regions in a GFF file
"""
infile = iterGff(filename,
format,
lineFilter,
regionFilter)
return list(infile)
readGff = read_gff
def write_gff(filename, regions, format=GFF3):
"""
Write regions to a file stream
filename - a filename or file stream
regions - a list of Region objects
"""
out = util.open_stream(filename, "w")
for region in regions:
format.write_region(region, out=out)
writeGff = write_gff
def iter_gff(filename, format=GFF3,
line_filter=lambda x: True,
region_filter=lambda x: True,
# backcompat
lineFilter=None,
regionFilter=None):
"""
Iterate over the regions in a GFF file
"""
if lineFilter is not None:
line_filter = lineFilter
if regionFilter is not None:
region_filter = regionFilter
infile = util.open_stream(filename)
lineno = 0
for line in infile:
lineno += 1
line = line.rstrip("\n")
# only continue processing if line is not comment and passes filter
if len(line) == 0 or line[0] == "#" or not line_filter(line):
continue
# parse region
try:
region = format.read_region(line)
except Exception, e:
raise Exception("%s\nError on line %d: %s" % (e,lineno, line))
# only return region if region passes filter
if region_filter(region):
yield region
iterGff = iter_gff
#
# testing
#
if __name__ == "__main__":
from rasmus.common import *
import re
TEST_GTF = \
"""
140\tTwinscan\tinter\t5141\t8522\t.\t-\t.\tgene_id ""; transcript_id "";
140\tTwinscan\tinter_CNS\t8523\t9711\t.\t-\t.\tgene_id ""; transcript_id "";
"""
TEST_GFF3 = \
"""
chr2\tTwinscan\tmRNA\t5141\t8522\t.\t-\t.\tID=gene1;
chr2\tTwinscan\texon\t8523\t9711\t.\t-\t.\tID=exon1; Parent=gene1;
chr2\tTwinscan\texon\t8523\t9711\t.\t-\t.\tID=exon2; Parent=gene1;
chr2\tTwinscan\texon\t8523\t9711\t.\t-\t.\tID=exon3; Parent=gene1;
"""
TEST_GFF3_2 = \
re.sub(" +", "\t", """
##gff-version 3
##sequence-region ctg123 1 1497228
ctg123 . gene 1000 9000 . + . ID=gene00001;Name=EDEN
ctg123 . TF_binding_site 1000 1012 . + . ID=tfbs00001;Parent=gene00001
ctg123 . mRNA 1050 9000 . + . ID=mRNA00001;Parent=gene00001;Name=EDEN.1
ctg123 . mRNA 1050 9000 . + . ID=mRNA00002;Parent=gene00001;Name=EDEN.2
ctg123 . mRNA 1300 9000 . + . ID=mRNA00003;Parent=gene00001;Name=EDEN.3
ctg123 . exon 1300 1500 . + . ID=exon00001;Parent=mRNA00003
ctg123 . exon 1050 1500 . + . ID=exon00002;Parent=mRNA00001,mRNA00002
ctg123 . exon 3000 3902 . + . ID=exon00003;Parent=mRNA00001,mRNA00003
ctg123 . exon 5000 5500 . + . ID=exon00004;Parent=mRNA00001,mRNA00002,mRNA00003
ctg123 . exon 7000 9000 . + . ID=exon00005;Parent=mRNA00001,mRNA00002,mRNA00003
ctg123 . CDS 1201 1500 . + 0 ID=cds000011;Parent=mRNA00001;Name=edenprotein.1
ctg123 . CDS 3000 3902 . + 0 ID=cds000012;Parent=mRNA00001;Name=edenprotein.1
ctg123 . CDS 5000 5500 . + 0 ID=cds000013;Parent=mRNA00001;Name=edenprotein.1
ctg123 . CDS 7000 7600 . + 0 ID=cds000014;Parent=mRNA00001;Name=edenprotein.1
ctg123 . CDS 1201 1500 . + 0 ID=cds000021;Parent=mRNA00002;Name=edenprotein.2
ctg123 . CDS 5000 5500 . + 0 ID=cds000022;Parent=mRNA00002;Name=edenprotein.2
ctg123 . CDS 7000 7600 . + 0 ID=cds000023;Parent=mRNA00002;Name=edenprotein.2
ctg123 . CDS 3301 3902 . + 0 ID=cds000031;Parent=mRNA00003;Name=edenprotein.3
ctg123 . CDS 5000 5500 . + 2 ID=cds000032;Parent=mRNA00003;Name=edenprotein.3
ctg123 . CDS 7000 7600 . + 2 ID=cds000033;Parent=mRNA00003;Name=edenprotein.3
ctg123 . CDS 3391 3902 . + 0 ID=cds000041;Parent=mRNA00003;Name=edenprotein.4
ctg123 . CDS 5000 5500 . + 2 ID=cds000042;Parent=mRNA00003;Name=edenprotein.4
Ctg123 . CDS 7000 7600 . + 2 ID=cds000043;Parent=mRNA00003;Name=edenprotein.4
""")
regions = read_gff(strStream(TEST_GFF3_2), format=GFF3)
regions2 = GFF3.build_hierarchy(regions)
print regions2
print regions2[0]
pc(read_gff(strStream(TEST_GTF)))
| false | true |
f73025c4d9d89dd6e0b7fe5b4dd2de1d97525760 | 3,376 | py | Python | examples/nips17_adversarial_competition/dev_toolkit/sample_defenses/ens_adv_inception_resnet_v2/defense.py | luvrpg/cleverhans | 1f2ee7a04cff1ec54c96dcba5294f6e2d7780d42 | [
"MIT"
] | 50 | 2018-11-20T11:59:18.000Z | 2021-11-01T18:01:42.000Z | examples/nips17_adversarial_competition/dev_toolkit/sample_defenses/ens_adv_inception_resnet_v2/defense.py | luvrpg/cleverhans | 1f2ee7a04cff1ec54c96dcba5294f6e2d7780d42 | [
"MIT"
] | 2 | 2019-07-22T20:59:01.000Z | 2019-11-17T07:00:00.000Z | examples/nips17_adversarial_competition/dev_toolkit/sample_defenses/ens_adv_inception_resnet_v2/defense.py | luvrpg/cleverhans | 1f2ee7a04cff1ec54c96dcba5294f6e2d7780d42 | [
"MIT"
] | 20 | 2018-03-14T14:01:55.000Z | 2021-09-17T19:19:56.000Z | """Implementation of sample defense.
This defense loads inception resnet v2 checkpoint and classifies all images
using loaded checkpoint.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from scipy.misc import imread
import tensorflow as tf
import inception_resnet_v2
slim = tf.contrib.slim
tf.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.flags.DEFINE_string(
'checkpoint_path', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'input_dir', '', 'Input directory with images.')
tf.flags.DEFINE_string(
'output_file', '', 'Output file to save labels.')
tf.flags.DEFINE_integer(
'image_width', 299, 'Width of each input images.')
tf.flags.DEFINE_integer(
'image_height', 299, 'Height of each input images.')
tf.flags.DEFINE_integer(
'batch_size', 16, 'How many images process at one time.')
FLAGS = tf.flags.FLAGS
def load_images(input_dir, batch_shape):
  """Read png images from input directory in batches.

  Args:
    input_dir: input directory
    batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]

  Yields:
    filenames: list file names without path of each image; for the final
      partial batch this list can be shorter than batch_size, and only the
      first len(filenames) rows of 'images' are meaningful (the rest are 0).
    images: array with all images from this batch, pixel values scaled from
      [0, 255] into the [-1, 1] interval expected by the inception classifier.
  """
  batch_size = batch_shape[0]
  filenames = []
  images = np.zeros(batch_shape)
  n_filled = 0
  for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
    with tf.gfile.Open(filepath) as f:
      pixels = imread(f, mode='RGB').astype(np.float) / 255.0
    images[n_filled, :, :, :] = pixels * 2.0 - 1.0
    filenames.append(os.path.basename(filepath))
    n_filled += 1
    if n_filled == batch_size:
      yield filenames, images
      filenames = []
      images = np.zeros(batch_shape)
      n_filled = 0
  if n_filled > 0:
    yield filenames, images
def main(_):
  """Classify all images under FLAGS.input_dir with Inception ResNet v2
  restored from FLAGS.checkpoint_path and append one '<filename>,<label>'
  line per image to FLAGS.output_file."""
  batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
  # 1001 output slots -- presumably 1000 ImageNet classes plus a
  # background class at index 0 (inception convention); TODO confirm
  # against the checkpoint.
  num_classes = 1001
  tf.logging.set_verbosity(tf.logging.INFO)
  with tf.Graph().as_default():
    # Prepare graph
    x_input = tf.placeholder(tf.float32, shape=batch_shape)
    with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
      _, end_points = inception_resnet_v2.inception_resnet_v2(
          x_input, num_classes=num_classes, is_training=False)
    # argmax over the softmax output gives the predicted class per image
    predicted_labels = tf.argmax(end_points['Predictions'], 1)
    # Run computation
    saver = tf.train.Saver(slim.get_model_variables())
    session_creator = tf.train.ChiefSessionCreator(
        scaffold=tf.train.Scaffold(saver=saver),
        checkpoint_filename_with_path=FLAGS.checkpoint_path,
        master=FLAGS.master)
    with tf.train.MonitoredSession(session_creator=session_creator) as sess:
      with tf.gfile.Open(FLAGS.output_file, 'w') as out_file:
        for filenames, images in load_images(FLAGS.input_dir, batch_shape):
          labels = sess.run(predicted_labels, feed_dict={x_input: images})
          for filename, label in zip(filenames, labels):
            out_file.write('{0},{1}\n'.format(filename, label))
if __name__ == '__main__':
  tf.app.run()
| 29.876106 | 79 | 0.707642 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from scipy.misc import imread
import tensorflow as tf
import inception_resnet_v2
slim = tf.contrib.slim
tf.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.flags.DEFINE_string(
'checkpoint_path', '', 'Path to checkpoint for inception network.')
tf.flags.DEFINE_string(
'input_dir', '', 'Input directory with images.')
tf.flags.DEFINE_string(
'output_file', '', 'Output file to save labels.')
tf.flags.DEFINE_integer(
'image_width', 299, 'Width of each input images.')
tf.flags.DEFINE_integer(
'image_height', 299, 'Height of each input images.')
tf.flags.DEFINE_integer(
'batch_size', 16, 'How many images process at one time.')
FLAGS = tf.flags.FLAGS
def load_images(input_dir, batch_shape):
images = np.zeros(batch_shape)
filenames = []
idx = 0
batch_size = batch_shape[0]
for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
with tf.gfile.Open(filepath) as f:
image = imread(f, mode='RGB').astype(np.float) / 255.0
images[idx, :, :, :] = image * 2.0 - 1.0
filenames.append(os.path.basename(filepath))
idx += 1
if idx == batch_size:
yield filenames, images
filenames = []
images = np.zeros(batch_shape)
idx = 0
if idx > 0:
yield filenames, images
def main(_):
batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
num_classes = 1001
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
x_input = tf.placeholder(tf.float32, shape=batch_shape)
with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
_, end_points = inception_resnet_v2.inception_resnet_v2(
x_input, num_classes=num_classes, is_training=False)
predicted_labels = tf.argmax(end_points['Predictions'], 1)
saver = tf.train.Saver(slim.get_model_variables())
session_creator = tf.train.ChiefSessionCreator(
scaffold=tf.train.Scaffold(saver=saver),
checkpoint_filename_with_path=FLAGS.checkpoint_path,
master=FLAGS.master)
with tf.train.MonitoredSession(session_creator=session_creator) as sess:
with tf.gfile.Open(FLAGS.output_file, 'w') as out_file:
for filenames, images in load_images(FLAGS.input_dir, batch_shape):
labels = sess.run(predicted_labels, feed_dict={x_input: images})
for filename, label in zip(filenames, labels):
out_file.write('{0},{1}\n'.format(filename, label))
if __name__ == '__main__':
tf.app.run()
| true | true |
f730267630885ce1626fc7dd223bb44956acfce4 | 1,937 | py | Python | transformers/md5/server.py | NVIDIA/ais-etl | e60e4c5a8be208379916fc245fd874f670336ce2 | [
"MIT"
] | 4 | 2020-08-08T19:39:33.000Z | 2021-06-02T19:14:34.000Z | transformers/md5/server.py | NVIDIA/ais-tar2tf | 4e4a9e448d7249e3e19481ca32c3f53fe5022ecc | [
"MIT"
] | null | null | null | transformers/md5/server.py | NVIDIA/ais-tar2tf | 4e4a9e448d7249e3e19481ca32c3f53fe5022ecc | [
"MIT"
] | 4 | 2020-10-28T19:49:15.000Z | 2022-03-28T23:21:02.000Z | #!/usr/bin/env python
import argparse
import hashlib
import requests
import os
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
host_target = os.environ['AIS_TARGET_URL']
class Handler(BaseHTTPRequestHandler):
    """MD5 transformer endpoint.

    PUT: replies with the MD5 hex digest of the request body.
    GET /health: liveness probe, replies "OK".
    GET <path>: fetches host_target + path from the AIS target and
    replies with the MD5 hex digest of the fetched content.
    """

    def log_request(self, code='-', size='-'):
        # Don't log successful requests info. Unsuccessful logged by log_error().
        pass

    def _set_headers(self):
        # Shared 200/text-plain preamble for every successful reply.
        self.send_response(200)
        self.send_header("Content-Type", "text/plain")
        self.end_headers()

    def do_PUT(self):
        # Robustness fix: a request without a Content-Length header used
        # to raise KeyError; treat it as an empty body instead.
        content_length = int(self.headers.get('Content-Length', 0))
        post_data = self.rfile.read(content_length)
        md5 = hashlib.md5()
        md5.update(post_data)
        self._set_headers()
        self.wfile.write(md5.hexdigest().encode())

    def do_GET(self):
        if self.path == "/health":
            self._set_headers()
            self.wfile.write(b"OK")
            return

        x = requests.get(host_target + self.path)
        md5 = hashlib.md5()
        md5.update(x.content)
        self._set_headers()
        self.wfile.write(md5.hexdigest().encode())
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle requests in a separate thread.

    ThreadingMixIn makes the server dispatch each accepted connection to
    its own thread, so one slow request does not block the others.
    """
def run(addr="localhost", port=8000):
    """Bind the threaded server to (addr, port) and serve until killed."""
    httpd = ThreadedHTTPServer((addr, port), Handler)
    print(f"Starting HTTP server on {addr}:{port}")
    httpd.serve_forever()
if __name__ == "__main__":
    # CLI entry point: parse the bind address/port and serve forever.
    parser = argparse.ArgumentParser(description="Run a simple HTTP server")
    parser.add_argument(
        "-l",
        "--listen",
        default="localhost",
        help="Specify the IP address on which the server listens",
    )
    parser.add_argument(
        "-p",
        "--port",
        type=int,
        default=8000,
        help="Specify the port on which the server listens",
    )
    args = parser.parse_args()
    run(addr=args.listen, port=args.port)
| 27.28169 | 81 | 0.637584 |
import argparse
import hashlib
import requests
import os
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
host_target = os.environ['AIS_TARGET_URL']
class Handler(BaseHTTPRequestHandler):
def log_request(self, code='-', size='-'):
pass
def _set_headers(self):
self.send_response(200)
self.send_header("Content-Type", "text/plain")
self.end_headers()
def do_PUT(self):
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
md5 = hashlib.md5()
md5.update(post_data)
self._set_headers()
self.wfile.write(md5.hexdigest().encode())
def do_GET(self):
if self.path == "/health":
self._set_headers()
self.wfile.write(b"OK")
return
x = requests.get(host_target + self.path)
md5 = hashlib.md5()
md5.update(x.content)
self._set_headers()
self.wfile.write(md5.hexdigest().encode())
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
def run(addr="localhost", port=8000):
server = ThreadedHTTPServer((addr, port), Handler)
print(f"Starting HTTP server on {addr}:{port}")
server.serve_forever()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run a simple HTTP server")
parser.add_argument(
"-l",
"--listen",
default="localhost",
help="Specify the IP address on which the server listens",
)
parser.add_argument(
"-p",
"--port",
type=int,
default=8000,
help="Specify the port on which the server listens",
)
args = parser.parse_args()
run(addr=args.listen, port=args.port)
| true | true |
f7302747c69d4cf11b6c0eb5fdafe30a97f317af | 4,356 | py | Python | KoreanLipNet/training/overlapped_speakers/train.py | khujay15/koreanLipNet | 9db7524c7f3a577841cff88c7cd195e941c06fd6 | [
"MIT"
] | 1 | 2020-05-19T01:47:24.000Z | 2020-05-19T01:47:24.000Z | KoreanLipNet/training/overlapped_speakers/train.py | youda9/koreanLipNet | 46e1304477b2bd275206559e21815d204a5d1a72 | [
"MIT"
] | null | null | null | KoreanLipNet/training/overlapped_speakers/train.py | youda9/koreanLipNet | 46e1304477b2bd275206559e21815d204a5d1a72 | [
"MIT"
] | null | null | null | from keras.optimizers import Adam
from keras.callbacks import TensorBoard, CSVLogger, ModelCheckpoint
from lipnet.lipreading.generators import BasicGenerator
from lipnet.lipreading.callbacks import Statistics, Visualize
from lipnet.lipreading.curriculums import Curriculum
from lipnet.core.decoders import Decoder
from lipnet.lipreading.helpers import labels_to_text
from lipnet.utils.spell import Spell
from lipnet.model2 import LipNet
import numpy as np
import datetime
import os
import sys
np.random.seed(55) # seed value setting
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__)) # train.py path
PREDICT_GREEDY = False # ??
PREDICT_BEAM_WIDTH = 200 # ??
PREDICT_DICTIONARY = os.path.join(CURRENT_PATH,'..','..','common','dictionaries','grid.txt') # predict dic
def curriculum_rules(epoch):
    """Return the data-augmentation schedule for *epoch*.

    The schedule is constant: unlimited sentence length (-1), a 50%
    horizontal-flip chance and a 5% jitter chance, for every epoch.
    """
    rules = {}
    rules['sentence_length'] = -1
    rules['flip_probability'] = 0.5
    rules['jitter_probability'] = 0.05
    return rules
def train(run_name, speaker, start_epoch, stop_epoch, img_c, img_w, img_h, frames_n, absolute_max_string_len, minibatch_size):
    """Train LipNet on the dataset of a single speaker.

    run_name    - unique id used to name result/log/weight files
    speaker     - speaker directory name under this file's directory (e.g. 's1')
    start_epoch - resume point; when > 0 loads weights{start_epoch-1:02d}.h5
    stop_epoch  - final epoch passed to fit_generator
    img_c/img_w/img_h/frames_n - channels, width, height and frame count
                  fed to the generator and the network
    absolute_max_string_len - maximum label length
    minibatch_size - samples per batch
    """
    DATASET_DIR = os.path.join(CURRENT_PATH, speaker, 'datasets') #datasets dir path
    OUTPUT_DIR = os.path.join(CURRENT_PATH, speaker, 'results') #results dir path
    LOG_DIR = os.path.join(CURRENT_PATH, speaker, 'logs') #logs dir path
    curriculum = Curriculum(curriculum_rules)
    # Fix: the bare 'print "..."' statements were Python-2-only syntax;
    # the print(...) form below behaves identically on Python 2 and 3.
    print("Generator start -- ")
    lip_gen = BasicGenerator(dataset_path=DATASET_DIR,
                             minibatch_size=minibatch_size,
                             img_c=img_c, img_w=img_w, img_h=img_h, frames_n=frames_n,
                             absolute_max_string_len=absolute_max_string_len,
                             curriculum=curriculum, start_epoch=start_epoch).build()
    print("Generator finish --")
    lipnet = LipNet(img_c=img_c, img_w=img_w, img_h=img_h, frames_n=frames_n,
                    absolute_max_string_len=absolute_max_string_len, output_size=lip_gen.get_output_size())
    lipnet.summary()
    adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    # the loss calc occurs elsewhere, so use a dummy lambda func for the loss
    lipnet.model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=adam)
    # load weight if necessary
    if start_epoch > 0:
        weight_file = os.path.join(OUTPUT_DIR, os.path.join(run_name, 'weights%02d.h5' % (start_epoch - 1)))
        lipnet.model.load_weights(weight_file)
    print("spell start--")
    spell = Spell(path=PREDICT_DICTIONARY)
    print("spell finish--")
    print("decoder start--")
    decoder = Decoder(greedy=PREDICT_GREEDY, beam_width=PREDICT_BEAM_WIDTH,
                      postprocessors=[labels_to_text, spell.sentence])
    print("decoder finish--")
    # define callbacks
    statistics = Statistics(lipnet, lip_gen.next_val(), decoder, 256, output_dir=os.path.join(OUTPUT_DIR, run_name))
    visualize = Visualize(os.path.join(OUTPUT_DIR, run_name), lipnet, lip_gen.next_val(), decoder, num_display_sentences=minibatch_size)
    #tensorboard = TensorBoard(log_dir=os.path.join(LOG_DIR, run_name))
    csv_logger = CSVLogger(os.path.join(LOG_DIR, "{}-{}.csv".format('training',run_name)), separator=',', append=True)
    checkpoint = ModelCheckpoint(os.path.join(OUTPUT_DIR, run_name, "weights{epoch:02d}.h5"), monitor='val_loss', save_weights_only=True, mode='auto', period=1)
    # max_q_size/pickle_safe are the legacy Keras-2 argument names; left
    # unchanged for compatibility with the Keras version this repo pins.
    lipnet.model.fit_generator(generator=lip_gen.next_train(),
                               steps_per_epoch=lip_gen.default_training_steps, epochs=stop_epoch,
                               validation_data=lip_gen.next_val(), validation_steps=lip_gen.default_validation_steps,
                               callbacks=[checkpoint, statistics, visualize, lip_gen, csv_logger],
                               initial_epoch=start_epoch,
                               verbose=1,
                               max_q_size=5,
                               workers=2,
                               pickle_safe=True)
if __name__ == '__main__':
    run_name = datetime.datetime.now().strftime('%Y:%m:%d:%H:%M:%S') # now time ex)2019:05:15:16:14:20
    # Robustness fix: fail with a usage message instead of an IndexError
    # traceback when the speaker argument is missing.
    if len(sys.argv) < 2:
        sys.exit("usage: train.py <speaker>   (e.g. s1)")
    speaker = sys.argv[1] # speaker : s{1}
    train(run_name, speaker, 0, 5000, 3, 100, 50, 75, 32, 2) #5000 epoch color 100x50 75frames 32len string minibatch_size 2
    # train(run_name, speaker, 0, 5000, 3, 100, 50, 75, 32, 50) #5000 epoch color 100x50 75frames 32len string minibatch_size 50
| 49.5 | 161 | 0.688246 | from keras.optimizers import Adam
from keras.callbacks import TensorBoard, CSVLogger, ModelCheckpoint
from lipnet.lipreading.generators import BasicGenerator
from lipnet.lipreading.callbacks import Statistics, Visualize
from lipnet.lipreading.curriculums import Curriculum
from lipnet.core.decoders import Decoder
from lipnet.lipreading.helpers import labels_to_text
from lipnet.utils.spell import Spell
from lipnet.model2 import LipNet
import numpy as np
import datetime
import os
import sys
np.random.seed(55)
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
PREDICT_GREEDY = False
PREDICT_BEAM_WIDTH = 200
PREDICT_DICTIONARY = os.path.join(CURRENT_PATH,'..','..','common','dictionaries','grid.txt')
def curriculum_rules(epoch):
return { 'sentence_length': -1, 'flip_probability': 0.5, 'jitter_probability': 0.05 }
def train(run_name, speaker, start_epoch, stop_epoch, img_c, img_w, img_h, frames_n, absolute_max_string_len, minibatch_size):
DATASET_DIR = os.path.join(CURRENT_PATH, speaker, 'datasets')
OUTPUT_DIR = os.path.join(CURRENT_PATH, speaker, 'results')
LOG_DIR = os.path.join(CURRENT_PATH, speaker, 'logs')
curriculum = Curriculum(curriculum_rules)
print "Generator start -- "
lip_gen = BasicGenerator(dataset_path=DATASET_DIR,
minibatch_size=minibatch_size,
img_c=img_c, img_w=img_w, img_h=img_h, frames_n=frames_n,
absolute_max_string_len=absolute_max_string_len,
curriculum=curriculum, start_epoch=start_epoch).build()
print "Generator finish --"
lipnet = LipNet(img_c=img_c, img_w=img_w, img_h=img_h, frames_n=frames_n,
absolute_max_string_len=absolute_max_string_len, output_size=lip_gen.get_output_size())
lipnet.summary()
adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
lipnet.model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=adam)
if start_epoch > 0:
weight_file = os.path.join(OUTPUT_DIR, os.path.join(run_name, 'weights%02d.h5' % (start_epoch - 1)))
lipnet.model.load_weights(weight_file)
print "spell start--"
spell = Spell(path=PREDICT_DICTIONARY)
print "spell finish--"
print "decoder start--"
decoder = Decoder(greedy=PREDICT_GREEDY, beam_width=PREDICT_BEAM_WIDTH,
postprocessors=[labels_to_text, spell.sentence])
print "decoder finish--"
statistics = Statistics(lipnet, lip_gen.next_val(), decoder, 256, output_dir=os.path.join(OUTPUT_DIR, run_name))
visualize = Visualize(os.path.join(OUTPUT_DIR, run_name), lipnet, lip_gen.next_val(), decoder, num_display_sentences=minibatch_size)
csv_logger = CSVLogger(os.path.join(LOG_DIR, "{}-{}.csv".format('training',run_name)), separator=',', append=True)
checkpoint = ModelCheckpoint(os.path.join(OUTPUT_DIR, run_name, "weights{epoch:02d}.h5"), monitor='val_loss', save_weights_only=True, mode='auto', period=1)
lipnet.model.fit_generator(generator=lip_gen.next_train(),
steps_per_epoch=lip_gen.default_training_steps, epochs=stop_epoch,
validation_data=lip_gen.next_val(), validation_steps=lip_gen.default_validation_steps,
callbacks=[checkpoint, statistics, visualize, lip_gen, csv_logger],
initial_epoch=start_epoch,
verbose=1,
max_q_size=5,
workers=2,
pickle_safe=True)
if __name__ == '__main__':
run_name = datetime.datetime.now().strftime('%Y:%m:%d:%H:%M:%S')
speaker = sys.argv[1]
train(run_name, speaker, 0, 5000, 3, 100, 50, 75, 32, 2)
| false | true |
f73027e9e4952cf5a8f78b5328442c74a11c9e43 | 234 | py | Python | kwueBackend/kwue/controllers/list.py | bounswe/bounswe2016group4 | cbc8201aa86049b81f20ef44ee37eb065a469d46 | [
"Apache-2.0"
] | 6 | 2016-02-14T18:04:48.000Z | 2016-12-18T20:09:15.000Z | kwueBackend/kwue/controllers/list.py | bounswe/bounswe2016group4 | cbc8201aa86049b81f20ef44ee37eb065a469d46 | [
"Apache-2.0"
] | 113 | 2016-02-14T18:06:57.000Z | 2021-06-10T17:57:12.000Z | kwueBackend/kwue/controllers/list.py | bounswe/bounswe2016group4 | cbc8201aa86049b81f20ef44ee37eb065a469d46 | [
"Apache-2.0"
] | 1 | 2017-02-15T18:48:55.000Z | 2017-02-15T18:48:55.000Z | from django.shortcuts import render
def get_list(req):
return render(req, 'kwue/food.html', {})
def add_item(req):
return render(req, 'kwue/food.html', {})
def create_list(req):
return render(req, 'kwue/food.html', {})
| 21.272727 | 44 | 0.67094 | from django.shortcuts import render
def get_list(req):
return render(req, 'kwue/food.html', {})
def add_item(req):
return render(req, 'kwue/food.html', {})
def create_list(req):
    """Render the food page; list-creation logic is not implemented yet."""
    context = {}
    return render(req, 'kwue/food.html', context)
| true | true |
f73028328736056212bd4d00388f2744b0775730 | 29,066 | py | Python | mbpo/algorithms/meee.py | YaoYao1995/mbpo | b9571e469459ce3a632b19dc3fee68c9ac3857b2 | [
"MIT"
] | null | null | null | mbpo/algorithms/meee.py | YaoYao1995/mbpo | b9571e469459ce3a632b19dc3fee68c9ac3857b2 | [
"MIT"
] | null | null | null | mbpo/algorithms/meee.py | YaoYao1995/mbpo | b9571e469459ce3a632b19dc3fee68c9ac3857b2 | [
"MIT"
] | null | null | null | ## adapted from https://github.com/rail-berkeley/softlearning/blob/master/softlearning/algorithms/sac.py
import os
import math
import pickle
from collections import OrderedDict
from numbers import Number
from itertools import count
import gtimer as gt
import pdb
import numpy as np
import tensorflow as tf
from tensorflow.python.training import training_util
from softlearning.algorithms.rl_algorithm import RLAlgorithm
from softlearning.replay_pools.simple_replay_pool import WeightedReplayPool
from mbpo.models.constructor import construct_model, format_samples_for_training
from mbpo.models.fake_env import FakeEnv
from mbpo.utils.writer import Writer
from mbpo.utils.visualization import visualize_policy
from mbpo.utils.logging import Progress
import mbpo.utils.filesystem as filesystem
def td_target(reward, discount, next_value):
    """Return the one-step temporal-difference target: r + discount * V(s')."""
    discounted_bootstrap = discount * next_value
    return reward + discounted_bootstrap
class MEEE(RLAlgorithm):
    """ Model-Ensemble Exploration and Exploitation (MEEE)

    SAC-style actor-critic trained on a mixture of real transitions and short
    rollouts branched from an ensemble dynamics model (as in MBPO).  Each
    model-generated transition stores the ensemble's predictive std ('stds'),
    which down-weights uncertain samples in both the critic and actor losses.
    """

    def __init__(
            self,
            training_environment,
            evaluation_environment,
            policy,
            Qs,
            pool,
            static_fns,
            plotter=None,
            tf_summaries=False,
            lr=3e-4,
            reward_scale=1.0,
            target_entropy='auto',
            discount=0.99,
            tau=5e-3,
            target_update_interval=1,
            action_prior='uniform',
            reparameterize=False,
            store_extra_policy_info=False,

            deterministic=False,
            model_train_freq=250,
            num_networks=7,
            num_elites=5,
            model_retain_epochs=20,
            rollout_batch_size=100e3,
            real_ratio=0.1,
            # Tuple default avoids the shared-mutable-default pitfall; the
            # schedule is only ever unpacked, so any 4-sequence still works.
            rollout_schedule=(20, 100, 1, 1),
            hidden_dim=200,
            max_model_t=None,
            **kwargs
    ):
        """
        Args:
            training_environment (`SoftlearningEnv`): Environment used for training.
            evaluation_environment (`SoftlearningEnv`): Environment used for evaluation.
            policy: A policy function approximator.
            Qs: Q-function approximators. The min of these
                approximators will be used. Usage of at least two Q-functions
                improves performance by reducing overestimation bias.
            pool (`PoolBase`): Replay pool to add gathered samples to.
            static_fns: Environment-specific static reward/termination
                functions consumed by the learned dynamics model (`FakeEnv`).
            plotter (`QFPolicyPlotter`): Plotter instance to be used for
                visualizing Q-function during training.
            lr (`float`): Learning rate used for the function approximators.
            discount (`float`): Discount factor for Q-function updates.
            tau (`float`): Soft value function target update weight.
            target_update_interval ('int'): Frequency at which target network
                updates occur in iterations.
            reparameterize ('bool'): If True, we use a gradient estimator for
                the policy derived using the reparameterization trick. We use
                a likelihood ratio based estimator otherwise.
            model_train_freq ('int'): Environment steps between model retrains.
            rollout_batch_size: Number of start states branched into model
                rollouts after each model retrain.
            real_ratio ('float'): Fraction of each SAC batch drawn from the
                real-environment pool (remainder from the model pool).
            rollout_schedule: (min_epoch, max_epoch, min_length, max_length)
                linear schedule for the model rollout horizon.
            max_model_t: Optional wall-clock budget (seconds) for model training.
        """
        super(MEEE, self).__init__(**kwargs)

        # Learned ensemble dynamics model + analytic reward/termination fns.
        obs_dim = np.prod(training_environment.observation_space.shape)
        act_dim = np.prod(training_environment.action_space.shape)
        self._model = construct_model(obs_dim=obs_dim, act_dim=act_dim, hidden_dim=hidden_dim, num_networks=num_networks, num_elites=num_elites)
        self._static_fns = static_fns
        self.fake_env = FakeEnv(self._model, self._static_fns)

        self._rollout_schedule = rollout_schedule
        self._max_model_t = max_model_t

        self._model_retain_epochs = model_retain_epochs

        self._model_train_freq = model_train_freq
        self._rollout_batch_size = int(rollout_batch_size)
        self._deterministic = deterministic
        self._real_ratio = real_ratio

        self._log_dir = os.getcwd()
        self._writer = Writer(self._log_dir)

        self._training_environment = training_environment
        self._evaluation_environment = evaluation_environment
        self._policy = policy

        self._Qs = Qs
        # Target critics start as structural clones; weights are synced in
        # _init_training via _update_target(tau=1.0).
        self._Q_targets = tuple(tf.keras.models.clone_model(Q) for Q in Qs)

        self._pool = pool
        self._plotter = plotter
        self._tf_summaries = tf_summaries

        self._policy_lr = lr
        self._Q_lr = lr

        self._reward_scale = reward_scale
        # 'auto' heuristic: -|A|, the standard SAC target entropy.
        self._target_entropy = (
            -np.prod(self._training_environment.action_space.shape)
            if target_entropy == 'auto'
            else target_entropy)
        print('[ MEEE ] Target entropy: {}'.format(self._target_entropy))

        self._discount = discount
        self._tau = tau
        self._target_update_interval = target_update_interval
        self._action_prior = action_prior

        self._reparameterize = reparameterize
        self._store_extra_policy_info = store_extra_policy_info

        observation_shape = self._training_environment.active_observation_shape
        action_shape = self._training_environment.action_space.shape

        assert len(observation_shape) == 1, observation_shape
        self._observation_shape = observation_shape
        assert len(action_shape) == 1, action_shape
        self._action_shape = action_shape

        self._build()

    def _build(self):
        """Construct the TF training graph (placeholders, actor, critic)."""
        self._training_ops = {}

        self._init_global_step()
        self._init_placeholders()
        # Actor first: it creates log_alpha, which the critic target uses.
        self._init_actor_update()
        self._init_critic_update()

    def _train(self):
        """Return a generator that performs RL training.

        Yields per-epoch diagnostics dicts; the final yield additionally
        contains ``{'done': True}``.  Environments, policy and pools are the
        ones bound at construction time.
        """
        training_environment = self._training_environment
        evaluation_environment = self._evaluation_environment
        policy = self._policy
        pool = self._pool
        model_metrics = {}

        if not self._training_started:
            self._init_training()

            self._initial_exploration_hook(
                training_environment, self._initial_exploration_policy, pool)

        self.sampler.initialize(training_environment, policy, pool)

        gt.reset_root()
        gt.rename_root('RLAlgorithm')
        gt.set_def_unique(False)

        self._training_before_hook()

        for self._epoch in gt.timed_for(range(self._epoch, self._n_epochs)):
            self._epoch_before_hook()
            gt.stamp('epoch_before_hook')

            self._training_progress = Progress(self._epoch_length * self._n_train_repeat)
            start_samples = self.sampler._total_samples
            for i in count():
                samples_now = self.sampler._total_samples
                self._timestep = samples_now - start_samples

                if (samples_now >= start_samples + self._epoch_length
                    and self.ready_to_train):
                    break

                self._timestep_before_hook()
                gt.stamp('timestep_before_hook')

                # Periodically retrain the dynamics model on all real data and
                # refill the model pool with fresh branched rollouts.
                if self._timestep % self._model_train_freq == 0 and self._real_ratio < 1.0:
                    self._training_progress.pause()
                    print('[ MEEE ] log_dir: {} | ratio: {}'.format(self._log_dir, self._real_ratio))
                    print('[ MEEE ] Training model at epoch {} | freq {} | timestep {} (total: {}) | epoch train steps: {} (total: {})'.format(
                        self._epoch, self._model_train_freq, self._timestep, self._total_timestep, self._train_steps_this_epoch, self._num_train_steps)
                    )

                    model_train_metrics = self._train_model(batch_size=256, max_epochs=None, holdout_ratio=0.2, max_t=self._max_model_t)
                    model_metrics.update(model_train_metrics)
                    gt.stamp('epoch_train_model')

                    self._set_rollout_length()
                    self._reallocate_model_pool()
                    model_rollout_metrics = self._rollout_model(rollout_batch_size=self._rollout_batch_size, deterministic=self._deterministic)
                    model_metrics.update(model_rollout_metrics)
                    gt.stamp('epoch_rollout_model')

                    self._training_progress.resume()

                # UCB-style exploration: the sampled action is disturbed using
                # the model ensemble (fake_env) and the Q ensemble.
                self._do_sampling(timestep=self._total_timestep, disturb=True, fake_env=self.fake_env, Qs=self._Qs)
                gt.stamp('sample')

                if self.ready_to_train:
                    self._do_training_repeats(timestep=self._total_timestep)
                gt.stamp('train')

                self._timestep_after_hook()
                gt.stamp('timestep_after_hook')

            training_paths = self.sampler.get_last_n_paths(
                math.ceil(self._epoch_length / self.sampler._max_path_length))
            gt.stamp('training_paths')
            evaluation_paths = self._evaluation_paths(
                policy, evaluation_environment)
            gt.stamp('evaluation_paths')

            training_metrics = self._evaluate_rollouts(
                training_paths, training_environment)
            gt.stamp('training_metrics')
            if evaluation_paths:
                evaluation_metrics = self._evaluate_rollouts(
                    evaluation_paths, evaluation_environment)
                gt.stamp('evaluation_metrics')
            else:
                evaluation_metrics = {}

            self._epoch_after_hook(training_paths)
            gt.stamp('epoch_after_hook')

            sampler_diagnostics = self.sampler.get_diagnostics()

            diagnostics = self.get_diagnostics(
                iteration=self._total_timestep,
                batch=self._evaluation_batch(),
                training_paths=training_paths,
                evaluation_paths=evaluation_paths)

            time_diagnostics = gt.get_times().stamps.itrs

            diagnostics.update(OrderedDict((
                *(
                    (f'evaluation/{key}', evaluation_metrics[key])
                    for key in sorted(evaluation_metrics.keys())
                ),
                *(
                    (f'training/{key}', training_metrics[key])
                    for key in sorted(training_metrics.keys())
                ),
                *(
                    (f'times/{key}', time_diagnostics[key][-1])
                    for key in sorted(time_diagnostics.keys())
                ),
                *(
                    (f'sampler/{key}', sampler_diagnostics[key])
                    for key in sorted(sampler_diagnostics.keys())
                ),
                *(
                    (f'model/{key}', model_metrics[key])
                    for key in sorted(model_metrics.keys())
                ),
                ('epoch', self._epoch),
                ('timestep', self._timestep),
                ('timesteps_total', self._total_timestep),
                ('train-steps', self._num_train_steps),
            )))

            if self._eval_render_mode is not None and hasattr(
                    evaluation_environment, 'render_rollouts'):
                training_environment.render_rollouts(evaluation_paths)

            yield diagnostics

        self.sampler.terminate()

        self._training_after_hook()

        self._training_progress.close()

        yield {'done': True, **diagnostics}

    def train(self, *args, **kwargs):
        """Public entry point; simply runs the `_train` generator."""
        return self._train(*args, **kwargs)

    def _log_policy(self):
        """Pickle the current policy weights to <log_dir>/models/."""
        save_path = os.path.join(self._log_dir, 'models')
        filesystem.mkdir(save_path)
        weights = self._policy.get_weights()
        data = {'policy_weights': weights}
        full_path = os.path.join(save_path, 'policy_{}.pkl'.format(self._total_timestep))
        print('Saving policy to: {}'.format(full_path))
        # Context manager closes the handle even on error (previously the
        # file object from open() was never closed).
        with open(full_path, 'wb') as f:
            pickle.dump(data, f)

    def _log_model(self):
        """Persist the dynamics-model ensemble to <log_dir>/models/."""
        save_path = os.path.join(self._log_dir, 'models')
        filesystem.mkdir(save_path)
        print('Saving model to: {}'.format(save_path))
        self._model.save(save_path, self._total_timestep)

    def _set_rollout_length(self):
        """Linearly anneal the model rollout horizon with the epoch.

        Uses self._rollout_schedule = (min_epoch, max_epoch, min_len, max_len);
        the horizon is clamped to min_len before min_epoch and to max_len
        after max_epoch.
        """
        min_epoch, max_epoch, min_length, max_length = self._rollout_schedule
        if self._epoch <= min_epoch:
            y = min_length
        else:
            dx = (self._epoch - min_epoch) / (max_epoch - min_epoch)
            dx = min(dx, 1)
            y = dx * (max_length - min_length) + min_length

        self._rollout_length = int(y)
        print('[ Model Length ] Epoch: {} (min: {}, max: {}) | Length: {} (min: {} , max: {})'.format(
            self._epoch, min_epoch, max_epoch, self._rollout_length, min_length, max_length
        ))

    def _reallocate_model_pool(self):
        """(Re)size the model-rollout pool to retain the last
        `_model_retain_epochs` epochs of rollouts, preserving existing samples
        when the pool is resized."""
        obs_space = self._pool._observation_space
        act_space = self._pool._action_space

        rollouts_per_epoch = self._rollout_batch_size * self._epoch_length / self._model_train_freq
        model_steps_per_epoch = int(self._rollout_length * rollouts_per_epoch)
        new_pool_size = self._model_retain_epochs * model_steps_per_epoch

        if not hasattr(self, '_model_pool'):
            print('[ MEEE ] Initializing new model pool with size {:.2e}'.format(
                new_pool_size
            ))
            self._model_pool = WeightedReplayPool(obs_space, act_space, new_pool_size)
        elif self._model_pool._max_size != new_pool_size:
            print('[ MEEE ] Updating model pool | {:.2e} --> {:.2e}'.format(
                self._model_pool._max_size, new_pool_size
            ))
            samples = self._model_pool.return_all_samples()
            new_pool = WeightedReplayPool(obs_space, act_space, new_pool_size)
            new_pool.add_samples(samples)
            assert self._model_pool.size == new_pool.size
            self._model_pool = new_pool

    def _train_model(self, **kwargs):
        """Fit the dynamics ensemble on all real transitions; returns the
        model-training metrics dict."""
        env_samples = self._pool.return_all_samples()
        train_inputs, train_outputs = format_samples_for_training(env_samples)
        model_metrics = self._model.train(train_inputs, train_outputs, **kwargs)
        return model_metrics

    def _rollout_model(self, rollout_batch_size, **kwargs):
        """Branch short policy rollouts from real states through the learned
        model and store them (with ensemble stds) in the model pool.

        Rollouts stop early once every branch has terminated.
        """
        print('[ Model Rollout ] Starting | Epoch: {} | Rollout length: {} | Batch size: {}'.format(
            self._epoch, self._rollout_length, rollout_batch_size
        ))
        batch = self.sampler.random_batch(rollout_batch_size)
        obs = batch['observations']
        steps_added = []
        for i in range(self._rollout_length):
            act = self._policy.actions_np(obs)

            next_obs, rew, term, info = self.fake_env.step(obs, act, **kwargs)
            steps_added.append(len(obs))

            # 'stds' carries the ensemble's predictive deviation; the SAC
            # losses use it to down-weight uncertain model samples.
            samples = {'observations': obs, 'actions': act, 'next_observations': next_obs, 'rewards': rew, 'terminals': term, 'stds': info['dev'][:,None]}
            self._model_pool.add_samples(samples)

            nonterm_mask = ~term.squeeze(-1)
            if nonterm_mask.sum() == 0:
                print('[ Model Rollout ] Breaking early: {} | {} / {}'.format(i, nonterm_mask.sum(), nonterm_mask.shape))
                break

            obs = next_obs[nonterm_mask]

        mean_rollout_length = sum(steps_added) / rollout_batch_size
        rollout_stats = {'mean_rollout_length': mean_rollout_length}
        print('[ Model Rollout ] Added: {:.1e} | Model pool: {:.1e} (max {:.1e}) | Length: {} | Train rep: {}'.format(
            sum(steps_added), self._model_pool.size, self._model_pool._max_size, mean_rollout_length, self._n_train_repeat
        ))
        return rollout_stats

    def _visualize_model(self, env, timestep):
        """Render model-vs-policy visualizations, restoring the env's mujoco
        state afterwards so visualization does not perturb training."""
        ## save env state
        state = env.unwrapped.state_vector()
        qpos_dim = len(env.unwrapped.sim.data.qpos)
        qpos = state[:qpos_dim]
        qvel = state[qpos_dim:]

        print('[ Visualization ] Starting | Epoch {} | Log dir: {}\n'.format(self._epoch, self._log_dir))
        visualize_policy(env, self.fake_env, self._policy, self._writer, timestep)
        print('[ Visualization ] Done')
        ## set env state
        env.unwrapped.set_state(qpos, qvel)

    def _training_batch(self, batch_size=None):
        """Sample a SAC batch: `real_ratio` of it from the env pool, the rest
        from the model pool."""
        batch_size = batch_size or self.sampler._batch_size
        env_batch_size = int(batch_size*self._real_ratio)
        model_batch_size = batch_size - env_batch_size

        ## can sample from the env pool even if env_batch_size == 0
        env_batch = self._pool.random_batch(env_batch_size)

        if model_batch_size > 0:
            model_batch = self._model_pool.random_batch(model_batch_size)

            keys = env_batch.keys()
            batch = {k: np.concatenate((env_batch[k], model_batch[k]), axis=0) for k in keys}
        else:
            ## if real_ratio == 1.0, no model pool was ever allocated,
            ## so skip the model pool sampling
            batch = env_batch
        return batch

    def _init_global_step(self):
        """Create/fetch the TF global step and register its increment op."""
        self.global_step = training_util.get_or_create_global_step()
        self._training_ops.update({
            'increment_global_step': training_util._increment_global_step(1)
        })

    def _init_placeholders(self):
        """Create input placeholders for the SAC algorithm.

        Creates `tf.placeholder`s for:
            - observation
            - next observation
            - action
            - reward
            - terminals
            - stds (model-ensemble predictive std per sample)
        """
        self._iteration_ph = tf.placeholder(
            tf.int64, shape=None, name='iteration')

        self._observations_ph = tf.placeholder(
            tf.float32,
            shape=(None, *self._observation_shape),
            name='observation',
        )

        self._next_observations_ph = tf.placeholder(
            tf.float32,
            shape=(None, *self._observation_shape),
            name='next_observation',
        )

        self._actions_ph = tf.placeholder(
            tf.float32,
            shape=(None, *self._action_shape),
            name='actions',
        )

        self._rewards_ph = tf.placeholder(
            tf.float32,
            shape=(None, 1),
            name='rewards',
        )

        self._stds_ph = tf.placeholder(
            tf.float32,
            shape=(None, 1),
            name='stds',
        )

        self._terminals_ph = tf.placeholder(
            tf.float32,
            shape=(None, 1),
            name='terminals',
        )

        if self._store_extra_policy_info:
            self._log_pis_ph = tf.placeholder(
                tf.float32,
                shape=(None, 1),
                name='log_pis',
            )
            self._raw_actions_ph = tf.placeholder(
                tf.float32,
                shape=(None, *self._action_shape),
                name='raw_actions',
            )

    def _get_Q_target(self):
        """Soft Bellman target: r + gamma * (min_i Q_target_i(s',a') - alpha*logpi)."""
        next_actions = self._policy.actions([self._next_observations_ph])
        next_log_pis = self._policy.log_pis(
            [self._next_observations_ph], next_actions)

        next_Qs_values = tuple(
            Q([self._next_observations_ph, next_actions])
            for Q in self._Q_targets)

        min_next_Q = tf.reduce_min(next_Qs_values, axis=0)
        next_value = min_next_Q - self._alpha * next_log_pis

        Q_target = td_target(
            reward=self._reward_scale * self._rewards_ph,
            discount=self._discount,
            next_value=(1 - self._terminals_ph) * next_value)

        return Q_target

    def _init_critic_update(self):
        """Create minimization operation for critic Q-function.

        Creates a `tf.optimizer.minimize` operation for updating
        critic Q-function with gradient descent, and appends it to
        `self._training_ops` attribute.
        """
        Q_target = tf.stop_gradient(self._get_Q_target())

        assert Q_target.shape.as_list() == [None, 1]

        # Uncertainty-weighted critic loss: samples with larger model std get
        # weight sigmoid(-std * 5) in (0, 0.5]; real samples (std == 0) get 0.5.
        temperature_critic = 5.0
        weight_target_Q = tf.stop_gradient(tf.sigmoid(-self._stds_ph * temperature_critic))
        Q_values = self._Q_values = tuple(
            Q([self._observations_ph, self._actions_ph])
            for Q in self._Qs)

        Q_losses = self._Q_losses = tuple(
            tf.losses.mean_squared_error(
                labels=Q_target, predictions=Q_value, weights=weight_target_Q)
            for Q_value in Q_values)

        self._Q_optimizers = tuple(
            tf.train.AdamOptimizer(
                learning_rate=self._Q_lr,
                name='{}_{}_optimizer'.format(Q._name, i)
            ) for i, Q in enumerate(self._Qs))
        Q_training_ops = tuple(
            tf.contrib.layers.optimize_loss(
                Q_loss,
                self.global_step,
                learning_rate=self._Q_lr,
                optimizer=Q_optimizer,
                variables=Q.trainable_variables,
                increment_global_step=False,
                summaries=((
                    "loss", "gradients", "gradient_norm", "global_gradient_norm"
                ) if self._tf_summaries else ()))
            for i, (Q, Q_loss, Q_optimizer)
            in enumerate(zip(self._Qs, Q_losses, self._Q_optimizers)))

        self._training_ops.update({'Q': tf.group(Q_training_ops)})

    def _init_actor_update(self):
        """Create minimization operations for policy and entropy.

        Creates a `tf.optimizer.minimize` operations for updating
        policy and entropy with gradient descent, and adds them to
        `self._training_ops` attribute.
        """
        actions = self._policy.actions([self._observations_ph])
        log_pis = self._policy.log_pis([self._observations_ph], actions)

        assert log_pis.shape.as_list() == [None, 1]

        log_alpha = self._log_alpha = tf.get_variable(
            'log_alpha',
            dtype=tf.float32,
            initializer=0.0)
        alpha = tf.exp(log_alpha)

        if isinstance(self._target_entropy, Number):
            alpha_loss = -tf.reduce_mean(
                log_alpha * tf.stop_gradient(log_pis + self._target_entropy))

            self._alpha_optimizer = tf.train.AdamOptimizer(
                self._policy_lr, name='alpha_optimizer')
            self._alpha_train_op = self._alpha_optimizer.minimize(
                loss=alpha_loss, var_list=[log_alpha])
            self._training_ops.update({
                'temperature_alpha': self._alpha_train_op
            })

        self._alpha = alpha

        if self._action_prior == 'normal':
            policy_prior = tf.contrib.distributions.MultivariateNormalDiag(
                loc=tf.zeros(self._action_shape),
                scale_diag=tf.ones(self._action_shape))
            policy_prior_log_probs = policy_prior.log_prob(actions)
        elif self._action_prior == 'uniform':
            policy_prior_log_probs = 0.0

        Q_log_targets = tuple(
            Q([self._observations_ph, actions])
            for Q in self._Qs)
        min_Q_log_target = tf.reduce_min(Q_log_targets, axis=0)

        # Uncertainty-weighted actor loss: weight sigmoid(-std * 5) + 0.5 lies
        # in (0.5, 1]; real samples (std == 0) get full weight 1.0.
        temperature_act = 5.0
        weight_actor_Q = tf.stop_gradient(tf.sigmoid(-self._stds_ph * temperature_act) + 0.5)

        if self._reparameterize:
            policy_kl_losses = (
                alpha * log_pis
                - min_Q_log_target
                - policy_prior_log_probs) * weight_actor_Q
        else:
            raise NotImplementedError

        assert policy_kl_losses.shape.as_list() == [None, 1]

        policy_loss = tf.reduce_mean(policy_kl_losses)

        self._policy_optimizer = tf.train.AdamOptimizer(
            learning_rate=self._policy_lr,
            name="policy_optimizer")

        policy_train_op = tf.contrib.layers.optimize_loss(
            policy_loss,
            self.global_step,
            learning_rate=self._policy_lr,
            optimizer=self._policy_optimizer,
            variables=self._policy.trainable_variables,
            increment_global_step=False,
            summaries=(
                "loss", "gradients", "gradient_norm", "global_gradient_norm"
            ) if self._tf_summaries else ())

        self._training_ops.update({'policy_train_op': policy_train_op})

    def _init_training(self):
        """Hard-copy the online critics into the target critics."""
        self._update_target(tau=1.0)

    def _update_target(self, tau=None):
        """Polyak-average online Q weights into the target networks.

        Args:
            tau: Interpolation weight; defaults to ``self._tau``.  The `is
                None` check (instead of ``tau or self._tau``) keeps an
                explicit ``tau=0.0`` from silently falling back to the default.
        """
        tau = self._tau if tau is None else tau

        for Q, Q_target in zip(self._Qs, self._Q_targets):
            source_params = Q.get_weights()
            target_params = Q_target.get_weights()
            Q_target.set_weights([
                tau * source + (1.0 - tau) * target
                for source, target in zip(source_params, target_params)
            ])

    def _do_training(self, iteration, batch):
        """Runs the operations for updating training and target ops."""

        self._training_progress.update()
        self._training_progress.set_description()

        feed_dict = self._get_feed_dict(iteration, batch)

        self._session.run(self._training_ops, feed_dict)

        if iteration % self._target_update_interval == 0:
            # Run target ops here.
            self._update_target()

    def _get_feed_dict(self, iteration, batch):
        """Construct TensorFlow feed_dict from sample batch."""

        feed_dict = {
            self._observations_ph: batch['observations'],
            self._actions_ph: batch['actions'],
            self._next_observations_ph: batch['next_observations'],
            self._rewards_ph: batch['rewards'],
            self._terminals_ph: batch['terminals'],
            self._stds_ph: batch['stds'],
        }

        if self._store_extra_policy_info:
            feed_dict[self._log_pis_ph] = batch['log_pis']
            feed_dict[self._raw_actions_ph] = batch['raw_actions']

        if iteration is not None:
            feed_dict[self._iteration_ph] = iteration

        return feed_dict

    def get_diagnostics(self,
                        iteration,
                        batch,
                        training_paths,
                        evaluation_paths):
        """Return diagnostic information as ordered dictionary.

        Records mean and standard deviation of Q-function and state
        value function, and TD-loss (mean squared Bellman error)
        for the sample batch.

        Also calls the `draw` method of the plotter, if plotter defined.
        """

        feed_dict = self._get_feed_dict(iteration, batch)

        (Q_values, Q_losses, alpha, global_step) = self._session.run(
            (self._Q_values,
             self._Q_losses,
             self._alpha,
             self.global_step),
            feed_dict)

        diagnostics = OrderedDict({
            'Q-avg': np.mean(Q_values),
            'Q-std': np.std(Q_values),
            'Q_loss': np.mean(Q_losses),
            'alpha': alpha,
        })

        policy_diagnostics = self._policy.get_diagnostics(
            batch['observations'])
        diagnostics.update({
            f'policy/{key}': value
            for key, value in policy_diagnostics.items()
        })

        if self._plotter:
            self._plotter.draw()

        return diagnostics

    @property
    def tf_saveables(self):
        """TF objects to include in checkpoints (optimizers + log_alpha)."""
        saveables = {
            '_policy_optimizer': self._policy_optimizer,
            **{
                f'Q_optimizer_{i}': optimizer
                for i, optimizer in enumerate(self._Q_optimizers)
            },
            '_log_alpha': self._log_alpha,
        }

        if hasattr(self, '_alpha_optimizer'):
            saveables['_alpha_optimizer'] = self._alpha_optimizer

        return saveables
| 38.498013 | 155 | 0.588729 | r
from itertools import count
import gtimer as gt
import pdb
import numpy as np
import tensorflow as tf
from tensorflow.python.training import training_util
from softlearning.algorithms.rl_algorithm import RLAlgorithm
from softlearning.replay_pools.simple_replay_pool import WeightedReplayPool
from mbpo.models.constructor import construct_model, format_samples_for_training
from mbpo.models.fake_env import FakeEnv
from mbpo.utils.writer import Writer
from mbpo.utils.visualization import visualize_policy
from mbpo.utils.logging import Progress
import mbpo.utils.filesystem as filesystem
def td_target(reward, discount, next_value):
return reward + discount * next_value
class MEEE(RLAlgorithm):
def __init__(
self,
training_environment,
evaluation_environment,
policy,
Qs,
pool,
static_fns,
plotter=None,
tf_summaries=False,
lr=3e-4,
reward_scale=1.0,
target_entropy='auto',
discount=0.99,
tau=5e-3,
target_update_interval=1,
action_prior='uniform',
reparameterize=False,
store_extra_policy_info=False,
deterministic=False,
model_train_freq=250,
num_networks=7,
num_elites=5,
model_retain_epochs=20,
rollout_batch_size=100e3,
real_ratio=0.1,
rollout_schedule=[20,100,1,1],
hidden_dim=200,
max_model_t=None,
**kwargs,
):
super(MEEE, self).__init__(**kwargs)
obs_dim = np.prod(training_environment.observation_space.shape)
act_dim = np.prod(training_environment.action_space.shape)
self._model = construct_model(obs_dim=obs_dim, act_dim=act_dim, hidden_dim=hidden_dim, num_networks=num_networks, num_elites=num_elites)
self._static_fns = static_fns
self.fake_env = FakeEnv(self._model, self._static_fns)
self._rollout_schedule = rollout_schedule
self._max_model_t = max_model_t
self._model_retain_epochs = model_retain_epochs
self._model_train_freq = model_train_freq
self._rollout_batch_size = int(rollout_batch_size)
self._deterministic = deterministic
self._real_ratio = real_ratio
self._log_dir = os.getcwd()
self._writer = Writer(self._log_dir)
self._training_environment = training_environment
self._evaluation_environment = evaluation_environment
self._policy = policy
self._Qs = Qs
self._Q_targets = tuple(tf.keras.models.clone_model(Q) for Q in Qs)
self._pool = pool
self._plotter = plotter
self._tf_summaries = tf_summaries
self._policy_lr = lr
self._Q_lr = lr
self._reward_scale = reward_scale
self._target_entropy = (
-np.prod(self._training_environment.action_space.shape)
if target_entropy == 'auto'
else target_entropy)
print('[ MEEE ] Target entropy: {}'.format(self._target_entropy))
self._discount = discount
self._tau = tau
self._target_update_interval = target_update_interval
self._action_prior = action_prior
self._reparameterize = reparameterize
self._store_extra_policy_info = store_extra_policy_info
observation_shape = self._training_environment.active_observation_shape
action_shape = self._training_environment.action_space.shape
assert len(observation_shape) == 1, observation_shape
self._observation_shape = observation_shape
assert len(action_shape) == 1, action_shape
self._action_shape = action_shape
self._build()
def _build(self):
self._training_ops = {}
self._init_global_step()
self._init_placeholders()
self._init_actor_update()
self._init_critic_update()
def _train(self):
training_environment = self._training_environment
evaluation_environment = self._evaluation_environment
policy = self._policy
pool = self._pool
model_metrics = {}
if not self._training_started:
self._init_training()
self._initial_exploration_hook(
training_environment, self._initial_exploration_policy, pool)
self.sampler.initialize(training_environment, policy, pool)
gt.reset_root()
gt.rename_root('RLAlgorithm')
gt.set_def_unique(False)
self._training_before_hook()
for self._epoch in gt.timed_for(range(self._epoch, self._n_epochs)):
self._epoch_before_hook()
gt.stamp('epoch_before_hook')
self._training_progress = Progress(self._epoch_length * self._n_train_repeat)
start_samples = self.sampler._total_samples
for i in count():
samples_now = self.sampler._total_samples
self._timestep = samples_now - start_samples
if (samples_now >= start_samples + self._epoch_length
and self.ready_to_train):
break
self._timestep_before_hook()
gt.stamp('timestep_before_hook')
if self._timestep % self._model_train_freq == 0 and self._real_ratio < 1.0:
self._training_progress.pause()
print('[ MEEE ] log_dir: {} | ratio: {}'.format(self._log_dir, self._real_ratio))
print('[ MEEE ] Training model at epoch {} | freq {} | timestep {} (total: {}) | epoch train steps: {} (total: {})'.format(
self._epoch, self._model_train_freq, self._timestep, self._total_timestep, self._train_steps_this_epoch, self._num_train_steps)
)
model_train_metrics = self._train_model(batch_size=256, max_epochs=None, holdout_ratio=0.2, max_t=self._max_model_t)
model_metrics.update(model_train_metrics)
gt.stamp('epoch_train_model')
self._set_rollout_length()
self._reallocate_model_pool()
model_rollout_metrics = self._rollout_model(rollout_batch_size=self._rollout_batch_size, deterministic=self._deterministic)
model_metrics.update(model_rollout_metrics)
gt.stamp('epoch_rollout_model')
self._training_progress.resume()
self._do_sampling(timestep=self._total_timestep, disturb=True, fake_env=self.fake_env, Qs = self._Qs)
gt.stamp('sample')
if self.ready_to_train:
self._do_training_repeats(timestep=self._total_timestep)
gt.stamp('train')
self._timestep_after_hook()
gt.stamp('timestep_after_hook')
training_paths = self.sampler.get_last_n_paths(
math.ceil(self._epoch_length / self.sampler._max_path_length))
gt.stamp('training_paths')
evaluation_paths = self._evaluation_paths(
policy, evaluation_environment)
gt.stamp('evaluation_paths')
training_metrics = self._evaluate_rollouts(
training_paths, training_environment)
gt.stamp('training_metrics')
if evaluation_paths:
evaluation_metrics = self._evaluate_rollouts(
evaluation_paths, evaluation_environment)
gt.stamp('evaluation_metrics')
else:
evaluation_metrics = {}
self._epoch_after_hook(training_paths)
gt.stamp('epoch_after_hook')
sampler_diagnostics = self.sampler.get_diagnostics()
diagnostics = self.get_diagnostics(
iteration=self._total_timestep,
batch=self._evaluation_batch(),
training_paths=training_paths,
evaluation_paths=evaluation_paths)
time_diagnostics = gt.get_times().stamps.itrs
diagnostics.update(OrderedDict((
*(
(f'evaluation/{key}', evaluation_metrics[key])
for key in sorted(evaluation_metrics.keys())
),
*(
(f'training/{key}', training_metrics[key])
for key in sorted(training_metrics.keys())
),
*(
(f'times/{key}', time_diagnostics[key][-1])
for key in sorted(time_diagnostics.keys())
),
*(
(f'sampler/{key}', sampler_diagnostics[key])
for key in sorted(sampler_diagnostics.keys())
),
*(
(f'model/{key}', model_metrics[key])
for key in sorted(model_metrics.keys())
),
('epoch', self._epoch),
('timestep', self._timestep),
('timesteps_total', self._total_timestep),
('train-steps', self._num_train_steps),
)))
if self._eval_render_mode is not None and hasattr(
evaluation_environment, 'render_rollouts'):
training_environment.render_rollouts(evaluation_paths)
yield diagnostics
self.sampler.terminate()
self._training_after_hook()
self._training_progress.close()
yield {'done': True, **diagnostics}
def train(self, *args, **kwargs):
return self._train(*args, **kwargs)
def _log_policy(self):
save_path = os.path.join(self._log_dir, 'models')
filesystem.mkdir(save_path)
weights = self._policy.get_weights()
data = {'policy_weights': weights}
full_path = os.path.join(save_path, 'policy_{}.pkl'.format(self._total_timestep))
print('Saving policy to: {}'.format(full_path))
pickle.dump(data, open(full_path, 'wb'))
def _log_model(self):
save_path = os.path.join(self._log_dir, 'models')
filesystem.mkdir(save_path)
print('Saving model to: {}'.format(save_path))
self._model.save(save_path, self._total_timestep)
def _set_rollout_length(self):
min_epoch, max_epoch, min_length, max_length = self._rollout_schedule
if self._epoch <= min_epoch:
y = min_length
else:
dx = (self._epoch - min_epoch) / (max_epoch - min_epoch)
dx = min(dx, 1)
y = dx * (max_length - min_length) + min_length
self._rollout_length = int(y)
print('[ Model Length ] Epoch: {} (min: {}, max: {}) | Length: {} (min: {} , max: {})'.format(
self._epoch, min_epoch, max_epoch, self._rollout_length, min_length, max_length
))
def _reallocate_model_pool(self):
obs_space = self._pool._observation_space
act_space = self._pool._action_space
rollouts_per_epoch = self._rollout_batch_size * self._epoch_length / self._model_train_freq
model_steps_per_epoch = int(self._rollout_length * rollouts_per_epoch)
new_pool_size = self._model_retain_epochs * model_steps_per_epoch
if not hasattr(self, '_model_pool'):
print('[ MEEE ] Initializing new model pool with size {:.2e}'.format(
new_pool_size
))
self._model_pool = WeightedReplayPool(obs_space, act_space, new_pool_size)
elif self._model_pool._max_size != new_pool_size:
print('[ MEEE ] Updating model pool | {:.2e} --> {:.2e}'.format(
self._model_pool._max_size, new_pool_size
))
samples = self._model_pool.return_all_samples()
new_pool = WeightedReplayPool(obs_space, act_space, new_pool_size)
new_pool.add_samples(samples)
assert self._model_pool.size == new_pool.size
self._model_pool = new_pool
def _train_model(self, **kwargs):
env_samples = self._pool.return_all_samples()
train_inputs, train_outputs = format_samples_for_training(env_samples)
model_metrics = self._model.train(train_inputs, train_outputs, **kwargs)
return model_metrics
def _rollout_model(self, rollout_batch_size, **kwargs):
    """Generate short model-based rollouts and add them to the model pool.

    Starts from `rollout_batch_size` states sampled from real data, then
    steps the learned model for `_rollout_length` steps, dropping
    trajectories that terminate.  Returns a dict with the mean achieved
    rollout length.
    """
    print('[ Model Rollout ] Starting | Epoch: {} | Rollout length: {} | Batch size: {}'.format(
        self._epoch, self._rollout_length, rollout_batch_size
    ))
    batch = self.sampler.random_batch(rollout_batch_size)
    obs = batch['observations']
    steps_added = []
    for i in range(self._rollout_length):
        act = self._policy.actions_np(obs)
        next_obs, rew, term, info = self.fake_env.step(obs, act, **kwargs)
        steps_added.append(len(obs))
        # 'stds' records the model's per-sample uncertainty; it is used
        # downstream to weight the critic/actor losses.
        samples = {'observations': obs, 'actions': act, 'next_observations': next_obs, 'rewards': rew, 'terminals': term, 'stds': info['dev'][:,None]}
        self._model_pool.add_samples(samples)
        # Keep only non-terminal trajectories for the next step.
        nonterm_mask = ~term.squeeze(-1)
        if nonterm_mask.sum() == 0:
            print('[ Model Rollout ] Breaking early: {} | {} / {}'.format(i, nonterm_mask.sum(), nonterm_mask.shape))
            break
        obs = next_obs[nonterm_mask]
    mean_rollout_length = sum(steps_added) / rollout_batch_size
    rollout_stats = {'mean_rollout_length': mean_rollout_length}
    print('[ Model Rollout ] Added: {:.1e} | Model pool: {:.1e} (max {:.1e}) | Length: {} | Train rep: {}'.format(
        sum(steps_added), self._model_pool.size, self._model_pool._max_size, mean_rollout_length, self._n_train_repeat
    ))
    return rollout_stats
def _visualize_model(self, env, timestep):
    """Render policy rollouts through the learned model, then restore env.

    The simulator state is saved before visualization and restored
    afterwards so that rendering does not perturb ongoing data
    collection.  (Fixes: the saved state was previously discarded —
    `state_vector()`'s return value was never assigned — and the restore
    call was garbled.)
    """
    # Save the full simulator state (qpos followed by qvel).
    state = env.unwrapped.state_vector()
    qpos_dim = len(env.unwrapped.sim.data.qpos)
    qpos = state[:qpos_dim]
    qvel = state[qpos_dim:]
    print('[ Visualization ] Starting | Epoch {} | Log dir: {}\n'.format(self._epoch, self._log_dir))
    visualize_policy(env, self.fake_env, self._policy, self._writer, timestep)
    print('[ Visualization ] Done')
    # Restore the state captured above.
    env.unwrapped.set_state(qpos, qvel)
def _training_batch(self, batch_size=None):
batch_size = batch_size or self.sampler._batch_size
env_batch_size = int(batch_size*self._real_ratio)
model_batch_size = batch_size - env_batch_size
)
if model_batch_size > 0:
model_batch = self._model_pool.random_batch(model_batch_size)
keys = env_batch.keys()
batch = {k: np.concatenate((env_batch[k], model_batch[k]), axis=0) for k in keys}
else:
ep(self):
self.global_step = training_util.get_or_create_global_step()
self._training_ops.update({
'increment_global_step': training_util._increment_global_step(1)
})
def _init_placeholders(self):
    """Create TF placeholders for every batch input consumed by the graph.

    Observation/action placeholders take their trailing dimensions from
    `self._observation_shape` / `self._action_shape`; rewards, stds and
    terminals are column vectors of shape (batch, 1).
    """
    self._iteration_ph = tf.placeholder(
        tf.int64, shape=None, name='iteration')
    self._observations_ph = tf.placeholder(
        tf.float32,
        shape=(None, *self._observation_shape),
        name='observation',
    )
    self._next_observations_ph = tf.placeholder(
        tf.float32,
        shape=(None, *self._observation_shape),
        name='next_observation',
    )
    self._actions_ph = tf.placeholder(
        tf.float32,
        shape=(None, *self._action_shape),
        name='actions',
    )
    self._rewards_ph = tf.placeholder(
        tf.float32,
        shape=(None, 1),
        name='rewards',
    )
    # Per-sample model uncertainty, used to weight the critic and actor
    # losses in _init_critic_update / _init_actor_update.
    self._stds_ph = tf.placeholder(
        tf.float32,
        shape=(None, 1),
        name='stds',
    )
    self._terminals_ph = tf.placeholder(
        tf.float32,
        shape=(None, 1),
        name='terminals',
    )
    if self._store_extra_policy_info:
        # Optional inputs only created when extra policy info is stored.
        self._log_pis_ph = tf.placeholder(
            tf.float32,
            shape=(None, 1),
            name='log_pis',
        )
        self._raw_actions_ph = tf.placeholder(
            tf.float32,
            shape=(None, *self._action_shape),
            name='raw_actions',
        )
def _get_Q_target(self):
    """Build the soft Bellman backup target for the critic update."""
    next_acts = self._policy.actions([self._next_observations_ph])
    next_logp = self._policy.log_pis(
        [self._next_observations_ph], next_acts)
    next_qs = tuple(
        Q([self._next_observations_ph, next_acts])
        for Q in self._Q_targets)
    # Clipped double-Q: minimum over target critics, then the entropy
    # bonus is subtracted (soft value).
    next_value = tf.reduce_min(next_qs, axis=0) - self._alpha * next_logp
    return td_target(
        reward=self._reward_scale * self._rewards_ph,
        discount=self._discount,
        next_value=(1 - self._terminals_ph) * next_value)
def _init_critic_update(self):
    """Create the Q-function loss and optimizer ops.

    Each critic regresses toward the shared TD target; samples are
    down-weighted according to the model's predicted uncertainty
    (`self._stds_ph`) via a sigmoid weighting.
    """
    Q_target = tf.stop_gradient(self._get_Q_target())
    assert Q_target.shape.as_list() == [None, 1]
    # Higher model uncertainty -> smaller weight on the squared error.
    temperature_critic = 5.0
    weight_target_Q = tf.stop_gradient(tf.sigmoid(-self._stds_ph * temperature_critic))
    Q_values = self._Q_values = tuple(
        Q([self._observations_ph, self._actions_ph])
        for Q in self._Qs)
    Q_losses = self._Q_losses = tuple(
        tf.losses.mean_squared_error(
            labels=Q_target, predictions=Q_value, weights=weight_target_Q)
        for Q_value in Q_values)
    # One Adam optimizer per critic.
    self._Q_optimizers = tuple(
        tf.train.AdamOptimizer(
            learning_rate=self._Q_lr,
            name='{}_{}_optimizer'.format(Q._name, i)
        ) for i, Q in enumerate(self._Qs))
    Q_training_ops = tuple(
        tf.contrib.layers.optimize_loss(
            Q_loss,
            self.global_step,
            learning_rate=self._Q_lr,
            optimizer=Q_optimizer,
            variables=Q.trainable_variables,
            increment_global_step=False,
            summaries=((
                "loss", "gradients", "gradient_norm", "global_gradient_norm"
            ) if self._tf_summaries else ()))
        for i, (Q, Q_loss, Q_optimizer)
        in enumerate(zip(self._Qs, Q_losses, self._Q_optimizers)))
    # All critic updates run as a single grouped op.
    self._training_ops.update({'Q': tf.group(Q_training_ops)})
def _init_actor_update(self):
    """Create the policy loss, temperature (alpha) tuning, and their ops.

    The actor minimizes alpha*log_pi - min_Q (SAC objective), with each
    sample re-weighted by the model uncertainty (`self._stds_ph`).
    """
    actions = self._policy.actions([self._observations_ph])
    log_pis = self._policy.log_pis([self._observations_ph], actions)
    assert log_pis.shape.as_list() == [None, 1]
    log_alpha = self._log_alpha = tf.get_variable(
        'log_alpha',
        dtype=tf.float32,
        initializer=0.0)
    alpha = tf.exp(log_alpha)
    if isinstance(self._target_entropy, Number):
        # Automatic temperature tuning toward the target entropy.
        alpha_loss = -tf.reduce_mean(
            log_alpha * tf.stop_gradient(log_pis + self._target_entropy))
        self._alpha_optimizer = tf.train.AdamOptimizer(
            self._policy_lr, name='alpha_optimizer')
        self._alpha_train_op = self._alpha_optimizer.minimize(
            loss=alpha_loss, var_list=[log_alpha])
        self._training_ops.update({
            'temperature_alpha': self._alpha_train_op
        })
    self._alpha = alpha
    # NOTE(review): if _action_prior is neither 'normal' nor 'uniform',
    # policy_prior_log_probs stays unbound and the expression below would
    # raise — presumably only these two values are configured; confirm.
    if self._action_prior == 'normal':
        policy_prior = tf.contrib.distributions.MultivariateNormalDiag(
            loc=tf.zeros(self._action_shape),
            scale_diag=tf.ones(self._action_shape))
        policy_prior_log_probs = policy_prior.log_prob(actions)
    elif self._action_prior == 'uniform':
        policy_prior_log_probs = 0.0
    Q_log_targets = tuple(
        Q([self._observations_ph, actions])
        for Q in self._Qs)
    min_Q_log_target = tf.reduce_min(Q_log_targets, axis=0)
    # Uncertain samples get a smaller (but at least 0.5) actor weight.
    temperature_act = 5.0
    weight_actor_Q = tf.stop_gradient(tf.sigmoid(-self._stds_ph * temperature_act) + 0.5)
    if self._reparameterize:
        policy_kl_losses = (
            alpha * log_pis
            - min_Q_log_target
            - policy_prior_log_probs) * weight_actor_Q
    else:
        # Likelihood-ratio gradients are not implemented.
        raise NotImplementedError
    assert policy_kl_losses.shape.as_list() == [None, 1]
    policy_loss = tf.reduce_mean(policy_kl_losses)
    self._policy_optimizer = tf.train.AdamOptimizer(
        learning_rate=self._policy_lr,
        name="policy_optimizer")
    policy_train_op = tf.contrib.layers.optimize_loss(
        policy_loss,
        self.global_step,
        learning_rate=self._policy_lr,
        optimizer=self._policy_optimizer,
        variables=self._policy.trainable_variables,
        increment_global_step=False,
        summaries=(
            "loss", "gradients", "gradient_norm", "global_gradient_norm"
        ) if self._tf_summaries else ())
    self._training_ops.update({'policy_train_op': policy_train_op})
def _init_training(self):
    """Hard-copy online Q weights into the targets before training starts."""
    self._update_target(tau=1.0)
def _update_target(self, tau=None):
tau = tau or self._tau
for Q, Q_target in zip(self._Qs, self._Q_targets):
source_params = Q.get_weights()
target_params = Q_target.get_weights()
Q_target.set_weights([
tau * source + (1.0 - tau) * target
for source, target in zip(source_params, target_params)
])
def _do_training(self, iteration, batch):
self._training_progress.update()
self._training_progress.set_description()
feed_dict = self._get_feed_dict(iteration, batch)
self._session.run(self._training_ops, feed_dict)
if iteration % self._target_update_interval == 0:
self._update_target()
def _get_feed_dict(self, iteration, batch):
feed_dict = {
self._observations_ph: batch['observations'],
self._actions_ph: batch['actions'],
self._next_observations_ph: batch['next_observations'],
self._rewards_ph: batch['rewards'],
self._terminals_ph: batch['terminals'],
self._stds_ph: batch['stds'],
}
if self._store_extra_policy_info:
feed_dict[self._log_pis_ph] = batch['log_pis']
feed_dict[self._raw_actions_ph] = batch['raw_actions']
if iteration is not None:
feed_dict[self._iteration_ph] = iteration
return feed_dict
def get_diagnostics(self, iteration, batch, training_paths, evaluation_paths):
    """Return TD-learning statistics evaluated on the given batch.

    Also forwards per-batch diagnostics from the policy (prefixed with
    ``policy/``) and refreshes the plotter when one is attached.  The
    path arguments are accepted for interface compatibility but unused.
    """
    feed_dict = self._get_feed_dict(iteration, batch)
    fetches = (self._Q_values, self._Q_losses, self._alpha, self.global_step)
    Q_values, Q_losses, alpha, global_step = self._session.run(fetches, feed_dict)
    diagnostics = OrderedDict((
        ('Q-avg', np.mean(Q_values)),
        ('Q-std', np.std(Q_values)),
        ('Q_loss', np.mean(Q_losses)),
        ('alpha', alpha),
    ))
    policy_diagnostics = self._policy.get_diagnostics(batch['observations'])
    for key, value in policy_diagnostics.items():
        diagnostics['policy/{}'.format(key)] = value
    if self._plotter:
        self._plotter.draw()
    return diagnostics
@property
def tf_saveables(self):
    """TF objects (optimizers, log-alpha) to include in checkpoints."""
    saveables = {'_policy_optimizer': self._policy_optimizer}
    for i, optimizer in enumerate(self._Q_optimizers):
        saveables['Q_optimizer_{}'.format(i)] = optimizer
    saveables['_log_alpha'] = self._log_alpha
    # The alpha optimizer only exists when automatic temperature tuning
    # was enabled in _init_actor_update.
    if hasattr(self, '_alpha_optimizer'):
        saveables['_alpha_optimizer'] = self._alpha_optimizer
    return saveables
| true | true |
f7302be119c81384731038eacea54af1c0b78722 | 2,312 | py | Python | customSDK/servicefabric/models/service_backup_configuration_info.py | hans-olav/service-fabric-cli | baf27342ad4b9f74dee1954e60ed5b40ebcf039d | [
"MIT"
] | null | null | null | customSDK/servicefabric/models/service_backup_configuration_info.py | hans-olav/service-fabric-cli | baf27342ad4b9f74dee1954e60ed5b40ebcf039d | [
"MIT"
] | null | null | null | customSDK/servicefabric/models/service_backup_configuration_info.py | hans-olav/service-fabric-cli | baf27342ad4b9f74dee1954e60ed5b40ebcf039d | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .backup_configuration_info import BackupConfigurationInfo
class ServiceBackupConfigurationInfo(BackupConfigurationInfo):
    """Backup configuration for a single Service Fabric service.

    Extends BackupConfigurationInfo with the service identity and pins
    the polymorphic discriminator ``kind`` to ``'Service'``.

    :param policy_name: The name of the backup policy which is applicable
     to this Service Fabric application or service or partition.
    :type policy_name: str
    :param policy_inherited_from: Specifies the scope at which the backup
     policy is applied. Possible values include: 'Invalid', 'Partition',
     'Service', 'Application'
    :type policy_inherited_from: str or
     ~azure.servicefabric.models.BackupPolicyScope
    :param suspension_info: Describes the backup suspension details.
    :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo
    :param kind: Constant filled by server.
    :type kind: str
    :param service_name: The full name of the service with 'fabric:' URI
     scheme.
    :type service_name: str
    """

    _validation = {
        'kind': {'required': True},
    }

    _attribute_map = {
        'policy_name': {'key': 'PolicyName', 'type': 'str'},
        'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'},
        'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'service_name': {'key': 'ServiceName', 'type': 'str'},
    }

    def __init__(self, policy_name=None, policy_inherited_from=None,
                 suspension_info=None, service_name=None):
        super(ServiceBackupConfigurationInfo, self).__init__(
            policy_name=policy_name,
            policy_inherited_from=policy_inherited_from,
            suspension_info=suspension_info)
        self.kind = 'Service'
        self.service_name = service_name
from .backup_configuration_info import BackupConfigurationInfo
class ServiceBackupConfigurationInfo(BackupConfigurationInfo):
_validation = {
'kind': {'required': True},
}
_attribute_map = {
'policy_name': {'key': 'PolicyName', 'type': 'str'},
'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'},
'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'},
'kind': {'key': 'Kind', 'type': 'str'},
'service_name': {'key': 'ServiceName', 'type': 'str'},
}
def __init__(self, policy_name=None, policy_inherited_from=None, suspension_info=None, service_name=None):
super(ServiceBackupConfigurationInfo, self).__init__(policy_name=policy_name, policy_inherited_from=policy_inherited_from, suspension_info=suspension_info)
self.service_name = service_name
self.kind = 'Service'
| true | true |
f7302cc54f16c065e96db6b3d234be1df4223db1 | 691 | py | Python | parlai/tasks/squad/test.py | zl930216/ParlAI | abf0ad6d1779af0f8ce0b5aed00d2bab71416684 | [
"MIT"
] | 9,228 | 2017-05-03T03:40:34.000Z | 2022-03-31T14:03:29.000Z | parlai/tasks/squad/test.py | zl930216/ParlAI | abf0ad6d1779af0f8ce0b5aed00d2bab71416684 | [
"MIT"
] | 2,660 | 2017-05-03T23:06:02.000Z | 2022-03-31T21:24:29.000Z | parlai/tasks/squad/test.py | zl930216/ParlAI | abf0ad6d1779af0f8ce0b5aed00d2bab71416684 | [
"MIT"
] | 2,058 | 2017-05-04T12:19:48.000Z | 2022-03-31T10:28:11.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.utils.testing import AutoTeacherTest
class TestDefaultTeacher(AutoTeacherTest):
    # Regression test for the default SQuAD teacher.
    task = "squad"


class TestIndexTeacher(AutoTeacherTest):
    # Regression test for the "squad:index" teacher variant.
    task = "squad:index"


class TestOpensquadTeacher(AutoTeacherTest):
    # Regression test for the "squad:opensquad" teacher variant.
    task = "squad:opensquad"


class TestFulldocTeacher(AutoTeacherTest):
    # Regression test for the "squad:fulldoc" teacher variant.
    task = "squad:fulldoc"


class TestSentenceTeacher(AutoTeacherTest):
    # Regression test for the "squad:sentence" teacher variant.
    task = "squad:sentence"


class TestFulldocsentenceTeacher(AutoTeacherTest):
    # Regression test for the "squad:fulldocsentence" teacher variant.
    task = "squad:fulldocsentence"
| 21.59375 | 65 | 0.76411 |
from parlai.utils.testing import AutoTeacherTest
class TestDefaultTeacher(AutoTeacherTest):
task = "squad"
class TestIndexTeacher(AutoTeacherTest):
task = "squad:index"
class TestOpensquadTeacher(AutoTeacherTest):
task = "squad:opensquad"
class TestFulldocTeacher(AutoTeacherTest):
task = "squad:fulldoc"
class TestSentenceTeacher(AutoTeacherTest):
task = "squad:sentence"
class TestFulldocsentenceTeacher(AutoTeacherTest):
task = "squad:fulldocsentence"
| true | true |
f7302ce59810c52de7625664e0887f9d75b5fb56 | 6,150 | py | Python | .docs/conf.py | Gael-de-Sailly/flopy | 4104cf5e6a35e2a1fd6183442962ae5cb258fa7a | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | .docs/conf.py | Gael-de-Sailly/flopy | 4104cf5e6a35e2a1fd6183442962ae5cb258fa7a | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | .docs/conf.py | Gael-de-Sailly/flopy | 4104cf5e6a35e2a1fd6183442962ae5cb258fa7a | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys

# add flopy root directory to the python path so autodoc can import it
sys.path.insert(0, os.path.abspath(".."))
from flopy import __version__

# -- determine if running on readthedocs ------------------------------------
on_rtd = os.environ.get('READTHEDOCS') == 'True'

# -- create source rst files ------------------------------------------------
# Regenerate the per-module API .rst stubs from the flopy package.
cmd = "sphinx-apidoc -e -o source/ ../flopy/"
print(cmd)
os.system(cmd)

# -- programmatically create rst files ---------------------------------------
cmd = ("python", "create_rstfiles.py")
print(" ".join(cmd))
os.system(" ".join(cmd))

# -- convert the tutorial scripts -------------------------------------------
# Skipped on Read the Docs, where prebuilt notebooks are fetched via the
# rtds_action extension instead of being executed during the build.
if not on_rtd:
    cmd = ("python", "create_tutorials.py")
    print(" ".join(cmd))
    os.system(" ".join(cmd))
# -- Project information -----------------------------------------------------
project = "flopy Documentation"
copyright = "2020, Bakker, Mark, Post, Vincent, Langevin, C. D., Hughes, J. D., White, J. T., Leaf, A. T., Paulinski, S. R., Larsen, J. D., Toews, M. W., Morway, E. D., Bellino, J. C., Starn, J. J., and Fienen, M. N."
author = "Bakker, Mark, Post, Vincent, Langevin, C. D., Hughes, J. D., White, J. T., Leaf, A. T., Paulinski, S. R., Larsen, J. D., Toews, M. W., Morway, E. D., Bellino, J. C., Starn, J. J., and Fienen, M. N."

# The version, taken from the installed flopy package so the docs always
# match the code being documented.
version = __version__
release = __version__
# NOTE(review): Sphinx expects a language string such as "en"; None emits
# a warning on newer Sphinx versions -- confirm before changing.
language = None

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.napoleon",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.todo",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.ifconfig",
    "sphinx.ext.viewcode",
    "IPython.sphinxext.ipython_console_highlighting",  # lowercase didn't work
    "sphinx.ext.autosectionlabel",
    "nbsphinx",
    "nbsphinx_link",
    "recommonmark",
]

# Settings for GitHub actions integration: on Read the Docs, pull the
# notebook artifacts built in CI rather than executing them locally.
if on_rtd:
    extensions.append("rtds_action")
    rtds_action_github_repo = "modflowpy/flopy"
    rtds_action_path = "_notebooks"
    rtds_action_artifact_prefix = "notebooks-for-"
    rtds_action_github_token = os.environ.get("GITHUB_TOKEN", None)

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"]

source_suffix = ".rst"

# The encoding of source files.
source_encoding = "utf-8"

# The master toctree document.
master_doc = "index"

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

html_theme_options = {
    "github_url": "https://github.com/modflowpy/flopy",
    "use_edit_page_button": False,
}

autosummary_generate = True
numpydoc_show_class_members = False

# Context passed to the HTML templates (used e.g. by "edit on GitHub" links).
html_context = {
    "github_user": "flopy",
    "github_repo": "flopy",
    "github_version": "master",
    "doc_path": "doc",
}

html_css_files = [
    "css/custom.css",
]

# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "flopy"

html_favicon = "_images/flopylogo.png"

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True

# If false, no module index is generated.
html_domain_indices = True

# If false, no index is generated.
html_use_index = True

# If true, the index is split into individual pages for each letter.
html_split_index = False

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False

# Output file base name for HTML help builder.
htmlhelp_basename = "flopydoc"
| 33.791209 | 217 | 0.677073 |
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
from flopy import __version__
on_rtd = os.environ.get('READTHEDOCS') == 'True'
cmd = "sphinx-apidoc -e -o source/ ../flopy/"
print(cmd)
os.system(cmd)
cmd = ("python", "create_rstfiles.py")
print(" ".join(cmd))
os.system(" ".join(cmd))
if not on_rtd:
cmd = ("python", "create_tutorials.py")
print(" ".join(cmd))
os.system(" ".join(cmd))
project = "flopy Documentation"
copyright = "2020, Bakker, Mark, Post, Vincent, Langevin, C. D., Hughes, J. D., White, J. T., Leaf, A. T., Paulinski, S. R., Larsen, J. D., Toews, M. W., Morway, E. D., Bellino, J. C., Starn, J. J., and Fienen, M. N."
author = "Bakker, Mark, Post, Vincent, Langevin, C. D., Hughes, J. D., White, J. T., Leaf, A. T., Paulinski, S. R., Larsen, J. D., Toews, M. W., Morway, E. D., Bellino, J. C., Starn, J. J., and Fienen, M. N."
version = __version__
release = __version__
language = None
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"IPython.sphinxext.ipython_console_highlighting",
"sphinx.ext.autosectionlabel",
"nbsphinx",
"nbsphinx_link",
"recommonmark",
]
# Settings for GitHub actions integration
if on_rtd:
extensions.append("rtds_action")
rtds_action_github_repo = "modflowpy/flopy"
rtds_action_path = "_notebooks"
rtds_action_artifact_prefix = "notebooks-for-"
rtds_action_github_token = os.environ.get("GITHUB_TOKEN", None)
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"]
source_suffix = ".rst"
# The encoding of source files.
source_encoding = "utf-8"
# The master toctree document.
master_doc = "index"
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_theme_options = {
"github_url": "https://github.com/modflowpy/flopy",
"use_edit_page_button": False,
}
autosummary_generate = True
numpydoc_show_class_members = False
html_context = {
"github_user": "flopy",
"github_repo": "flopy",
"github_version": "master",
"doc_path": "doc",
}
html_css_files = [
"css/custom.css",
]
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "flopy"
html_favicon = "_images/flopylogo.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# Output file base name for HTML help builder.
htmlhelp_basename = "flopydoc"
| true | true |
f7302d7fbc14ba5b762d875c3cc9ddd617ab5ad6 | 77,626 | py | Python | test_integration/geopm_test_integration.py | RyoTTa/geopm | 74246c8ce70ee47f53bc5629638f51c2c391027b | [
"BSD-3-Clause"
] | null | null | null | test_integration/geopm_test_integration.py | RyoTTa/geopm | 74246c8ce70ee47f53bc5629638f51c2c391027b | [
"BSD-3-Clause"
] | null | null | null | test_integration/geopm_test_integration.py | RyoTTa/geopm | 74246c8ce70ee47f53bc5629638f51c2c391027b | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2015, 2016, 2017, 2018, 2019, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY LOG OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import str
import os
import sys
import unittest
import subprocess
import time
import pandas
import collections
import socket
import shlex
import json
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test_integration import util
from test_integration import geopm_test_launcher
import geopmpy.io
import geopmpy.launcher
def create_frequency_map_policy(min_freq, max_freq, frequency_map, use_env=False):
    """Build the policy dict consumed by the frequency map agent.

    Arguments:
        min_freq: Floor frequency for the agent.
        max_freq: Ceiling frequency for the agent.
        frequency_map: Dictionary mapping region names to frequencies.
        use_env: If true, publish the map through the GEOPM_FREQUENCY_MAP
                 environment variable and return only the min/max policy.
                 Otherwise clear that variable and encode the map in the
                 policy as HASH_N/FREQ_N pairs.

    Side effect: mutates os.environ['GEOPM_FREQUENCY_MAP'].
    """
    # Precomputed geopm_crc32_str() hashes for the benchmark region names.
    known_hashes = {
        'dgemm': 0x00000000a74bbf35,
        'all2all': 0x000000003ddc81bf,
        'stream': 0x00000000d691da00,
        'sleep': 0x00000000536c798f,
        'MPI_Barrier': 0x000000007b561f45,
        'model-init': 0x00000000644f9787,
        'unmarked-region': 0x00000000725e8066 }
    policy = {'frequency_min': min_freq, 'frequency_max': max_freq}
    if use_env:
        os.environ['GEOPM_FREQUENCY_MAP'] = json.dumps(frequency_map)
    else:
        os.environ.pop('GEOPM_FREQUENCY_MAP', None)
        for index, (region, freq) in enumerate(frequency_map.items()):
            policy['HASH_{}'.format(index)] = int(known_hashes[region])
            policy['FREQ_{}'.format(index)] = freq
    return policy
class TestIntegration(unittest.TestCase):
def setUp(self):
    """Record machine power/frequency state so tearDown can restore it."""
    self.longMessage = True  # unittest: append custom messages to failures
    self._agent = 'power_governor'
    self._options = {'power_budget': 150}
    self._tmp_files = []
    self._output = None
    # Snapshot the current RAPL power limit and P-state request so the
    # test can restore them after agent runs that may change them.
    self._power_limit = geopm_test_launcher.geopmread("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0")
    self._frequency = geopm_test_launcher.geopmread("MSR::PERF_CTL:FREQ board 0")
    # Remember the pre-test frequency-map environment, if any.
    self._original_freq_map_env = os.environ.get('GEOPM_FREQUENCY_MAP')
def tearDown(self):
    """Restore machine and environment state; clean up output on success."""
    # Restore the power limit and frequency captured in setUp().
    geopm_test_launcher.geopmwrite("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0 " + str(self._power_limit))
    geopm_test_launcher.geopmwrite("MSR::PERF_CTL:FREQ board 0 " + str(self._frequency))
    # Remove generated files only when the test passed and the user did
    # not request to keep them via GEOPM_KEEP_FILES.
    if sys.exc_info() == (None, None, None) and os.getenv('GEOPM_KEEP_FILES') is None:
        if self._output is not None:
            self._output.remove_files()
        for ff in self._tmp_files:
            try:
                os.remove(ff)
            except OSError:
                # Best effort: the file may never have been created.
                pass
    # Restore GEOPM_FREQUENCY_MAP to its pre-test value (or unset it).
    if self._original_freq_map_env is None:
        if 'GEOPM_FREQUENCY_MAP' in os.environ:
            os.environ.pop('GEOPM_FREQUENCY_MAP')
    else:
        os.environ['GEOPM_FREQUENCY_MAP'] = self._original_freq_map_env
def assertNear(self, a, b, epsilon=0.05, msg=''):
    """Fail unless b is within a fractional epsilon of a.

    When a == 0 the comparison is absolute (denominator of 1).
    """
    reference = a if a != 0 else 1
    relative_error = abs((a - b) / reference)
    if relative_error >= epsilon:
        self.fail('The fractional difference between {a} and {b} is greater than {epsilon}. {msg}'.format(a=a, b=b, epsilon=epsilon, msg=msg))
def create_progress_df(self, df):
    """Condense a trace DataFrame to region entry/exit row pairs.

    For every completed region (row where REGION_PROGRESS == 1) two rows
    are kept: the first row after the previous region's exit (the entry)
    and the completion row itself (the exit).  Only the TIME,
    REGION_PROGRESS and REGION_RUNTIME columns are retained.

    Fixes: returns an empty frame instead of letting pandas.concat raise
    when no region completes, drops a dead DataFrame initialization, and
    uses Series.items() (iteritems() was removed in pandas 2.0).
    """
    df = df.reset_index(drop=True)
    columns = ['TIME', 'REGION_PROGRESS', 'REGION_RUNTIME']
    row_list = []
    last_index = 0
    progress_1s = df['REGION_PROGRESS'].loc[df['REGION_PROGRESS'] == 1]
    for index, _ in progress_1s.items():
        region = df.loc[last_index:index]
        row_list.append(region.head(1)[columns])  # region entry
        row_list.append(region.tail(1)[columns])  # region exit
        last_index = index + 1  # next region starts just past this exit
    if not row_list:
        # No completed regions in the trace.
        return pandas.DataFrame(columns=columns)
    return pandas.concat(row_list)
def test_report_and_trace_generation(self):
    """Launch a 4-node sleep job and verify per-node reports and traces."""
    name = 'test_report_and_trace_generation'
    report_path = name + '.report'
    trace_path = name + '.trace'
    num_node = 4
    num_rank = 16
    # Application config: a single one-second sleep region.
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.append_region('sleep', 1.0)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    launcher.run(name)
    self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
    node_names = self._output.get_node_names()
    self.assertEqual(num_node, len(node_names))
    for nn in node_names:
        # Every node must contribute a non-empty report and trace.
        report = self._output.get_report_data(node_name=nn)
        self.assertNotEqual(0, len(report))
        trace = self._output.get_trace_data(node_name=nn)
        self.assertNotEqual(0, len(trace))
def test_no_report_and_trace_generation(self):
    """Run the sleep benchmark without requesting a report or a trace and
    verify the launch completes cleanly (success is simply no error)."""
    name = 'test_no_report_and_trace_generation'
    num_node = 4
    num_rank = 16
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.append_region('sleep', 1.0)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    # No report/trace paths passed to the launcher: nothing should be produced.
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    launcher.run(name)
@unittest.skipUnless('mr-fusion' in socket.gethostname(), "This test only enabled on known working systems.")
def test_report_and_trace_generation_pthread(self):
    """Same as test_report_and_trace_generation, but with the GEOPM
    controller launched as a pthread inside the application process."""
    name = 'test_report_and_trace_generation_pthread'
    report_path = name + '.report'
    trace_path = name + '.trace'
    num_node = 4
    num_rank = 16
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.append_region('sleep', 1.0)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    # Run the controller as a pthread rather than the default process mode.
    launcher.set_pmpi_ctl('pthread')
    launcher.run(name)
    self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
    node_names = self._output.get_node_names()
    self.assertEqual(num_node, len(node_names))
    for nn in node_names:
        report = self._output.get_report_data(node_name=nn)
        self.assertNotEqual(0, len(report))
        trace = self._output.get_trace_data(node_name=nn)
        self.assertNotEqual(0, len(trace))
@unittest.skipUnless(geopm_test_launcher.detect_launcher() != "aprun",
                     'ALPS does not support multi-application launch on the same nodes.')
@util.skip_unless_batch()
def test_report_and_trace_generation_application(self):
    """Same as test_report_and_trace_generation, but with the GEOPM
    controller launched as a separate application on the same nodes."""
    name = 'test_report_and_trace_generation_application'
    report_path = name + '.report'
    trace_path = name + '.trace'
    num_node = 4
    num_rank = 16
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.append_region('sleep', 1.0)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    # Run the controller as a stand-alone application.
    launcher.set_pmpi_ctl('application')
    launcher.run(name)
    self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
    node_names = self._output.get_node_names()
    self.assertEqual(num_node, len(node_names))
    for nn in node_names:
        report = self._output.get_report_data(node_name=nn)
        self.assertNotEqual(0, len(report))
        trace = self._output.get_trace_data(node_name=nn)
        self.assertNotEqual(0, len(trace))
@unittest.skipUnless(geopm_test_launcher.detect_launcher() == "srun" and os.getenv('SLURM_NODELIST') is None,
                     'Requires non-sbatch SLURM session for alloc\'d and idle nodes.')
def test_report_generation_all_nodes(self):
    """Run a single-rank sleep job on every idle node in the SLURM
    allocation, one node at a time, and validate each node's report.

    Nodes that disappear from the idle list mid-test are tolerated and
    dropped from the expected set.
    """
    name = 'test_report_generation_all_nodes'
    report_path = name + '.report'
    num_node = 1
    num_rank = 1
    delay = 1.0
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.append_region('sleep', delay)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    time.sleep(5)  # Wait a moment to finish cleaning-up from a previous test
    idle_nodes = launcher.get_idle_nodes()
    # Iterate over a copy so that idle_nodes can be mutated when a node vanishes.
    idle_nodes_copy = list(idle_nodes)
    alloc_nodes = launcher.get_alloc_nodes()
    launcher.write_log(name, 'Idle nodes : {nodes}'.format(nodes=idle_nodes))
    launcher.write_log(name, 'Alloc\'d  nodes : {nodes}'.format(nodes=alloc_nodes))
    node_names = []
    for nn in idle_nodes_copy:
        launcher.set_node_list(nn.split())  # Hack to convert string to list
        try:
            launcher.run(name)
            node_names += nn.split()
        except subprocess.CalledProcessError as e:
            # A rc of 1 combined with the node leaving the idle list means the
            # node was claimed by another job; any other failure is fatal.
            if e.returncode == 1 and nn not in launcher.get_idle_nodes():
                launcher.write_log(name, '{node} has disappeared from the idle list!'.format(node=nn))
                idle_nodes.remove(nn)
            else:
                launcher.write_log(name, 'Return code = {code}'.format(code=e.returncode))
                raise e
        # do_cache=False: re-read the report fresh after every run.
        ao = geopmpy.io.AppOutput(report_path, do_cache=False)
        sleep_data = ao.get_report_data(node_name=nn, region='sleep')
        app_data = ao.get_app_total_data(node_name=nn)
        self.assertNotEqual(0, len(sleep_data))
        self.assertNear(delay, sleep_data['runtime'].item())
        self.assertGreater(app_data['runtime'].item(), sleep_data['runtime'].item())
        self.assertEqual(1, sleep_data['count'].item())
    self.assertEqual(len(node_names), len(idle_nodes))
def test_runtime(self):
    """Verify the reported sleep region runtime matches the requested
    delay and that total application runtime exceeds the region time."""
    name = 'test_runtime'
    report_path = name + '.report'
    num_node = 1
    num_rank = 5
    delay = 3.0
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.append_region('sleep', delay)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    launcher.run(name)
    self._output = geopmpy.io.AppOutput(report_path)
    node_names = self._output.get_node_names()
    self.assertEqual(num_node, len(node_names))
    for nn in node_names:
        report = self._output.get_report_data(node_name=nn, region='sleep')
        app_total = self._output.get_app_total_data(node_name=nn)
        self.assertNear(delay, report['runtime'].item())
        self.assertGreater(app_total['runtime'].item(), report['runtime'].item())
def test_runtime_epoch(self):
    """Verify the epoch runtime equals the sum of the sleep and spin
    region runtimes on every node."""
    name = 'test_runtime_epoch'
    report_path = name + '.report'
    num_node = 1
    num_rank = 5
    delay = 3.0
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.append_region('sleep', delay)
    app_conf.append_region('spin', delay)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    launcher.run(name)
    self._output = geopmpy.io.AppOutput(report_path)
    node_names = self._output.get_node_names()
    self.assertEqual(num_node, len(node_names))
    for nn in node_names:
        spin_data = self._output.get_report_data(node_name=nn, region='spin')
        sleep_data = self._output.get_report_data(node_name=nn, region='sleep')
        epoch_data = self._output.get_report_data(node_name=nn, region='epoch')
        # An epoch spans one pass through both regions.
        total_runtime = sleep_data['runtime'].item() + spin_data['runtime'].item()
        self.assertNear(total_runtime, epoch_data['runtime'].item())
def test_epoch_data_valid(self):
    """Run an unmarked spin workload and verify the epoch entry of the raw
    report contains valid data consistent with the totals and the
    unmarked region (runtime, energy, power, frequency, count)."""
    name = 'test_epoch_data_valid'
    report_path = name + '.report'
    num_node = 1
    num_rank = 1
    big_o = 1.0
    loop_count = 10
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.set_loop_count(loop_count)
    # spin-unmarked: work outside any marked region, attributed to 'unmarked-region'.
    app_conf.append_region('spin-unmarked', big_o)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    launcher.run(name)
    report = geopmpy.io.RawReport(report_path)
    node_names = report.host_names()
    self.assertEqual(num_node, len(node_names))
    for nn in node_names:
        regions = report.region_names(nn)
        self.assertTrue('model-init' not in regions)
        totals = report.raw_totals(nn)
        unmarked = report.raw_region(nn, 'unmarked-region')
        epoch = report.raw_epoch(nn)
        # Epoch has valid data
        self.assertGreater(epoch['runtime (sec)'], 0)
        self.assertGreater(epoch['sync-runtime (sec)'], 0)
        self.assertGreater(epoch['package-energy (joules)'], 0)
        self.assertGreater(epoch['dram-energy (joules)'], 0)
        self.assertGreater(epoch['power (watts)'], 0)
        self.assertGreater(epoch['frequency (%)'], 0)
        self.assertGreater(epoch['frequency (Hz)'], 0)
        self.assertEqual(epoch['count'], loop_count)
        # Runtime
        self.assertTrue(totals['runtime (sec)'] > unmarked['runtime (sec)'] >= epoch['runtime (sec)'],
                        '''The total runtime is NOT > the unmarked runtime or the unmarked runtime is NOT
                        >= the Epoch runtime.''')
        # Package Energy (joules)
        self.assertTrue(totals['package-energy (joules)'] >
                        unmarked['package-energy (joules)'] >=
                        epoch['package-energy (joules)'],
                        '''The total package energy (joules) is NOT > the unmarked package energy (joules)
                        or the unmarked package energy (joules) is NOT >= the Epoch package
                        energy (joules).''')
        # DRAM Energy
        self.assertTrue(totals['dram-energy (joules)'] >
                        unmarked['dram-energy (joules)'] >=
                        epoch['dram-energy (joules)'],
                        '''The total dram energy is NOT > the unmarked dram energy or the unmarked
                        dram energy is NOT >= the Epoch dram energy.''')
        # Sync-runtime
        self.assertTrue(unmarked['sync-runtime (sec)'] >= epoch['sync-runtime (sec)'],
                        '''The sync-runtime for the unmarked region is NOT >= the Epoch sync-runtime.''')
def test_runtime_nested(self):
    """Run the nested-progress benchmark and verify the spin runtime,
    epoch runtime, network time bounds and region count."""
    name = 'test_runtime_nested'
    report_path = name + '.report'
    num_node = 1
    num_rank = 1
    delay = 1.0
    loop_count = 2
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.set_loop_count(loop_count)
    app_conf.append_region('nested-progress', delay)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    launcher.run(name)
    self._output = geopmpy.io.AppOutput(report_path)
    node_names = self._output.get_node_names()
    self.assertEqual(num_node, len(node_names))
    for nn in node_names:
        spin_data = self._output.get_report_data(node_name=nn, region='spin')
        epoch_data = self._output.get_report_data(node_name=nn, region='epoch')
        app_totals = self._output.get_app_total_data(node_name=nn)
        # The spin sections of this region sleep for 'delay' seconds twice per loop.
        self.assertNear(2 * loop_count * delay, spin_data['runtime'].item())
        self.assertNear(spin_data['runtime'].item(), epoch_data['runtime'].item(), epsilon=0.01)
        # Network time must be non-zero but small (under 0.1 s).
        self.assertGreater(app_totals['network-time'].item(), 0)
        self.assertGreater(0.1, app_totals['network-time'].item())
        self.assertEqual(loop_count, spin_data['count'].item())
def test_trace_runtimes(self):
    """Cross-check region runtimes between the report and the trace: the
    elapsed trace time for each region and for the epoch must be near
    the corresponding runtime reported per node."""
    name = 'test_trace_runtimes'
    report_path = name + '.report'
    trace_path = name + '.trace'
    num_node = 4
    num_rank = 16
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.append_region('sleep', 1.0)
    app_conf.append_region('dgemm', 1.0)
    app_conf.append_region('all2all', 1.0)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    # region_barrier=True keeps ranks in lock-step so trace region boundaries align.
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,
                                                trace_path, region_barrier=True)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    launcher.run(name)
    self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
    node_names = self._output.get_node_names()
    self.assertEqual(len(node_names), num_node)
    regions = self._output.get_region_names()
    for nn in node_names:
        trace = self._output.get_trace_data(node_name=nn)
        app_totals = self._output.get_app_total_data(node_name=nn)
        self.assertNear(trace.iloc[-1]['TIME'], app_totals['runtime'].item(), msg='Application runtime failure, node_name={}.'.format(nn))
        # Calculate runtime totals for each region in each trace, compare to report
        tt = trace.reset_index(level='index')  # move 'index' field from multiindex to columns
        tt = tt.set_index(['REGION_HASH'], append=True)  # add region_hash column to multiindex
        tt_reg = tt.groupby(level=['REGION_HASH'])
        for region_name in regions:
            region_data = self._output.get_report_data(node_name=nn, region=region_name)
            # Skip synthetic/MPI regions and regions with no sync runtime.
            if (region_name not in ['unmarked-region', 'model-init', 'epoch'] and
                not region_name.startswith('MPI_') and
                region_data['sync_runtime'].item() != 0):
                region_hash = region_data['id'].item()
                trace_data = tt_reg.get_group(region_hash)
                start_idx = trace_data.iloc[0]['index']
                end_idx = trace_data.iloc[-1]['index'] + 1  # use time from sample after exiting region
                start_time = tt.loc[tt['index'] == start_idx]['TIME'].item()
                end_time = tt.loc[tt['index'] == end_idx]['TIME'].item()
                trace_elapsed_time = end_time - start_time
                msg = 'for region {rn} on node {nn}'.format(rn=region_name, nn=nn)
                self.assertNear(trace_elapsed_time, region_data['sync_runtime'].item(), msg=msg)
        #epoch
        region_data = self._output.get_report_data(node_name=nn, region='epoch')
        # Epoch elapsed time: from the first sample of epoch 0 to the end of the trace.
        trace_elapsed_time = trace.iloc[-1]['TIME'] - trace['TIME'].loc[trace['EPOCH_COUNT'] == 0].iloc[0]
        msg = 'for epoch on node {nn}'.format(nn=nn)
        self.assertNear(trace_elapsed_time, region_data['runtime'].item(), msg=msg)
@util.skip_unless_config_enable('bloat')
def test_runtime_regulator(self):
    """Verify that REGION_RUNTIME reported in the trace at region entry
    and exit matches the configured big-o for each region."""
    name = 'test_runtime_regulator'
    report_path = name + '.report'
    trace_path = name + '.trace'
    num_node = 1
    num_rank = 4
    loop_count = 20
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.set_loop_count(loop_count)
    sleep_big_o = 1.0
    spin_big_o = 0.5
    expected_region_runtime = {'spin': spin_big_o, 'sleep': sleep_big_o}
    app_conf.append_region('sleep', sleep_big_o)
    app_conf.append_region('spin', spin_big_o)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, region_barrier=True)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    launcher.run(name)
    self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
    node_names = self._output.get_node_names()
    self.assertEqual(len(node_names), num_node)
    regions = self._output.get_region_names()
    for nn in node_names:
        app_totals = self._output.get_app_total_data(node_name=nn)
        trace = self._output.get_trace_data(node_name=nn)
        self.assertNear(trace.iloc[-1]['TIME'], app_totals['runtime'].item())
        tt = trace.set_index(['REGION_HASH'], append=True)
        tt = tt.groupby(level=['REGION_HASH'])
        for region_name in regions:
            region_data = self._output.get_report_data(node_name=nn, region=region_name)
            if region_name not in ['unmarked-region', 'model-init', 'epoch'] and not region_name.startswith('MPI_') and region_data['runtime'].item() != 0:
                trace_data = tt.get_group(region_data['id'].item())
                filtered_df = self.create_progress_df(trace_data)
                first_time = False
                # sleep timing is less precise than spin; allow a looser tolerance.
                epsilon = 0.001 if region_name != 'sleep' else 0.05
                for index, df in filtered_df.iterrows():
                    # At exit (progress == 1) the estimated region runtime must be settled.
                    if df['REGION_PROGRESS'] == 1:
                        self.assertNear(df['REGION_RUNTIME'], expected_region_runtime[region_name], epsilon=epsilon)
                        first_time = True
                    # After the first full pass, entry samples must carry the same estimate.
                    if first_time is True and df['REGION_PROGRESS'] == 0:
                        self.assertNear(df['REGION_RUNTIME'], expected_region_runtime[region_name], epsilon=epsilon)
@util.skip_unless_run_long_tests()
@util.skip_unless_config_enable('bloat')
def test_region_runtimes(self):
    """Long-running check that per-region runtimes computed from the trace
    agree with the report, and that every region seen in the trace is
    also present in the report."""
    name = 'test_region_runtimes'
    report_path = name + '.report'
    trace_path = name + '.trace'
    num_node = 4
    num_rank = 16
    loop_count = 500
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.append_region('dgemm', 8.0)
    app_conf.set_loop_count(loop_count)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, time_limit=900)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    launcher.run(name)
    self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
    node_names = self._output.get_node_names()
    self.assertEqual(len(node_names), num_node)
    # Calculate region times from traces
    region_times = collections.defaultdict(lambda: collections.defaultdict(dict))
    for nn in node_names:
        tt = self._output.get_trace_data(node_name=nn).set_index(['REGION_HASH'], append=True).groupby(level=['REGION_HASH'])
        for region_hash, data in tt:
            filtered_df = self.create_progress_df(data)
            filtered_df = filtered_df.diff()
            # Since I'm not separating out the progress 0's from 1's, when I do the diff I only care about the
            # case where 1 - 0 = 1 for the progress column.
            filtered_df = filtered_df.loc[filtered_df['REGION_PROGRESS'] == 1]
            if len(filtered_df) > 1:
                launcher.write_log(name, 'Region elapsed time stats from {} - {} :\n{}'\
                                   .format(nn, region_hash, filtered_df['TIME'].describe()))
                filtered_df['TIME'].describe()
                region_times[nn][region_hash] = filtered_df
        launcher.write_log(name, '{}'.format('-' * 80))
    # Loop through the reports to see if the region runtimes line up with what was calculated from the trace files above.
    regions = self._output.get_region_names()
    write_regions = True
    for nn in node_names:
        for region_name in regions:
            rr = self._output.get_report_data(node_name=nn, region=region_name)
            # Only compare regions that executed more than once and have a real hash.
            if (region_name != 'epoch' and
                rr['id'].item() != 0 and
                rr['count'].item() > 1):
                if write_regions:
                    launcher.write_log(name, 'Region {} is {}.'.format(rr['id'].item(), region_name))
                runtime = rr['sync_runtime'].item()
                self.assertNear(runtime,
                                region_times[nn][rr['id'].item()]['TIME'].sum())
        write_regions = False
    # Test to ensure every region detected in the trace is captured in the report.
    for nn in node_names:
        report_ids = []
        for region_name in regions:
            rr = self._output.get_report_data(node_name=nn, region=region_name)
            report_ids.append(rr['id'].item())
        for region_hash in region_times[nn].keys():
            self.assertTrue(region_hash in report_ids, msg='Report from {} missing region_hash {}'.format(nn, region_hash))
def test_progress(self):
    """Run the sleep-progress workload and verify the reported sleep
    region runtime, total runtime ordering and region count."""
    name = 'test_progress'
    report_path = name + '.report'
    num_node = 1
    num_rank = 4
    delay = 3.0
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.append_region('sleep-progress', delay)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    launcher.run(name)
    self._output = geopmpy.io.AppOutput(report_path)
    node_names = self._output.get_node_names()
    self.assertEqual(len(node_names), num_node)
    for nn in node_names:
        # NOTE(review): region is queried as 'sleep' although the config used
        # 'sleep-progress' -- presumably the '-progress' suffix only selects a
        # benchmark variant and the reported region name stays 'sleep'; confirm.
        sleep_data = self._output.get_report_data(node_name=nn, region='sleep')
        app_total = self._output.get_app_total_data(node_name=nn)
        self.assertNear(delay, sleep_data['runtime'].item())
        self.assertGreater(app_total['runtime'].item(), sleep_data['runtime'].item())
        self.assertEqual(1, sleep_data['count'].item())
def test_count(self):
    """Verify region, epoch and trace epoch counts all equal the
    configured loop count for a short spin workload."""
    name = 'test_count'
    report_path = name + '.report'
    trace_path = name + '.trace'
    num_node = 1
    num_rank = 4
    delay = 0.01
    loop_count = 100
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.set_loop_count(loop_count)
    app_conf.append_region('spin', delay)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    launcher.run(name)
    self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
    node_names = self._output.get_node_names()
    self.assertEqual(len(node_names), num_node)
    for nn in node_names:
        trace_data = self._output.get_trace_data(node_name=nn)
        spin_data = self._output.get_report_data(node_name=nn, region='spin')
        epoch_data = self._output.get_report_data(node_name=nn, region='epoch')
        self.assertNear(delay * loop_count, spin_data['runtime'].item())
        self.assertEqual(loop_count, spin_data['count'].item())
        self.assertEqual(loop_count, epoch_data['count'].item())
        # The last trace sample must show the final epoch count.
        self.assertEqual(loop_count, trace_data['EPOCH_COUNT'][-1])
@util.skip_unless_run_long_tests()
def test_scaling(self):
    """
    This test will start at ${num_node} nodes and ranks.  It will then calls check_run() to
    ensure that commands can be executed successfully on all of the allocated compute nodes.
    Afterwards it will run the specified app config on each node and verify the reports.  When
    complete it will double num_node and run the steps again.

    WARNING: This test can take a long time to run depending on the number of starting nodes and
    the size of the allocation.
    """
    name = 'test_scaling'
    report_path = name + '.report'
    num_node = 2
    loop_count = 100
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.append_region('dgemm', 1.0)
    app_conf.append_region('all2all', 1.0)
    app_conf.set_loop_count(loop_count)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, time_limit=900)
    check_successful = True
    while check_successful:
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_node)
        try:
            # Probe whether the allocation can still satisfy num_node nodes.
            launcher.check_run(name)
        except subprocess.CalledProcessError as e:
            # If we exceed the available nodes in the allocation ALPS/SLURM give a rc of 1
            # All other rc's are real errors
            if e.returncode != 1:
                raise e
            check_successful = False
        if check_successful:
            launcher.write_log(name, 'About to run on {} nodes.'.format(num_node))
            launcher.run(name)
            self._output = geopmpy.io.AppOutput(report_path)
            node_names = self._output.get_node_names()
            self.assertEqual(len(node_names), num_node)
            for nn in node_names:
                dgemm_data = self._output.get_report_data(node_name=nn, region='dgemm')
                all2all_data = self._output.get_report_data(node_name=nn, region='all2all')
                self.assertEqual(loop_count, dgemm_data['count'].item())
                self.assertEqual(loop_count, all2all_data['count'].item())
                self.assertGreater(dgemm_data['runtime'].item(), 0.0)
                self.assertGreater(all2all_data['runtime'].item(), 0.0)
            # Double the node count and clean up before the next iteration.
            num_node *= 2
            self._output.remove_files()
@util.skip_unless_run_long_tests()
def test_power_consumption(self):
    """Run dgemm under a power budget and verify per-node socket power
    derived from the trace stays within the cap (2% overage allowed at
    the 75th percentile)."""
    name = 'test_power_consumption'
    report_path = name + '.report'
    trace_path = name + '.trace'
    num_node = 4
    num_rank = 16
    loop_count = 500
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.append_region('dgemm', 8.0)
    app_conf.set_loop_count(loop_count)
    fam, mod = geopm_test_launcher.get_platform()
    if fam == 6 and mod == 87:
        # budget for KNL
        self._options['power_budget'] = 130
    else:
        self._options['power_budget'] = 200
    gov_agent_conf_path = name + '_gov_agent.config'
    self._tmp_files.append(gov_agent_conf_path)
    gov_agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    launcher = geopm_test_launcher.TestLauncher(app_conf, gov_agent_conf, report_path,
                                                trace_path, time_limit=900)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    launcher.write_log(name, 'Power cap = {}W'.format(self._options['power_budget']))
    launcher.run(name)
    self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
    node_names = self._output.get_node_names()
    self.assertEqual(num_node, len(node_names))
    all_power_data = {}
    # Total power consumed will be Socket(s) + DRAM
    for nn in node_names:
        tt = self._output.get_trace_data(node_name=nn)
        first_epoch_index = tt.loc[tt['EPOCH_COUNT'] == 0][:1].index[0]
        epoch_dropped_data = tt[first_epoch_index:]  # Drop all startup data
        power_data = epoch_dropped_data.filter(regex='ENERGY')
        power_data['TIME'] = epoch_dropped_data['TIME']
        # Differentiate cumulative energy/time to get per-sample deltas.
        power_data = power_data.diff().dropna()
        power_data.rename(columns={'TIME': 'ELAPSED_TIME'}, inplace=True)
        power_data = power_data.loc[(power_data != 0).all(axis=1)]  # Will drop any row that is all 0's
        pkg_energy_cols = [s for s in power_data.keys() if 'ENERGY_PACKAGE' in s]
        dram_energy_cols = [s for s in power_data.keys() if 'ENERGY_DRAM' in s]
        # Power = sum of energy deltas over the elapsed time of each sample.
        power_data['SOCKET_POWER'] = power_data[pkg_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']
        power_data['DRAM_POWER'] = power_data[dram_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']
        power_data['COMBINED_POWER'] = power_data['SOCKET_POWER'] + power_data['DRAM_POWER']
        pandas.set_option('display.width', 100)
        launcher.write_log(name, 'Power stats from {} :\n{}'.format(nn, power_data.describe()))
        all_power_data[nn] = power_data
    for node_name, power_data in all_power_data.items():
        # Allow for overages of 2% at the 75th percentile.
        self.assertGreater(self._options['power_budget'] * 1.02, power_data['SOCKET_POWER'].quantile(.75))
    # TODO Checks on the maximum power computed during the run?
    # TODO Checks to see how much power was left on the table?
@util.skip_unless_run_long_tests()
@util.skip_unless_batch()
def test_power_balancer(self):
    """Run an imbalanced dgemm workload under both the power_governor and
    power_balancer agents and verify the balancer reduces the maximum
    dgemm runtime by a required margin while honoring the power budget."""
    name = 'test_power_balancer'
    num_node = 4
    num_rank = 16
    loop_count = 500
    # Require that the balancer moves the maximum dgemm runtime at
    # least 1/4 the distance to the mean dgemm runtime under the
    # governor.
    margin_factor = 0.25
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.append_region('dgemm-imbalance', 8.0)
    app_conf.append_region('all2all', 0.05)
    app_conf.set_loop_count(loop_count)
    # Update app config with imbalance
    alloc_nodes = geopm_test_launcher.TestLauncher.get_alloc_nodes()
    # Slow down the first half of the allocated nodes by 50%.
    for nn in range(len(alloc_nodes) // 2):
        app_conf.append_imbalance(alloc_nodes[nn], 0.5)
    fam, mod = geopm_test_launcher.get_platform()
    if fam == 6 and mod == 87:
        # budget for KNL
        power_budget = 130
    else:
        power_budget = 200
    self._options = {'power_budget': power_budget}
    gov_agent_conf_path = name + '_gov_agent.config'
    bal_agent_conf_path = name + '_bal_agent.config'
    self._tmp_files.append(gov_agent_conf_path)
    self._tmp_files.append(bal_agent_conf_path)
    agent_list = ['power_governor', 'power_balancer']
    path_dict = {'power_governor': gov_agent_conf_path, 'power_balancer': bal_agent_conf_path}
    agent_runtime = dict()
    for agent in agent_list:
        agent_conf = geopmpy.io.AgentConf(path_dict[agent], agent, self._options)
        run_name = '{}_{}'.format(name, agent)
        report_path = '{}.report'.format(run_name)
        trace_path = '{}.trace'.format(run_name)
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,
                                                    trace_path, time_limit=2700)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        launcher.write_log(run_name, 'Power cap = {}W'.format(power_budget))
        launcher.run(run_name)
        self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
        node_names = self._output.get_node_names()
        self.assertEqual(num_node, len(node_names))
        power_limits = []
        # Total power consumed will be Socket(s) + DRAM
        for nn in node_names:
            tt = self._output.get_trace_data(node_name=nn)
            first_epoch_index = tt.loc[tt['EPOCH_COUNT'] == 0][:1].index[0]
            epoch_dropped_data = tt[first_epoch_index:]  # Drop all startup data
            power_data = epoch_dropped_data.filter(regex='ENERGY')
            power_data['TIME'] = epoch_dropped_data['TIME']
            # Differentiate cumulative energy/time to get per-sample deltas.
            power_data = power_data.diff().dropna()
            power_data.rename(columns={'TIME': 'ELAPSED_TIME'}, inplace=True)
            power_data = power_data.loc[(power_data != 0).all(axis=1)]  # Will drop any row that is all 0's
            pkg_energy_cols = [s for s in power_data.keys() if 'ENERGY_PACKAGE' in s]
            dram_energy_cols = [s for s in power_data.keys() if 'ENERGY_DRAM' in s]
            power_data['SOCKET_POWER'] = power_data[pkg_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']
            power_data['DRAM_POWER'] = power_data[dram_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']
            power_data['COMBINED_POWER'] = power_data['SOCKET_POWER'] + power_data['DRAM_POWER']
            pandas.set_option('display.width', 100)
            launcher.write_log(name, 'Power stats from {} {} :\n{}'.format(agent, nn, power_data.describe()))
            # Get final power limit set on the node
            if agent == 'power_balancer':
                power_limits.append(epoch_dropped_data['POWER_LIMIT'][-1])
        if agent == 'power_balancer':
            # The balancer must on average stay at or under the budget.
            avg_power_limit = sum(power_limits) / len(power_limits)
            self.assertTrue(avg_power_limit <= power_budget)
        min_runtime = float('nan')
        max_runtime = float('nan')
        node_names = self._output.get_node_names()
        runtime_list = []
        for node_name in node_names:
            epoch_data = self._output.get_report_data(node_name=node_name, region='dgemm')
            runtime_list.append(epoch_data['runtime'].item())
        if agent == 'power_governor':
            # Margin is computed from the governor's runtime spread.
            mean_runtime = sum(runtime_list) / len(runtime_list)
            max_runtime = max(runtime_list)
            margin = margin_factor * (max_runtime - mean_runtime)
        agent_runtime[agent] = max(runtime_list)
    self.assertGreater(agent_runtime['power_governor'] - margin,
                       agent_runtime['power_balancer'],
                       "governor runtime: {}, balancer runtime: {}, margin: {}".format(
                           agent_runtime['power_governor'], agent_runtime['power_balancer'], margin))
def test_progress_exit(self):
    """
    Check that when we always see progress exit before the next entry.
    Make sure that progress only decreases when a new region is entered.
    """
    name = 'test_progress_exit'
    report_path = name + '.report'
    trace_path = name + '.trace'
    num_node = 1
    num_rank = 16
    loop_count = 100
    big_o = 0.1
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.set_loop_count(loop_count)
    app_conf.append_region('dgemm-progress', big_o)
    app_conf.append_region('spin-progress', big_o)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, region_barrier=True)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    launcher.run(name)
    self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
    node_names = self._output.get_node_names()
    self.assertEqual(num_node, len(node_names))
    for nn in node_names:
        tt = self._output.get_trace_data(node_name=nn)
        tt = tt.set_index(['REGION_HASH'], append=True)
        tt = tt.groupby(level=['REGION_HASH'])
        for region_hash, data in tt:
            tmp = data['REGION_PROGRESS'].diff()
            #@todo legacy branch?
            # Look for changes in progress that are more negative
            # than can be expected due to extrapolation error.
            # NOTE(review): 8300189175 appears to be a hard-coded region hash
            # for one specific region -- confirm which region it identifies.
            if region_hash == 8300189175:
                negative_progress = tmp.loc[(tmp > -1) & (tmp < -0.1)]
                launcher.write_log(name, '{}'.format(negative_progress))
                self.assertEqual(0, len(negative_progress))
@util.skip_unless_run_long_tests()
@util.skip_unless_optimized()
def test_sample_rate(self):
    """
    Check that sample rate is regular and fast.
    """
    name = 'test_sample_rate'
    report_path = name + '.report'
    trace_path = name + '.trace'
    num_node = 1
    num_rank = 16
    loop_count = 10
    big_o = 10.0
    region = 'dgemm-progress'
    max_mean = 0.01  # 10 millisecond max sample period
    max_nstd = 0.1  # 10% normalized standard deviation (std / mean)
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.set_loop_count(loop_count)
    app_conf.append_region(region, big_o)
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    launcher.run(name)
    self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
    node_names = self._output.get_node_names()
    self.assertEqual(num_node, len(node_names))
    for nn in node_names:
        tt = self._output.get_trace_data(node_name=nn)
        # Sample period is the time delta between consecutive trace rows.
        delta_t = tt['TIME'].diff()
        delta_t = delta_t.loc[delta_t != 0]
        self.assertGreater(max_mean, delta_t.mean())
        # WARNING : The following line may mask issues in the sampling rate.  To do a fine grained analysis, comment
        # out the next line and do NOT run on the BSP.  This will require modifications to the launcher or manual testing.
        size_orig = len(delta_t)
        delta_t = delta_t[(delta_t - delta_t.mean()) < 3*delta_t.std()]  # Only keep samples within 3 stds of the mean
        # No more than 6% of the samples may be outliers.
        self.assertGreater(0.06, 1 - (float(len(delta_t)) / size_orig))
        self.assertGreater(max_nstd, delta_t.std() / delta_t.mean())
    def test_network_times(self):
        """
        Check that MPI (network) time is attributed to the correct regions.

        Runs sleep, dgemm and all2all regions; only all2all and the
        implicit MPI_Barrier should accumulate network time, and their sum
        should account for the epoch and application network totals.
        """
        name = 'test_network_times'
        report_path = name + '.report'
        trace_path = name + '.trace'
        num_node = 4
        num_rank = 16
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.append_region('sleep', 1.0)
        app_conf.append_region('dgemm', 1.0)
        app_conf.append_region('all2all', 1.0)
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        launcher.run(name)
        self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
        node_names = self._output.get_node_names()
        self.assertEqual(len(node_names), num_node)
        for nn in node_names:
            all2all_data = self._output.get_report_data(node_name=nn, region='all2all')
            sleep_data = self._output.get_report_data(node_name=nn, region='sleep')
            dgemm_data = self._output.get_report_data(node_name=nn, region='dgemm')
            barrier_data = self._output.get_report_data(node_name=nn, region='MPI_Barrier')
            unmarked_data = self._output.get_report_data(node_name=nn, region='unmarked-region')
            epoch_data = self._output.get_report_data(node_name=nn, region='epoch')
            app_total = self._output.get_app_total_data(node_name=nn)
            self.assertEqual(0, unmarked_data['count'].item())
            # Since MPI time is is counted if any rank on a node is in
            # an MPI call, but region time is counted only when all
            # ranks on a node are in a region, we must use the
            # unmarked-region time as our error term when comparing
            # MPI time and all2all time.
            mpi_epsilon = max(unmarked_data['runtime'].item() / all2all_data['network_time'].item(), 0.05)
            self.assertNear(all2all_data['network_time'].item(), all2all_data['runtime'].item(), mpi_epsilon)
            self.assertNear(all2all_data['network_time'].item() + barrier_data['network_time'].item(),
                            epoch_data['network_time'].item())
            # TODO: key naming is inconsistent between the report frames
            # ('network_time') and the app totals ('network-time'); unify
            # on one separator in geopmpy.io.
            self.assertNear(all2all_data['network_time'].item() + barrier_data['network_time'].item(),
                            app_total['network-time'].item())
            self.assertEqual(0, unmarked_data['network_time'].item())
            self.assertEqual(0, sleep_data['network_time'].item())
            self.assertEqual(0, dgemm_data['network_time'].item())
    def test_ignore_runtime(self):
        """
        Check that time spent in the 'ignore' region is reported in the
        application's ignore-runtime total.
        """
        name = 'test_ignore_runtime'
        report_path = name + '.report'
        trace_path = name + '.trace'
        num_node = 4
        num_rank = 16
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.append_region('ignore', 1.0)
        app_conf.append_region('dgemm', 1.0)
        app_conf.append_region('all2all', 1.0)
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        launcher.run(name)
        self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
        node_names = self._output.get_node_names()
        self.assertEqual(len(node_names), num_node)
        for nn in node_names:
            ignore_data = self._output.get_report_data(node_name=nn, region='ignore')
            app_data = self._output.get_app_total_data(node_name=nn)
            # Very tight tolerance: ignore accounting should be near exact.
            self.assertNear(ignore_data['runtime'].item(),
                            app_data['ignore-runtime'].item(), 0.00005)
@util.skip_unless_config_enable('ompt')
def test_unmarked_ompt(self):
name = 'test_unmarked_ompt'
report_path = name + '.report'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('stream-unmarked', 1.0)
app_conf.append_region('dgemm-unmarked', 1.0)
app_conf.append_region('all2all-unmarked', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
stream_id = None
region_names = self._output.get_region_names()
stream_name = [key for key in region_names if key.lower().find('stream') != -1][0]
for nn in node_names:
stream_data = self._output.get_report_data(node_name=nn, region=stream_name)
found = False
for name in region_names:
if stream_name in name: # account for numbers at end of OMPT region names
found = True
self.assertTrue(found)
self.assertEqual(1, stream_data['count'].item())
if stream_id:
self.assertEqual(stream_id, stream_data['id'].item())
else:
stream_id = stream_data['id'].item()
ompt_regions = [key for key in region_names if key.startswith('[OMPT]')]
self.assertLessEqual(2, len(ompt_regions))
self.assertTrue(('MPI_Alltoall' in region_names))
gemm_region = [key for key in region_names if key.lower().find('gemm') != -1]
self.assertLessEqual(1, len(gemm_region))
    def _test_agent_frequency_map(self, name, use_env=False):
        """
        Common driver for the FrequencyMapAgent tests.

        Assigns a per-region frequency map (through the policy or through
        the GEOPM_FREQUENCY_MAP environment variable when use_env is True)
        and verifies each region ran near its mapped frequency.

        Args:
            name (str): test name used for report/trace file paths.
            use_env (bool): pass the map via environment instead of policy.
        """
        min_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MIN board 0")
        max_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MAX board 0")
        sticker_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_STICKER board 0")
        freq_step = geopm_test_launcher.geopmread("CPUINFO::FREQ_STEP board 0")
        self._agent = "frequency_map"
        report_path = name + '.report'
        trace_path = name + '.trace'
        num_node = 1
        num_rank = 4
        loop_count = 5
        dgemm_bigo = 15.0
        stream_bigo = 1.0
        dgemm_bigo_jlse = 35.647
        dgemm_bigo_quartz = 29.12
        stream_bigo_jlse = 1.6225
        stream_bigo_quartz = 1.7941
        # Per-system big-o tuning so each region runs for a useful duration.
        hostname = socket.gethostname()
        if hostname.endswith('.alcf.anl.gov'):
            dgemm_bigo = dgemm_bigo_jlse
            stream_bigo = stream_bigo_jlse
        elif hostname.startswith('mcfly'):
            dgemm_bigo = 42.0
            stream_bigo = 1.75
        elif hostname.startswith('quartz'):
            dgemm_bigo = dgemm_bigo_quartz
            stream_bigo = stream_bigo_quartz
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.set_loop_count(loop_count)
        app_conf.append_region('dgemm', dgemm_bigo)
        app_conf.append_region('stream', stream_bigo)
        app_conf.append_region('all2all', 1.0)
        app_conf.write()
        freq_map = {}
        freq_map['dgemm'] = sticker_freq
        freq_map['stream'] = sticker_freq - 2 * freq_step
        freq_map['all2all'] = min_freq
        self._options = create_frequency_map_policy(min_freq, max_freq, freq_map, use_env)
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,
                                                    trace_path, region_barrier=True, time_limit=900)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        launcher.run(name)
        self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
        node_names = self._output.get_node_names()
        self.assertEqual(len(node_names), num_node)
        regions = self._output.get_region_names()
        for nn in node_names:
            for region_name in regions:
                region_data = self._output.get_report_data(node_name=nn, region=region_name)
                if (region_name in ['dgemm', 'stream', 'all2all']):
                    # TODO: also verify the per-sample trace frequencies.
                    # TODO: also verify the agent report frequency augments.
                    msg = region_name + " frequency should be near assigned map frequency"
                    # 'frequency' is reported as a percentage of sticker.
                    self.assertNear(region_data['frequency'].item(), freq_map[region_name] / sticker_freq * 100, msg=msg)
def test_agent_frequency_map_env(self):
"""
Test of the FrequencyMapAgent, setting a map through GEOPM_FREQUENCY_MAP.
"""
self._test_agent_frequency_map('test_agent_frequency_map_env', use_env=True)
def test_agent_frequency_map_policy(self):
"""
Test of the FrequencyMapAgent, setting a map through the policy.
"""
self._test_agent_frequency_map('test_agent_frequency_map_policy', use_env=False)
def test_agent_energy_efficient_single_region(self):
"""
Test of the EnergyEfficientAgent against single region loop.
"""
name = 'test_energy_efficient_single_region'
min_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MIN board 0")
sticker_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_STICKER board 0")
freq_step = geopm_test_launcher.geopmread("CPUINFO::FREQ_STEP board 0")
self._agent = "energy_efficient"
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 1
num_rank = 4
loop_count = 100
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('spin', 0.1)
self._options = {'frequency_min': min_freq,
'frequency_max': sticker_freq}
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
regions = self._output.get_region_names()
for nn in node_names:
for region_name in regions:
report = geopmpy.io.RawReport(report_path)
if (region_name in ['spin']):
region = report.raw_region(nn, region_name)
msg = region_name + " frequency should be minimum frequency as specified by policy"
self.assertEqual(region['requested-online-frequency'], min_freq, msg=msg) # freq should reduce
@util.skip_unless_run_long_tests()
@util.skip_unless_cpufreq()
@util.skip_unless_batch()
def test_agent_energy_efficient(self):
"""
Test of the EnergyEfficientAgent.
"""
name = 'test_energy_efficient_sticker'
min_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MIN board 0")
max_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MAX board 0")
sticker_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_STICKER board 0")
freq_step = geopm_test_launcher.geopmread("CPUINFO::FREQ_STEP board 0")
self._agent = "energy_efficient"
num_node = 1
num_rank = 4
loop_count = 200
dgemm_bigo = 15.0
stream_bigo = 1.0
dgemm_bigo_jlse = 35.647
dgemm_bigo_quartz = 29.12
stream_bigo_jlse = 1.6225
stream_bigo_quartz = 1.7941
hostname = socket.gethostname()
if hostname.endswith('.alcf.anl.gov'):
dgemm_bigo = dgemm_bigo_jlse
stream_bigo = stream_bigo_jlse
elif hostname.startswith('mcfly'):
dgemm_bigo = 42.0
stream_bigo = 1.75
elif hostname.startswith('quartz'):
dgemm_bigo = dgemm_bigo_quartz
stream_bigo = stream_bigo_quartz
run = ['_sticker', '_nan_nan']
for rr in run:
report_path = name + rr + '.report'
trace_path = name + rr + '.trace'
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('dgemm', dgemm_bigo)
app_conf.append_region('stream', stream_bigo)
app_conf.write()
if rr == '_sticker':
self._options = {'frequency_min': sticker_freq,
'frequency_max': sticker_freq}
freq = sticker_freq
else:
self._options = {'frequency_min': min_freq,
'frequency_max': sticker_freq}
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,
trace_path, region_barrier=True, time_limit=900)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name + rr)
# compare the app_total runtime and energy and assert within bounds
report_path = name + run[0] + '.report'
trace_path = name + run[0] + '.trace'
sticker_out = geopmpy.io.AppOutput(report_path, trace_path + '*')
report_path = name + run[1] + '.report'
trace_path = name + run[1] + '.trace'
nan_out = geopmpy.io.AppOutput(report_path, trace_path + '*')
for nn in nan_out.get_node_names():
sticker_app_total = sticker_out.get_app_total_data(node_name=nn)
nan_app_total = nan_out.get_app_total_data(node_name=nn)
runtime_savings_epoch = (sticker_app_total['runtime'].item() - nan_app_total['runtime'].item()) / sticker_app_total['runtime'].item()
energy_savings_epoch = (sticker_app_total['energy-package'].item() - nan_app_total['energy-package'].item()) / sticker_app_total['energy-package'].item()
self.assertLess(-0.1, runtime_savings_epoch) # want -10% or better
self.assertLess(0.0, energy_savings_epoch)
class TestIntegrationGeopmio(unittest.TestCase):
    ''' Tests of geopmread and geopmwrite.'''

    def setUp(self):
        # Warning lines containing this substring are ignored in output.
        self.skip_warning_string = 'Incompatible CPU'

    def check_output(self, args, expected):
        '''Run self.exec_name with args; each string in expected must
        appear, in order, on the next non-warning output line, and no
        remaining line may contain "Error".'''
        try:
            proc = subprocess.Popen([self.exec_name] + args,
                                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            for exp in expected:
                line = proc.stdout.readline()
                while self.skip_warning_string.encode() in line:
                    line = proc.stdout.readline()
                self.assertIn(exp.encode(), line)
            for line in proc.stdout:
                if self.skip_warning_string.encode() not in line:
                    self.assertNotIn(b'Error', line)
        except subprocess.CalledProcessError as ex:
            sys.stderr.write('{}\n'.format(ex.output))

    def check_output_range(self, args, min_exp, max_exp):
        '''Run self.exec_name with args and assert every numeric output
        value lies in [min_exp, max_exp].  Hex lines are parsed as int,
        anything else as float.'''
        try:
            proc = subprocess.Popen([self.exec_name] + args,
                                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            for line in proc.stdout:
                if self.skip_warning_string.encode() in line:
                    continue
                if line.startswith(b'0x'):
                    value = int(line)
                else:
                    value = float(line)
                self.assertLessEqual(min_exp, value, msg="Value read for {} smaller than {}: {}.".format(args, min_exp, value))
                self.assertGreaterEqual(max_exp, value, msg="Value read for {} larger than {}: {}.".format(args, max_exp, value))
        except subprocess.CalledProcessError as ex:
            sys.stderr.write('{}\n'.format(ex.output))

    def check_no_error(self, args):
        '''Run self.exec_name with args and assert that no non-warning
        output line contains "Error".'''
        try:
            proc = subprocess.Popen([self.exec_name] + args,
                                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            for line in proc.stdout:
                if self.skip_warning_string.encode() not in line:
                    self.assertNotIn(b'Error', line)
        except subprocess.CalledProcessError as ex:
            sys.stderr.write('{}\n'.format(ex.output))

    def test_geopmread_command_line(self):
        '''
        Check that geopmread commandline arguments work.
        '''
        self.exec_name = "geopmread"
        # no args
        self.check_no_error([])
        # domain flag
        self.check_output(['--domain'], ['board', 'package', 'core', 'cpu',
                                         'board_memory', 'package_memory',
                                         'board_nic', 'package_nic',
                                         'board_accelerator', 'package_accelerator'])
        self.check_output(['--domain', 'TIME'], ['cpu'])
        # read signal
        self.check_no_error(['TIME', 'board', '0'])
        # info
        self.check_no_error(['--info'])
        self.check_output(['--info', 'TIME'], ['Time in seconds'])
        # errors
        read_err = 'domain type and domain index are required'
        self.check_output(['TIME'], [read_err])
        self.check_output(['TIME', 'board'], [read_err])
        self.check_output(['TIME', 'board', 'bad'], ['invalid domain index'])
        self.check_output(['FREQUENCY', 'package', '111'], ['cannot read signal'])
        self.check_output(['ENERGY_PACKAGE', 'cpu', '0'], ['cannot read signal'])
        self.check_output(['INVALID', 'board', '0'], ['cannot read signal'])
        self.check_output(['--domain', 'INVALID'], ['unable to determine signal type'])
        self.check_output(['--domain', '--info'], ['info about domain not implemented'])

    @util.skip_unless_batch()
    def test_geopmread_all_signal_agg(self):
        '''
        Check that all reported signals can be read for board, aggregating if necessary.
        '''
        self.exec_name = "geopmread"
        all_signals = []
        try:
            proc = subprocess.Popen([self.exec_name],
                                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            for line in proc.stdout:
                if self.skip_warning_string.encode() not in line:
                    all_signals.append(line.strip())
        except subprocess.CalledProcessError as ex:
            sys.stderr.write('{}\n'.format(ex.output))
        for sig in all_signals:
            self.check_no_error([sig.decode(), 'board', '0'])

    @util.skip_unless_batch()
    def test_geopmread_signal_value(self):
        '''
        Check that some specific signals give a sane value.
        '''
        self.exec_name = "geopmread"
        signal_range = {
            "POWER_PACKAGE": (20, 400),
            "FREQUENCY": (1.0e8, 5.0e9),
            "TIME": (0, 10),  # time in sec to start geopmread
            "TEMPERATURE_CORE": (0, 100)
        }
        for signal, val_range in signal_range.items():
            # NOTE(review): an earlier revision appears to have intended
            # to skip signals that are missing on the platform, but its
            # bare 'except:' re-raised before reaching the skip, so a
            # missing signal has always failed this test.  The dead
            # try/except has been removed; the strict behavior is kept.
            self.check_no_error([signal, "board", "0"])
            self.check_output_range([signal, "board", "0"], *val_range)

    def test_geopmread_custom_msr(self):
        '''
        Check that MSRIOGroup picks up additional MSRs in path.
        '''
        self.exec_name = "geopmread"
        path = os.path.join(
            os.path.dirname(
                os.path.dirname(
                    os.path.realpath(__file__))),
            'examples/custom_msr/')
        custom_env = os.environ.copy()
        custom_env['GEOPM_PLUGIN_PATH'] = path
        all_signals = []
        try:
            proc = subprocess.Popen([self.exec_name], env=custom_env,
                                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            for line in proc.stdout:
                if self.skip_warning_string.encode() not in line:
                    all_signals.append(line.strip())
        except subprocess.CalledProcessError as ex:
            sys.stderr.write('{}\n'.format(ex.output))
        self.assertIn(b'MSR::CORE_PERF_LIMIT_REASONS#', all_signals)

    def test_geopmwrite_command_line(self):
        '''
        Check that geopmwrite commandline arguments work.
        '''
        self.exec_name = "geopmwrite"
        # no args
        self.check_no_error([])
        # domain flag
        self.check_output(['--domain'], ['board', 'package', 'core', 'cpu',
                                         'board_memory', 'package_memory',
                                         'board_nic', 'package_nic',
                                         'board_accelerator', 'package_accelerator'])
        self.check_no_error(['--domain', 'FREQUENCY'])
        # info
        self.check_no_error(['--info'])
        self.check_output(['--info', 'FREQUENCY'], ['processor frequency'])
        # errors
        write_err = 'domain type, domain index, and value are required'
        self.check_output(['FREQUENCY'], [write_err])
        self.check_output(['FREQUENCY', 'board'], [write_err])
        self.check_output(['FREQUENCY', 'board', '0'], [write_err])
        self.check_output(['FREQUENCY', 'board', 'bad', '0'], ['invalid domain index'])
        self.check_output(['FREQUENCY', 'board', '0', 'bad'], ['invalid write value'])
        self.check_output(['FREQUENCY', 'package', '111', '0'], ['cannot write control'])
        self.check_output(['FREQUENCY', 'board_nic', '0', '0'], ['cannot write control'])
        self.check_output(['INVALID', 'board', '0', '0'], ['cannot write control'])
        self.check_output(['--domain', 'INVALID'], ['unable to determine control type'])
        self.check_output(['--domain', '--info'], ['info about domain not implemented'])

    @util.skip_unless_batch()
    def test_geopmwrite_set_freq(self):
        '''
        Check that geopmwrite can be used to set frequency.
        '''
        def read_stdout_line(stdout):
            # Skip warning lines and return the first real output line.
            line = stdout.readline()
            while self.skip_warning_string.encode() in line:
                line = stdout.readline()
            return line.strip()

        def read_current_freq(domain, signal='FREQUENCY'):
            read_proc = subprocess.Popen(['geopmread', signal, domain, '0'],
                                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            freq = read_stdout_line(read_proc.stdout)
            freq = float(freq)
            return freq

        def read_min_max_freq():
            read_proc = subprocess.Popen(['geopmread', 'CPUINFO::FREQ_MIN', 'board', '0'],
                                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            min_freq = read_stdout_line(read_proc.stdout)
            min_freq = float(int(float(min_freq)/1e8)*1e8)  # convert to multiple of 1e8
            read_proc = subprocess.Popen(['geopmread', 'CPUINFO::FREQ_MAX', 'board', '0'],
                                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            max_freq = read_stdout_line(read_proc.stdout)
            max_freq = float(int(float(max_freq)/1e8)*1e8)
            return min_freq, max_freq

        self.exec_name = "geopmwrite"
        read_proc = subprocess.Popen(['geopmread', '--domain', 'FREQUENCY'],
                                     stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        read_domain = read_stdout_line(read_proc.stdout).decode()
        write_proc = subprocess.Popen([self.exec_name, '--domain', 'FREQUENCY'],
                                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        write_domain = read_stdout_line(write_proc.stdout).decode()
        min_freq, max_freq = read_min_max_freq()
        # Sanity-check the starting frequency before modifying it.
        old_freq = read_current_freq(write_domain, 'MSR::PERF_CTL:FREQ')
        self.assertLess(old_freq, max_freq * 2)
        self.assertGreater(old_freq, min_freq - 1e8)
        # set to min and check
        self.check_no_error(['FREQUENCY', write_domain, '0', str(min_freq)])
        result = read_current_freq(read_domain)
        self.assertEqual(min_freq, result)
        # set to max and check
        self.check_no_error(['FREQUENCY', write_domain, '0', str(max_freq)])
        result = read_current_freq(read_domain)
        self.assertEqual(max_freq, result)
        # restore the original frequency
        self.check_no_error(['FREQUENCY', write_domain, '0', str(old_freq)])
class TestIntegrationGeopmagent(unittest.TestCase):
''' Tests of geopmagent.'''
def setUp(self):
self.exec_name = 'geopmagent'
self.skip_warning_string = 'Incompatible CPU frequency driver/governor'
def check_output(self, args, expected):
try:
proc = subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for exp in expected:
line = proc.stdout.readline()
while self.skip_warning_string.encode() in line or line == b'\n':
line = proc.stdout.readline()
self.assertIn(exp.encode(), line)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def check_json_output(self, args, expected):
try:
proc = subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
line = proc.stdout.readline()
while self.skip_warning_string.encode() in line or line == b'\n':
line = proc.stdout.readline()
try:
out_json = json.loads(line.decode())
except ValueError:
self.fail('Could not convert json string: {}\n'.format(line))
self.assertEqual(expected, out_json)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
def check_no_error(self, args):
try:
proc = subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def test_geopmagent_command_line(self):
'''
Check that geopmagent commandline arguments work.
'''
# no args
agent_names = ['monitor', 'power_balancer', 'power_governor',
'energy_efficient', 'frequency_map']
self.check_output([], agent_names)
# help message
self.check_output(['--help'], ['Usage'])
# version
self.check_no_error(['--version'])
# agent policy and sample names
for agent in agent_names:
self.check_output(['--agent', agent],
['Policy', 'Sample'])
# policy file
self.check_json_output(['--agent', 'monitor', '--policy', 'None'],
{})
self.check_json_output(['--agent', 'power_governor', '--policy', '150'],
{'POWER_PACKAGE_LIMIT_TOTAL': 150})
# default value policy
self.check_json_output(['--agent', 'power_governor', '--policy', 'NAN'],
{'POWER_PACKAGE_LIMIT_TOTAL': 'NAN'})
self.check_json_output(['--agent', 'power_governor', '--policy', 'nan'],
{'POWER_PACKAGE_LIMIT_TOTAL': 'NAN'})
self.check_json_output(['--agent', 'energy_efficient', '--policy', 'nan,nan'],
{'FREQ_MIN': 'NAN', 'FREQ_MAX': 'NAN'})
self.check_json_output(['--agent', 'energy_efficient', '--policy', '1.2e9,nan'],
{'FREQ_MIN': 1.2e9, 'FREQ_MAX': 'NAN'})
self.check_json_output(['--agent', 'energy_efficient', '--policy', 'nan,1.3e9'],
{'FREQ_MIN': 'NAN', 'FREQ_MAX': 1.3e9})
# unspecified policy values are accepted
self.check_json_output(['--agent', 'power_balancer', '--policy', '150'],
{'POWER_PACKAGE_LIMIT_TOTAL': 150})
# errors
self.check_output(['--agent', 'power_governor', '--policy', 'None'],
['not a valid floating-point number', 'Invalid argument'])
self.check_output(['--agent', 'monitor', '--policy', '300'],
['agent takes no parameters', 'Invalid argument'])
self.check_output(['--agent', 'energy_efficient', '--policy', '2.0e9,5.0e9,4.5e9,6.7,4.2'],
['Number of policies', 'Invalid argument'])
# Allow the integration suite to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
| 49.068268 | 165 | 0.618775 |
from __future__ import absolute_import
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import str
import os
import sys
import unittest
import subprocess
import time
import pandas
import collections
import socket
import shlex
import json
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test_integration import util
from test_integration import geopm_test_launcher
import geopmpy.io
import geopmpy.launcher
def create_frequency_map_policy(min_freq, max_freq, frequency_map, use_env=False):
    """Build a frequency_map agent policy dict.

    When use_env is True the per-region map is published through the
    GEOPM_FREQUENCY_MAP environment variable (as JSON) and only the
    min/max bounds go into the returned policy.  Otherwise the variable
    is cleared and each region's hash/frequency pair is encoded directly
    into the policy as HASH_<i>/FREQ_<i> entries.
    """
    # Region name -> geopm region hash, for the benchmark regions used here.
    known_hashes = {
        'dgemm': 0x00000000a74bbf35,
        'all2all': 0x000000003ddc81bf,
        'stream': 0x00000000d691da00,
        'sleep': 0x00000000536c798f,
        'MPI_Barrier': 0x000000007b561f45,
        'model-init': 0x00000000644f9787,
        'unmarked-region': 0x00000000725e8066 }
    policy = {'frequency_min': min_freq, 'frequency_max': max_freq}
    if use_env:
        os.environ['GEOPM_FREQUENCY_MAP'] = json.dumps(frequency_map)
        return policy
    # Map via policy: make sure a stale environment setting cannot win.
    os.environ.pop('GEOPM_FREQUENCY_MAP', None)
    for index, (region, freq) in enumerate(frequency_map.items()):
        policy['HASH_{}'.format(index)] = int(known_hashes[region])
        policy['FREQ_{}'.format(index)] = freq
    return policy
class TestIntegration(unittest.TestCase):
    def setUp(self):
        """Configure the default agent and snapshot hardware state."""
        self.longMessage = True
        # Default agent/policy; individual tests may override these.
        self._agent = 'power_governor'
        self._options = {'power_budget': 150}
        self._tmp_files = []
        self._output = None
        # Save the current power limit and frequency so tearDown() can
        # restore them after tests that write hardware controls.
        self._power_limit = geopm_test_launcher.geopmread("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0")
        self._frequency = geopm_test_launcher.geopmread("MSR::PERF_CTL:FREQ board 0")
        # Preserve the caller's GEOPM_FREQUENCY_MAP setting, if any.
        self._original_freq_map_env = os.environ.get('GEOPM_FREQUENCY_MAP')
    def tearDown(self):
        """Restore hardware controls, clean up files and the environment."""
        # Undo any power limit / frequency changes made by the test.
        geopm_test_launcher.geopmwrite("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0 " + str(self._power_limit))
        geopm_test_launcher.geopmwrite("MSR::PERF_CTL:FREQ board 0 " + str(self._frequency))
        # Only delete output files on success, and never when the user
        # asked to keep them via GEOPM_KEEP_FILES.
        if sys.exc_info() == (None, None, None) and os.getenv('GEOPM_KEEP_FILES') is None:
            if self._output is not None:
                self._output.remove_files()
            for ff in self._tmp_files:
                try:
                    os.remove(ff)
                except OSError:
                    pass
        # Restore GEOPM_FREQUENCY_MAP to its pre-test state.
        if self._original_freq_map_env is None:
            if 'GEOPM_FREQUENCY_MAP' in os.environ:
                os.environ.pop('GEOPM_FREQUENCY_MAP')
        else:
            os.environ['GEOPM_FREQUENCY_MAP'] = self._original_freq_map_env
def assertNear(self, a, b, epsilon=0.05, msg=''):
denom = a if a != 0 else 1
if abs((a - b) / denom) >= epsilon:
self.fail('The fractional difference between {a} and {b} is greater than {epsilon}. {msg}'.format(a=a, b=b, epsilon=epsilon, msg=msg))
def create_progress_df(self, df):
df = df.reset_index(drop=True)
last_index = 0
filtered_df = pandas.DataFrame()
row_list = []
progress_1s = df['REGION_PROGRESS'].loc[df['REGION_PROGRESS'] == 1]
for index, _ in progress_1s.iteritems():
row = df.loc[last_index:index].head(1)
row_list += [row[['TIME', 'REGION_PROGRESS', 'REGION_RUNTIME']]]
row = df.loc[last_index:index].tail(1)
row_list += [row[['TIME', 'REGION_PROGRESS', 'REGION_RUNTIME']]]
last_index = index + 1
filtered_df = pandas.concat(row_list)
return filtered_df
    def test_report_and_trace_generation(self):
        """Check that a basic run produces a non-empty report and trace
        for every node."""
        name = 'test_report_and_trace_generation'
        report_path = name + '.report'
        trace_path = name + '.trace'
        num_node = 4
        num_rank = 16
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.append_region('sleep', 1.0)
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        launcher.run(name)
        self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
        node_names = self._output.get_node_names()
        self.assertEqual(num_node, len(node_names))
        for nn in node_names:
            report = self._output.get_report_data(node_name=nn)
            self.assertNotEqual(0, len(report))
            trace = self._output.get_trace_data(node_name=nn)
            self.assertNotEqual(0, len(trace))
    def test_no_report_and_trace_generation(self):
        """Check that a run without report/trace paths completes cleanly
        (no output files are requested, so none are validated)."""
        name = 'test_no_report_and_trace_generation'
        num_node = 4
        num_rank = 16
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.append_region('sleep', 1.0)
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        launcher.run(name)
    @unittest.skipUnless('mr-fusion' in socket.gethostname(), "This test only enabled on known working systems.")
    def test_report_and_trace_generation_pthread(self):
        """Check report and trace generation when the controller runs as
        a pthread within the application process."""
        name = 'test_report_and_trace_generation_pthread'
        report_path = name + '.report'
        trace_path = name + '.trace'
        num_node = 4
        num_rank = 16
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.append_region('sleep', 1.0)
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        # Run the geopm controller as a pthread inside the app processes.
        launcher.set_pmpi_ctl('pthread')
        launcher.run(name)
        self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
        node_names = self._output.get_node_names()
        self.assertEqual(num_node, len(node_names))
        for nn in node_names:
            report = self._output.get_report_data(node_name=nn)
            self.assertNotEqual(0, len(report))
            trace = self._output.get_trace_data(node_name=nn)
            self.assertNotEqual(0, len(trace))
    @unittest.skipUnless(geopm_test_launcher.detect_launcher() != "aprun",
                         'ALPS does not support multi-application launch on the same nodes.')
    @util.skip_unless_batch()
    def test_report_and_trace_generation_application(self):
        """Check report and trace generation when the controller runs as
        a separate application alongside the benchmark."""
        name = 'test_report_and_trace_generation_application'
        report_path = name + '.report'
        trace_path = name + '.trace'
        num_node = 4
        num_rank = 16
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.append_region('sleep', 1.0)
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        # Run the geopm controller as its own launched application.
        launcher.set_pmpi_ctl('application')
        launcher.run(name)
        self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
        node_names = self._output.get_node_names()
        self.assertEqual(num_node, len(node_names))
        for nn in node_names:
            report = self._output.get_report_data(node_name=nn)
            self.assertNotEqual(0, len(report))
            trace = self._output.get_trace_data(node_name=nn)
            self.assertNotEqual(0, len(trace))
    @unittest.skipUnless(geopm_test_launcher.detect_launcher() == "srun" and os.getenv('SLURM_NODELIST') is None,
                         'Requires non-sbatch SLURM session for alloc\'d and idle nodes.')
    def test_report_generation_all_nodes(self):
        """Run a one-node sleep job on every idle node in the SLURM
        session and validate the report from each; nodes that drop off
        the idle list mid-test are tolerated and excluded."""
        name = 'test_report_generation_all_nodes'
        report_path = name + '.report'
        num_node = 1
        num_rank = 1
        delay = 1.0
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.append_region('sleep', delay)
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        time.sleep(5)  # Wait a moment to finish cleaning-up from a previous test
        idle_nodes = launcher.get_idle_nodes()
        idle_nodes_copy = list(idle_nodes)
        alloc_nodes = launcher.get_alloc_nodes()
        launcher.write_log(name, 'Idle nodes : {nodes}'.format(nodes=idle_nodes))
        launcher.write_log(name, 'Alloc\'d  nodes : {nodes}'.format(nodes=alloc_nodes))
        node_names = []
        for nn in idle_nodes_copy:
            launcher.set_node_list(nn.split())
            try:
                launcher.run(name)
                node_names += nn.split()
            except subprocess.CalledProcessError as e:
                # A node that vanished from the idle list (e.g. claimed by
                # another job) is dropped rather than failing the test.
                if e.returncode == 1 and nn not in launcher.get_idle_nodes():
                    launcher.write_log(name, '{node} has disappeared from the idle list!'.format(node=nn))
                    idle_nodes.remove(nn)
                else:
                    launcher.write_log(name, 'Return code = {code}'.format(code=e.returncode))
                    raise e
            ao = geopmpy.io.AppOutput(report_path, do_cache=False)
            sleep_data = ao.get_report_data(node_name=nn, region='sleep')
            app_data = ao.get_app_total_data(node_name=nn)
            self.assertNotEqual(0, len(sleep_data))
            self.assertNear(delay, sleep_data['runtime'].item())
            self.assertGreater(app_data['runtime'].item(), sleep_data['runtime'].item())
            self.assertEqual(1, sleep_data['count'].item())
        self.assertEqual(len(node_names), len(idle_nodes))
def test_runtime(self):
name = 'test_runtime'
report_path = name + '.report'
num_node = 1
num_rank = 5
delay = 3.0
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
report = self._output.get_report_data(node_name=nn, region='sleep')
app_total = self._output.get_app_total_data(node_name=nn)
self.assertNear(delay, report['runtime'].item())
self.assertGreater(app_total['runtime'].item(), report['runtime'].item())
def test_runtime_epoch(self):
name = 'test_runtime_epoch'
report_path = name + '.report'
num_node = 1
num_rank = 5
delay = 3.0
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', delay)
app_conf.append_region('spin', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
spin_data = self._output.get_report_data(node_name=nn, region='spin')
sleep_data = self._output.get_report_data(node_name=nn, region='sleep')
epoch_data = self._output.get_report_data(node_name=nn, region='epoch')
total_runtime = sleep_data['runtime'].item() + spin_data['runtime'].item()
self.assertNear(total_runtime, epoch_data['runtime'].item())
    def test_epoch_data_valid(self):
        """Run an unmarked spin workload and check that the raw epoch data in
        the report is self-consistent with the unmarked region and the
        application totals.
        """
        name = 'test_epoch_data_valid'
        report_path = name + '.report'
        num_node = 1
        num_rank = 1
        big_o = 1.0
        loop_count = 10
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.set_loop_count(loop_count)
        app_conf.append_region('spin-unmarked', big_o)
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        launcher.run(name)
        report = geopmpy.io.RawReport(report_path)
        node_names = report.host_names()
        self.assertEqual(num_node, len(node_names))
        for nn in node_names:
            regions = report.region_names(nn)
            self.assertTrue('model-init' not in regions)
            totals = report.raw_totals(nn)
            unmarked = report.raw_region(nn, 'unmarked-region')
            epoch = report.raw_epoch(nn)
            # Every epoch metric must be populated and the epoch count must
            # match the benchmark's configured loop count.
            self.assertGreater(epoch['runtime (sec)'], 0)
            self.assertGreater(epoch['sync-runtime (sec)'], 0)
            self.assertGreater(epoch['package-energy (joules)'], 0)
            self.assertGreater(epoch['dram-energy (joules)'], 0)
            self.assertGreater(epoch['power (watts)'], 0)
            self.assertGreater(epoch['frequency (%)'], 0)
            self.assertGreater(epoch['frequency (Hz)'], 0)
            self.assertEqual(epoch['count'], loop_count)
            # Ordering invariants: totals > unmarked >= epoch for runtime
            # and both energy counters.
            self.assertTrue(totals['runtime (sec)'] > unmarked['runtime (sec)'] >= epoch['runtime (sec)'],
                            '''The total runtime is NOT > the unmarked runtime or the unmarked runtime is NOT
                            >= the Epoch runtime.''')
            self.assertTrue(totals['package-energy (joules)'] >
                            unmarked['package-energy (joules)'] >=
                            epoch['package-energy (joules)'],
                            '''The total package energy (joules) is NOT > the unmarked package energy (joules)
                            or the unmarked package energy (joules) is NOT >= the Epoch package
                            energy (joules).''')
            self.assertTrue(totals['dram-energy (joules)'] >
                            unmarked['dram-energy (joules)'] >=
                            epoch['dram-energy (joules)'],
                            '''The total dram energy is NOT > the unmarked dram energy or the unmarked
                            dram energy is NOT >= the Epoch dram energy.''')
            self.assertTrue(unmarked['sync-runtime (sec)'] >= epoch['sync-runtime (sec)'],
                            '''The sync-runtime for the unmarked region is NOT >= the Epoch sync-runtime.''')
def test_runtime_nested(self):
name = 'test_runtime_nested'
report_path = name + '.report'
num_node = 1
num_rank = 1
delay = 1.0
loop_count = 2
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('nested-progress', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
spin_data = self._output.get_report_data(node_name=nn, region='spin')
epoch_data = self._output.get_report_data(node_name=nn, region='epoch')
app_totals = self._output.get_app_total_data(node_name=nn)
self.assertNear(2 * loop_count * delay, spin_data['runtime'].item())
self.assertNear(spin_data['runtime'].item(), epoch_data['runtime'].item(), epsilon=0.01)
self.assertGreater(app_totals['network-time'].item(), 0)
self.assertGreater(0.1, app_totals['network-time'].item())
self.assertEqual(loop_count, spin_data['count'].item())
    def test_trace_runtimes(self):
        """Cross-validate runtimes between the trace and the report.

        For every region with a non-zero sync_runtime (excluding unmarked,
        model-init, epoch and MPI_* regions) the elapsed trace time spanning
        the region's samples must match the report's sync_runtime; the epoch
        runtime is checked against the trace span from the first epoch sample.
        """
        name = 'test_trace_runtimes'
        report_path = name + '.report'
        trace_path = name + '.trace'
        num_node = 4
        num_rank = 16
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.append_region('sleep', 1.0)
        app_conf.append_region('dgemm', 1.0)
        app_conf.append_region('all2all', 1.0)
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,
                                                    trace_path, region_barrier=True)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        launcher.run(name)
        self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
        node_names = self._output.get_node_names()
        self.assertEqual(len(node_names), num_node)
        regions = self._output.get_region_names()
        for nn in node_names:
            trace = self._output.get_trace_data(node_name=nn)
            app_totals = self._output.get_app_total_data(node_name=nn)
            self.assertNear(trace.iloc[-1]['TIME'], app_totals['runtime'].item(), msg='Application runtime failure, node_name={}.'.format(nn))
            # Group trace samples by region hash, preserving the original
            # sample index so contiguous spans can be reconstructed.
            tt = trace.reset_index(level='index')
            tt = tt.set_index(['REGION_HASH'], append=True)
            tt_reg = tt.groupby(level=['REGION_HASH'])
            for region_name in regions:
                region_data = self._output.get_report_data(node_name=nn, region=region_name)
                if (region_name not in ['unmarked-region', 'model-init', 'epoch'] and
                    not region_name.startswith('MPI_') and
                    region_data['sync_runtime'].item() != 0):
                    region_hash = region_data['id'].item()
                    trace_data = tt_reg.get_group(region_hash)
                    # Elapsed time from the region's first sample to the
                    # sample immediately after its last one.
                    start_idx = trace_data.iloc[0]['index']
                    end_idx = trace_data.iloc[-1]['index'] + 1
                    start_time = tt.loc[tt['index'] == start_idx]['TIME'].item()
                    end_time = tt.loc[tt['index'] == end_idx]['TIME'].item()
                    trace_elapsed_time = end_time - start_time
                    msg = 'for region {rn} on node {nn}'.format(rn=region_name, nn=nn)
                    self.assertNear(trace_elapsed_time, region_data['sync_runtime'].item(), msg=msg)
            # Epoch runtime: first EPOCH_COUNT == 0 sample to the trace end.
            region_data = self._output.get_report_data(node_name=nn, region='epoch')
            trace_elapsed_time = trace.iloc[-1]['TIME'] - trace['TIME'].loc[trace['EPOCH_COUNT'] == 0].iloc[0]
            msg = 'for epoch on node {nn}'.format(nn=nn)
            self.assertNear(trace_elapsed_time, region_data['runtime'].item(), msg=msg)
    @util.skip_unless_config_enable('bloat')
    def test_runtime_regulator(self):
        """Check per-entry REGION_RUNTIME estimates in the trace.

        Runs sleep and spin regions with known big-o values and asserts that
        the runtime published in the trace matches the expected per-entry
        runtime both when a region completes (progress == 1) and on later
        entries (progress == 0 after a completion has been observed).
        """
        name = 'test_runtime_regulator'
        report_path = name + '.report'
        trace_path = name + '.trace'
        num_node = 1
        num_rank = 4
        loop_count = 20
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.set_loop_count(loop_count)
        sleep_big_o = 1.0
        spin_big_o = 0.5
        expected_region_runtime = {'spin': spin_big_o, 'sleep': sleep_big_o}
        app_conf.append_region('sleep', sleep_big_o)
        app_conf.append_region('spin', spin_big_o)
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, region_barrier=True)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        launcher.run(name)
        self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
        node_names = self._output.get_node_names()
        self.assertEqual(len(node_names), num_node)
        regions = self._output.get_region_names()
        for nn in node_names:
            app_totals = self._output.get_app_total_data(node_name=nn)
            trace = self._output.get_trace_data(node_name=nn)
            self.assertNear(trace.iloc[-1]['TIME'], app_totals['runtime'].item())
            tt = trace.set_index(['REGION_HASH'], append=True)
            tt = tt.groupby(level=['REGION_HASH'])
            for region_name in regions:
                region_data = self._output.get_report_data(node_name=nn, region=region_name)
                if region_name not in ['unmarked-region', 'model-init', 'epoch'] and not region_name.startswith('MPI_') and region_data['runtime'].item() != 0:
                    trace_data = tt.get_group(region_data['id'].item())
                    filtered_df = self.create_progress_df(trace_data)
                    first_time = False  # Becomes True once a completed pass (progress == 1) is seen.
                    # Sleep timing is less precise, so allow a looser tolerance.
                    epsilon = 0.001 if region_name != 'sleep' else 0.05
                    for index, df in filtered_df.iterrows():
                        if df['REGION_PROGRESS'] == 1:
                            self.assertNear(df['REGION_RUNTIME'], expected_region_runtime[region_name], epsilon=epsilon)
                            first_time = True
                        if first_time is True and df['REGION_PROGRESS'] == 0:
                            self.assertNear(df['REGION_RUNTIME'], expected_region_runtime[region_name], epsilon=epsilon)
@util.skip_unless_run_long_tests()
@util.skip_unless_config_enable('bloat')
def test_region_runtimes(self):
name = 'test_region_runtimes'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
loop_count = 500
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('dgemm', 8.0)
app_conf.set_loop_count(loop_count)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, time_limit=900)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
region_times = collections.defaultdict(lambda: collections.defaultdict(dict))
for nn in node_names:
tt = self._output.get_trace_data(node_name=nn).set_index(['REGION_HASH'], append=True).groupby(level=['REGION_HASH'])
for region_hash, data in tt:
filtered_df = self.create_progress_df(data)
filtered_df = filtered_df.diff()
# case where 1 - 0 = 1 for the progress column.
filtered_df = filtered_df.loc[filtered_df['REGION_PROGRESS'] == 1]
if len(filtered_df) > 1:
launcher.write_log(name, 'Region elapsed time stats from {} - {} :\n{}'\
.format(nn, region_hash, filtered_df['TIME'].describe()))
filtered_df['TIME'].describe()
region_times[nn][region_hash] = filtered_df
launcher.write_log(name, '{}'.format('-' * 80))
# Loop through the reports to see if the region runtimes line up with what was calculated from the trace files above.
regions = self._output.get_region_names()
write_regions = True
for nn in node_names:
for region_name in regions:
rr = self._output.get_report_data(node_name=nn, region=region_name)
if (region_name != 'epoch' and
rr['id'].item() != 0 and
rr['count'].item() > 1):
if write_regions:
launcher.write_log(name, 'Region {} is {}.'.format(rr['id'].item(), region_name))
runtime = rr['sync_runtime'].item()
self.assertNear(runtime,
region_times[nn][rr['id'].item()]['TIME'].sum())
write_regions = False
# Test to ensure every region detected in the trace is captured in the report.
for nn in node_names:
report_ids = []
for region_name in regions:
rr = self._output.get_report_data(node_name=nn, region=region_name)
report_ids.append(rr['id'].item())
for region_hash in region_times[nn].keys():
self.assertTrue(region_hash in report_ids, msg='Report from {} missing region_hash {}'.format(nn, region_hash))
def test_progress(self):
name = 'test_progress'
report_path = name + '.report'
num_node = 1
num_rank = 4
delay = 3.0
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep-progress', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
for nn in node_names:
sleep_data = self._output.get_report_data(node_name=nn, region='sleep')
app_total = self._output.get_app_total_data(node_name=nn)
self.assertNear(delay, sleep_data['runtime'].item())
self.assertGreater(app_total['runtime'].item(), sleep_data['runtime'].item())
self.assertEqual(1, sleep_data['count'].item())
def test_count(self):
name = 'test_count'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 1
num_rank = 4
delay = 0.01
loop_count = 100
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('spin', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
for nn in node_names:
trace_data = self._output.get_trace_data(node_name=nn)
spin_data = self._output.get_report_data(node_name=nn, region='spin')
epoch_data = self._output.get_report_data(node_name=nn, region='epoch')
self.assertNear(delay * loop_count, spin_data['runtime'].item())
self.assertEqual(loop_count, spin_data['count'].item())
self.assertEqual(loop_count, epoch_data['count'].item())
self.assertEqual(loop_count, trace_data['EPOCH_COUNT'][-1])
    @util.skip_unless_run_long_tests()
    def test_scaling(self):
        """Weak-scaling smoke test: double the node count until the
        allocation is exhausted, validating region counts and runtimes at
        each size.

        A return code of 1 from ALPS/SLURM signals that the requested node
        count exceeds the allocation, which ends the loop; any other failure
        re-raises.
        """
        name = 'test_scaling'
        report_path = name + '.report'
        num_node = 2
        loop_count = 100
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.append_region('dgemm', 1.0)
        app_conf.append_region('all2all', 1.0)
        app_conf.set_loop_count(loop_count)
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, time_limit=900)
        check_successful = True
        while check_successful:
            launcher.set_num_node(num_node)
            launcher.set_num_rank(num_node)  # One rank per node at every size.
            try:
                launcher.check_run(name)
            except subprocess.CalledProcessError as e:
                # If we exceed the available nodes in the allocation ALPS/SLURM give a rc of 1
                # All other rc's are real errors
                if e.returncode != 1:
                    raise e
                check_successful = False
            if check_successful:
                launcher.write_log(name, 'About to run on {} nodes.'.format(num_node))
                launcher.run(name)
                self._output = geopmpy.io.AppOutput(report_path)
                node_names = self._output.get_node_names()
                self.assertEqual(len(node_names), num_node)
                for nn in node_names:
                    dgemm_data = self._output.get_report_data(node_name=nn, region='dgemm')
                    all2all_data = self._output.get_report_data(node_name=nn, region='all2all')
                    self.assertEqual(loop_count, dgemm_data['count'].item())
                    self.assertEqual(loop_count, all2all_data['count'].item())
                    self.assertGreater(dgemm_data['runtime'].item(), 0.0)
                    self.assertGreater(all2all_data['runtime'].item(), 0.0)
                num_node *= 2
                self._output.remove_files()  # Clean up before the next, larger run.
    @util.skip_unless_run_long_tests()
    def test_power_consumption(self):
        """Run dgemm under a power cap and verify that per-node socket power
        stays within 2% of the budget at the 75th percentile.

        Power is derived from the trace by differencing the ENERGY counters
        over elapsed time, after dropping all samples before the first epoch.
        """
        name = 'test_power_consumption'
        report_path = name + '.report'
        trace_path = name + '.trace'
        num_node = 4
        num_rank = 16
        loop_count = 500
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.append_region('dgemm', 8.0)
        app_conf.set_loop_count(loop_count)
        fam, mod = geopm_test_launcher.get_platform()
        if fam == 6 and mod == 87:
            # Lower budget on family 6 / model 87 (KNL-class) platforms.
            self._options['power_budget'] = 130
        else:
            self._options['power_budget'] = 200
        gov_agent_conf_path = name + '_gov_agent.config'
        self._tmp_files.append(gov_agent_conf_path)
        gov_agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        launcher = geopm_test_launcher.TestLauncher(app_conf, gov_agent_conf, report_path,
                                                    trace_path, time_limit=900)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        launcher.write_log(name, 'Power cap = {}W'.format(self._options['power_budget']))
        launcher.run(name)
        self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
        node_names = self._output.get_node_names()
        self.assertEqual(num_node, len(node_names))
        all_power_data = {}
        for nn in node_names:
            tt = self._output.get_trace_data(node_name=nn)
            # Drop all samples before the first epoch (startup noise).
            first_epoch_index = tt.loc[tt['EPOCH_COUNT'] == 0][:1].index[0]
            epoch_dropped_data = tt[first_epoch_index:]
            power_data = epoch_dropped_data.filter(regex='ENERGY')
            power_data['TIME'] = epoch_dropped_data['TIME']
            # Power = delta energy / delta time between consecutive samples.
            power_data = power_data.diff().dropna()
            power_data.rename(columns={'TIME': 'ELAPSED_TIME'}, inplace=True)
            power_data = power_data.loc[(power_data != 0).all(axis=1)]  # Drop all-zero rows.
            pkg_energy_cols = [s for s in power_data.keys() if 'ENERGY_PACKAGE' in s]
            dram_energy_cols = [s for s in power_data.keys() if 'ENERGY_DRAM' in s]
            power_data['SOCKET_POWER'] = power_data[pkg_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']
            power_data['DRAM_POWER'] = power_data[dram_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']
            power_data['COMBINED_POWER'] = power_data['SOCKET_POWER'] + power_data['DRAM_POWER']
            pandas.set_option('display.width', 100)
            launcher.write_log(name, 'Power stats from {} :\n{}'.format(nn, power_data.describe()))
            all_power_data[nn] = power_data
        for node_name, power_data in all_power_data.items():
            # Allow for overages of 2% at the 75th percentile.
            self.assertGreater(self._options['power_budget'] * 1.02, power_data['SOCKET_POWER'].quantile(.75))
        # TODO Checks on the maximum power computed during the run?
        # TODO Checks to see how much power was left on the table?
@util.skip_unless_run_long_tests()
@util.skip_unless_batch()
def test_power_balancer(self):
name = 'test_power_balancer'
num_node = 4
num_rank = 16
loop_count = 500
# Require that the balancer moves the maximum dgemm runtime at
# least 1/4 the distance to the mean dgemm runtime under the
# governor.
margin_factor = 0.25
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('dgemm-imbalance', 8.0)
app_conf.append_region('all2all', 0.05)
app_conf.set_loop_count(loop_count)
# Update app config with imbalance
alloc_nodes = geopm_test_launcher.TestLauncher.get_alloc_nodes()
for nn in range(len(alloc_nodes) // 2):
app_conf.append_imbalance(alloc_nodes[nn], 0.5)
fam, mod = geopm_test_launcher.get_platform()
if fam == 6 and mod == 87:
# budget for KNL
power_budget = 130
else:
power_budget = 200
self._options = {'power_budget': power_budget}
gov_agent_conf_path = name + '_gov_agent.config'
bal_agent_conf_path = name + '_bal_agent.config'
self._tmp_files.append(gov_agent_conf_path)
self._tmp_files.append(bal_agent_conf_path)
agent_list = ['power_governor', 'power_balancer']
path_dict = {'power_governor': gov_agent_conf_path, 'power_balancer': bal_agent_conf_path}
agent_runtime = dict()
for agent in agent_list:
agent_conf = geopmpy.io.AgentConf(path_dict[agent], agent, self._options)
run_name = '{}_{}'.format(name, agent)
report_path = '{}.report'.format(run_name)
trace_path = '{}.trace'.format(run_name)
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,
trace_path, time_limit=2700)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.write_log(run_name, 'Power cap = {}W'.format(power_budget))
launcher.run(run_name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
power_limits = []
# Total power consumed will be Socket(s) + DRAM
for nn in node_names:
tt = self._output.get_trace_data(node_name=nn)
first_epoch_index = tt.loc[tt['EPOCH_COUNT'] == 0][:1].index[0]
epoch_dropped_data = tt[first_epoch_index:] # Drop all startup data
power_data = epoch_dropped_data.filter(regex='ENERGY')
power_data['TIME'] = epoch_dropped_data['TIME']
power_data = power_data.diff().dropna()
power_data.rename(columns={'TIME': 'ELAPSED_TIME'}, inplace=True)
power_data = power_data.loc[(power_data != 0).all(axis=1)] # Will drop any row that is all 0's
pkg_energy_cols = [s for s in power_data.keys() if 'ENERGY_PACKAGE' in s]
dram_energy_cols = [s for s in power_data.keys() if 'ENERGY_DRAM' in s]
power_data['SOCKET_POWER'] = power_data[pkg_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']
power_data['DRAM_POWER'] = power_data[dram_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']
power_data['COMBINED_POWER'] = power_data['SOCKET_POWER'] + power_data['DRAM_POWER']
pandas.set_option('display.width', 100)
launcher.write_log(name, 'Power stats from {} {} :\n{}'.format(agent, nn, power_data.describe()))
if agent == 'power_balancer':
power_limits.append(epoch_dropped_data['POWER_LIMIT'][-1])
if agent == 'power_balancer':
avg_power_limit = sum(power_limits) / len(power_limits)
self.assertTrue(avg_power_limit <= power_budget)
min_runtime = float('nan')
max_runtime = float('nan')
node_names = self._output.get_node_names()
runtime_list = []
for node_name in node_names:
epoch_data = self._output.get_report_data(node_name=node_name, region='dgemm')
runtime_list.append(epoch_data['runtime'].item())
if agent == 'power_governor':
mean_runtime = sum(runtime_list) / len(runtime_list)
max_runtime = max(runtime_list)
margin = margin_factor * (max_runtime - mean_runtime)
agent_runtime[agent] = max(runtime_list)
self.assertGreater(agent_runtime['power_governor'] - margin,
agent_runtime['power_balancer'],
"governor runtime: {}, balancer runtime: {}, margin: {}".format(
agent_runtime['power_governor'], agent_runtime['power_balancer'], margin))
    def test_progress_exit(self):
        """Check that REGION_PROGRESS never jumps sharply backwards within a
        region (other than the full 1 -> 0 reset on region exit).
        """
        name = 'test_progress_exit'
        report_path = name + '.report'
        trace_path = name + '.trace'
        num_node = 1
        num_rank = 16
        loop_count = 100
        big_o = 0.1
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.set_loop_count(loop_count)
        app_conf.append_region('dgemm-progress', big_o)
        app_conf.append_region('spin-progress', big_o)
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, region_barrier=True)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        launcher.run(name)
        self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
        node_names = self._output.get_node_names()
        self.assertEqual(num_node, len(node_names))
        for nn in node_names:
            tt = self._output.get_trace_data(node_name=nn)
            tt = tt.set_index(['REGION_HASH'], append=True)
            tt = tt.groupby(level=['REGION_HASH'])
            for region_hash, data in tt:
                tmp = data['REGION_PROGRESS'].diff()
                # NOTE(review): 8300189175 is presumably the hash of one of
                # the *-progress regions above -- confirm against region ids.
                if region_hash == 8300189175:
                    # A diff of exactly -1 is the normal exit reset; anything
                    # between -1 and -0.1 is an invalid backwards jump.
                    negative_progress = tmp.loc[(tmp > -1) & (tmp < -0.1)]
                    launcher.write_log(name, '{}'.format(negative_progress))
                    self.assertEqual(0, len(negative_progress))
    @util.skip_unless_run_long_tests()
    @util.skip_unless_optimized()
    def test_sample_rate(self):
        """Check the controller's trace sampling cadence.

        Requires the mean sample interval to stay under max_mean, fewer than
        6% of samples to be 3-sigma outliers, and the normalized standard
        deviation of the remaining samples to stay under max_nstd.
        """
        name = 'test_sample_rate'
        report_path = name + '.report'
        trace_path = name + '.trace'
        num_node = 1
        num_rank = 16
        loop_count = 10
        big_o = 10.0
        region = 'dgemm-progress'
        max_mean = 0.01  # Upper bound on the mean sample interval (sec).
        max_nstd = 0.1   # Upper bound on std / mean of the sample intervals.
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.set_loop_count(loop_count)
        app_conf.append_region(region, big_o)
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        launcher.run(name)
        self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
        node_names = self._output.get_node_names()
        self.assertEqual(num_node, len(node_names))
        for nn in node_names:
            tt = self._output.get_trace_data(node_name=nn)
            delta_t = tt['TIME'].diff()
            delta_t = delta_t.loc[delta_t != 0]  # Drop duplicate timestamps.
            self.assertGreater(max_mean, delta_t.mean())
            # Discard 3-sigma outliers, but require that they account for
            # less than 6% of all samples.
            size_orig = len(delta_t)
            delta_t = delta_t[(delta_t - delta_t.mean()) < 3*delta_t.std()]
            self.assertGreater(0.06, 1 - (float(len(delta_t)) / size_orig))
            self.assertGreater(max_nstd, delta_t.std() / delta_t.mean())
    def test_network_times(self):
        """Validate network_time accounting across regions.

        Only all2all and the MPI barriers should accrue network time; the
        epoch and application totals must equal the sum of those two, and
        the sleep/dgemm/unmarked regions must report zero.
        """
        name = 'test_network_times'
        report_path = name + '.report'
        trace_path = name + '.trace'
        num_node = 4
        num_rank = 16
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.append_region('sleep', 1.0)
        app_conf.append_region('dgemm', 1.0)
        app_conf.append_region('all2all', 1.0)
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        launcher.run(name)
        self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
        node_names = self._output.get_node_names()
        self.assertEqual(len(node_names), num_node)
        for nn in node_names:
            all2all_data = self._output.get_report_data(node_name=nn, region='all2all')
            sleep_data = self._output.get_report_data(node_name=nn, region='sleep')
            dgemm_data = self._output.get_report_data(node_name=nn, region='dgemm')
            barrier_data = self._output.get_report_data(node_name=nn, region='MPI_Barrier')
            unmarked_data = self._output.get_report_data(node_name=nn, region='unmarked-region')
            epoch_data = self._output.get_report_data(node_name=nn, region='epoch')
            app_total = self._output.get_app_total_data(node_name=nn)
            self.assertEqual(0, unmarked_data['count'].item())
            # Tolerance scales with how much unmarked time could bleed into
            # the all2all measurement, with a 5% floor.
            mpi_epsilon = max(unmarked_data['runtime'].item() / all2all_data['network_time'].item(), 0.05)
            self.assertNear(all2all_data['network_time'].item(), all2all_data['runtime'].item(), mpi_epsilon)
            self.assertNear(all2all_data['network_time'].item() + barrier_data['network_time'].item(),
                            epoch_data['network_time'].item())
            self.assertNear(all2all_data['network_time'].item() + barrier_data['network_time'].item(),
                            app_total['network-time'].item())
            self.assertEqual(0, unmarked_data['network_time'].item())
            self.assertEqual(0, sleep_data['network_time'].item())
            self.assertEqual(0, dgemm_data['network_time'].item())
def test_ignore_runtime(self):
name = 'test_ignore_runtime'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('ignore', 1.0)
app_conf.append_region('dgemm', 1.0)
app_conf.append_region('all2all', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
for nn in node_names:
ignore_data = self._output.get_report_data(node_name=nn, region='ignore')
app_data = self._output.get_app_total_data(node_name=nn)
self.assertNear(ignore_data['runtime'].item(),
app_data['ignore-runtime'].item(), 0.00005)
@util.skip_unless_config_enable('ompt')
def test_unmarked_ompt(self):
name = 'test_unmarked_ompt'
report_path = name + '.report'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('stream-unmarked', 1.0)
app_conf.append_region('dgemm-unmarked', 1.0)
app_conf.append_region('all2all-unmarked', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
stream_id = None
region_names = self._output.get_region_names()
stream_name = [key for key in region_names if key.lower().find('stream') != -1][0]
for nn in node_names:
stream_data = self._output.get_report_data(node_name=nn, region=stream_name)
found = False
for name in region_names:
if stream_name in name:
found = True
self.assertTrue(found)
self.assertEqual(1, stream_data['count'].item())
if stream_id:
self.assertEqual(stream_id, stream_data['id'].item())
else:
stream_id = stream_data['id'].item()
ompt_regions = [key for key in region_names if key.startswith('[OMPT]')]
self.assertLessEqual(2, len(ompt_regions))
self.assertTrue(('MPI_Alltoall' in region_names))
gemm_region = [key for key in region_names if key.lower().find('gemm') != -1]
self.assertLessEqual(1, len(gemm_region))
    def _test_agent_frequency_map(self, name, use_env=False):
        """Common driver for the frequency_map agent tests.

        Runs dgemm/stream/all2all with per-region target frequencies and
        checks that each region's achieved frequency is near its map entry.

        Args:
            name (str): run name used for report/trace/config file names.
            use_env (bool): if True the frequency map is passed through the
                environment rather than the policy (see
                create_frequency_map_policy).
        """
        min_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MIN board 0")
        max_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MAX board 0")
        sticker_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_STICKER board 0")
        freq_step = geopm_test_launcher.geopmread("CPUINFO::FREQ_STEP board 0")
        self._agent = "frequency_map"
        report_path = name + '.report'
        trace_path = name + '.trace'
        num_node = 1
        num_rank = 4
        loop_count = 5
        dgemm_bigo = 15.0
        stream_bigo = 1.0
        dgemm_bigo_jlse = 35.647
        dgemm_bigo_quartz = 29.12
        stream_bigo_jlse = 1.6225
        stream_bigo_quartz = 1.7941
        # Tune big-o values on known machines so run times are comparable.
        hostname = socket.gethostname()
        if hostname.endswith('.alcf.anl.gov'):
            dgemm_bigo = dgemm_bigo_jlse
            stream_bigo = stream_bigo_jlse
        elif hostname.startswith('mcfly'):
            dgemm_bigo = 42.0
            stream_bigo = 1.75
        elif hostname.startswith('quartz'):
            dgemm_bigo = dgemm_bigo_quartz
            stream_bigo = stream_bigo_quartz
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.set_loop_count(loop_count)
        app_conf.append_region('dgemm', dgemm_bigo)
        app_conf.append_region('stream', stream_bigo)
        app_conf.append_region('all2all', 1.0)
        app_conf.write()
        # Per-region frequency targets handed to the frequency_map agent.
        freq_map = {}
        freq_map['dgemm'] = sticker_freq
        freq_map['stream'] = sticker_freq - 2 * freq_step
        freq_map['all2all'] = min_freq
        self._options = create_frequency_map_policy(min_freq, max_freq, freq_map, use_env)
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,
                                                    trace_path, region_barrier=True, time_limit=900)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        launcher.run(name)
        self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
        node_names = self._output.get_node_names()
        self.assertEqual(len(node_names), num_node)
        regions = self._output.get_region_names()
        for nn in node_names:
            for region_name in regions:
                region_data = self._output.get_report_data(node_name=nn, region=region_name)
                if (region_name in ['dgemm', 'stream', 'all2all']):
                    # Reported frequency is a percentage of sticker frequency.
                    msg = region_name + " frequency should be near assigned map frequency"
                    self.assertNear(region_data['frequency'].item(), freq_map[region_name] / sticker_freq * 100, msg=msg)
def test_agent_frequency_map_env(self):
    """Frequency-map agent test with the map supplied via environment (use_env=True)."""
    self._test_agent_frequency_map('test_agent_frequency_map_env', use_env=True)
def test_agent_frequency_map_policy(self):
    """Frequency-map agent test with the map supplied via the policy file (use_env=False)."""
    self._test_agent_frequency_map('test_agent_frequency_map_policy', use_env=False)
def test_agent_energy_efficient_single_region(self):
    """Run the energy_efficient agent on a single 'spin' region and check
    that the learned per-region frequency settles at the policy minimum."""
    name = 'test_energy_efficient_single_region'
    # Platform frequency limits read through the GEOPM test launcher.
    min_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MIN board 0")
    sticker_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_STICKER board 0")
    freq_step = geopm_test_launcher.geopmread("CPUINFO::FREQ_STEP board 0")  # NOTE(review): read but unused in this test
    self._agent = "energy_efficient"
    report_path = name + '.report'
    trace_path = name + '.trace'
    num_node = 1
    num_rank = 4
    loop_count = 100
    # Benchmark configuration: a single short 'spin' region repeated loop_count times.
    app_conf = geopmpy.io.BenchConf(name + '_app.config')
    self._tmp_files.append(app_conf.get_path())
    app_conf.set_loop_count(loop_count)
    app_conf.append_region('spin', 0.1)
    # NOTE(review): unlike the sibling tests, app_conf.write() is not called
    # here -- presumably the launcher persists the config itself; confirm.
    self._options = {'frequency_min': min_freq,
                     'frequency_max': sticker_freq}
    agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
    self._tmp_files.append(agent_conf.get_path())
    launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
    launcher.set_num_node(num_node)
    launcher.set_num_rank(num_rank)
    launcher.run(name)
    # Parse the generated report/trace and verify one entry per node.
    self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
    node_names = self._output.get_node_names()
    self.assertEqual(len(node_names), num_node)
    regions = self._output.get_region_names()
    for nn in node_names:
        for region_name in regions:
            report = geopmpy.io.RawReport(report_path)
            if (region_name in ['spin']):
                region = report.raw_region(nn, region_name)
                msg = region_name + " frequency should be minimum frequency as specified by policy"
                self.assertEqual(region['requested-online-frequency'], min_freq, msg=msg)
@util.skip_unless_run_long_tests()
@util.skip_unless_cpufreq()
@util.skip_unless_batch()
def test_agent_energy_efficient(self):
    """Compare a sticker-frequency-pinned run against an adaptive
    (min..sticker, NaN policy) energy_efficient run; require positive
    package-energy savings at a bounded (<10%) runtime cost."""
    name = 'test_energy_efficient_sticker'
    min_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MIN board 0")
    max_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MAX board 0")  # NOTE(review): read but unused
    sticker_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_STICKER board 0")
    freq_step = geopm_test_launcher.geopmread("CPUINFO::FREQ_STEP board 0")  # NOTE(review): read but unused
    self._agent = "energy_efficient"
    num_node = 1
    num_rank = 4
    loop_count = 200
    # Default benchmark problem sizes, with per-site overrides below.
    dgemm_bigo = 15.0
    stream_bigo = 1.0
    dgemm_bigo_jlse = 35.647
    dgemm_bigo_quartz = 29.12
    stream_bigo_jlse = 1.6225
    stream_bigo_quartz = 1.7941
    hostname = socket.gethostname()
    if hostname.endswith('.alcf.anl.gov'):
        dgemm_bigo = dgemm_bigo_jlse
        stream_bigo = stream_bigo_jlse
    elif hostname.startswith('mcfly'):
        dgemm_bigo = 42.0
        stream_bigo = 1.75
    elif hostname.startswith('quartz'):
        dgemm_bigo = dgemm_bigo_quartz
        stream_bigo = stream_bigo_quartz
    # Two launches: '_sticker' pins min==max==sticker; '_nan_nan' leaves the
    # agent free to adapt between min_freq and sticker_freq.
    run = ['_sticker', '_nan_nan']
    for rr in run:
        report_path = name + rr + '.report'
        trace_path = name + rr + '.trace'
        app_conf = geopmpy.io.BenchConf(name + '_app.config')
        self._tmp_files.append(app_conf.get_path())
        app_conf.set_loop_count(loop_count)
        app_conf.append_region('dgemm', dgemm_bigo)
        app_conf.append_region('stream', stream_bigo)
        app_conf.write()
        if rr == '_sticker':
            self._options = {'frequency_min': sticker_freq,
                             'frequency_max': sticker_freq}
            freq = sticker_freq  # NOTE(review): assigned but never used
        else:
            self._options = {'frequency_min': min_freq,
                             'frequency_max': sticker_freq}
        agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
        self._tmp_files.append(agent_conf.get_path())
        launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,
                                                    trace_path, region_barrier=True, time_limit=900)
        launcher.set_num_node(num_node)
        launcher.set_num_rank(num_rank)
        launcher.run(name + rr)
    # Compare application totals between the pinned and adaptive runs.
    report_path = name + run[0] + '.report'
    trace_path = name + run[0] + '.trace'
    sticker_out = geopmpy.io.AppOutput(report_path, trace_path + '*')
    report_path = name + run[1] + '.report'
    trace_path = name + run[1] + '.trace'
    nan_out = geopmpy.io.AppOutput(report_path, trace_path + '*')
    for nn in nan_out.get_node_names():
        sticker_app_total = sticker_out.get_app_total_data(node_name=nn)
        nan_app_total = nan_out.get_app_total_data(node_name=nn)
        runtime_savings_epoch = (sticker_app_total['runtime'].item() - nan_app_total['runtime'].item()) / sticker_app_total['runtime'].item()
        energy_savings_epoch = (sticker_app_total['energy-package'].item() - nan_app_total['energy-package'].item()) / sticker_app_total['energy-package'].item()
        # Tolerate up to 10% runtime regression; require any energy saving.
        self.assertLess(-0.1, runtime_savings_epoch)
        self.assertLess(0.0, energy_savings_epoch)
class TestIntegrationGeopmio(unittest.TestCase):
    """Command-line integration tests for the geopmread/geopmwrite tools.

    Each test launches the tool as a subprocess and scans its stdout,
    ignoring any line containing the 'Incompatible CPU' warning.
    """

    def setUp(self):
        # Warning emitted on unsupported hardware; lines containing it are
        # skipped by every output check below.
        self.skip_warning_string = 'Incompatible CPU'

    def check_output(self, args, expected):
        """Run self.exec_name with ``args``; assert each entry of ``expected``
        appears on successive non-warning output lines and that no later
        line contains 'Error'."""
        try:
            proc = subprocess.Popen([self.exec_name] + args,
                                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            for exp in expected:
                line = proc.stdout.readline()
                while self.skip_warning_string.encode() in line:
                    line = proc.stdout.readline()
                self.assertIn(exp.encode(), line)
            for line in proc.stdout:
                if self.skip_warning_string.encode() not in line:
                    self.assertNotIn(b'Error', line)
        except subprocess.CalledProcessError as ex:
            # NOTE(review): Popen itself never raises CalledProcessError
            # (only check_call/check_output/run do); handler kept for
            # symmetry with the existing test style.
            sys.stderr.write('{}\n'.format(ex.output))

    def check_output_range(self, args, min_exp, max_exp):
        """Run self.exec_name with ``args`` and assert every numeric output
        line lies inside [min_exp, max_exp]."""
        try:
            proc = subprocess.Popen([self.exec_name] + args,
                                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            for line in proc.stdout:
                if self.skip_warning_string.encode() in line:
                    continue
                if line.startswith(b'0x'):
                    # base=0 honors the '0x' prefix; plain int(line) would
                    # raise ValueError on hex output.
                    value = int(line, 0)
                else:
                    value = float(line)
                self.assertLessEqual(min_exp, value, msg="Value read for {} smaller than {}: {}.".format(args, min_exp, value))
                self.assertGreaterEqual(max_exp, value, msg="Value read for {} larger than {}: {}.".format(args, max_exp, value))
        except subprocess.CalledProcessError as ex:
            sys.stderr.write('{}\n'.format(ex.output))

    def check_no_error(self, args):
        """Run self.exec_name with ``args`` and assert no non-warning output
        line contains 'Error'."""
        try:
            proc = subprocess.Popen([self.exec_name] + args,
                                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            for line in proc.stdout:
                if self.skip_warning_string.encode() not in line:
                    self.assertNotIn(b'Error', line)
        except subprocess.CalledProcessError as ex:
            sys.stderr.write('{}\n'.format(ex.output))

    def test_geopmread_command_line(self):
        """Check geopmread usage output, domain listing, and error messages."""
        self.exec_name = "geopmread"
        self.check_no_error([])
        self.check_output(['--domain'], ['board', 'package', 'core', 'cpu',
                                        'board_memory', 'package_memory',
                                        'board_nic', 'package_nic',
                                        'board_accelerator', 'package_accelerator'])
        self.check_output(['--domain', 'TIME'], ['cpu'])
        self.check_no_error(['TIME', 'board', '0'])
        self.check_no_error(['--info'])
        self.check_output(['--info', 'TIME'], ['Time in seconds'])
        # Malformed invocations must produce specific error text.
        read_err = 'domain type and domain index are required'
        self.check_output(['TIME'], [read_err])
        self.check_output(['TIME', 'board'], [read_err])
        self.check_output(['TIME', 'board', 'bad'], ['invalid domain index'])
        self.check_output(['FREQUENCY', 'package', '111'], ['cannot read signal'])
        self.check_output(['ENERGY_PACKAGE', 'cpu', '0'], ['cannot read signal'])
        self.check_output(['INVALID', 'board', '0'], ['cannot read signal'])
        self.check_output(['--domain', 'INVALID'], ['unable to determine signal type'])
        self.check_output(['--domain', '--info'], ['info about domain not implemented'])

    @util.skip_unless_batch()
    def test_geopmread_all_signal_agg(self):
        """Every signal listed by geopmread must be readable at board 0."""
        self.exec_name = "geopmread"
        all_signals = []
        try:
            proc = subprocess.Popen([self.exec_name],
                                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            for line in proc.stdout:
                if self.skip_warning_string.encode() not in line:
                    all_signals.append(line.strip())
        except subprocess.CalledProcessError as ex:
            sys.stderr.write('{}\n'.format(ex.output))
        for sig in all_signals:
            self.check_no_error([sig.decode(), 'board', '0'])

    @util.skip_unless_batch()
    def test_geopmread_signal_value(self):
        """Selected signals must report values within plausible ranges."""
        self.exec_name = "geopmread"
        signal_range = {
            "POWER_PACKAGE": (20, 400),
            "FREQUENCY": (1.0e8, 5.0e9),
            "TIME": (0, 10),
            "TEMPERATURE_CORE": (0, 100)
        }
        for signal, val_range in signal_range.items():
            # The original wrapped check_no_error in a try/except that
            # re-raised unconditionally and was followed by unreachable
            # code; plain sequential calls are equivalent.
            self.check_no_error([signal, "board", "0"])
            self.check_output_range([signal, "board", "0"], *val_range)

    def test_geopmread_custom_msr(self):
        """A plugin on GEOPM_PLUGIN_PATH must expose its custom MSR signal."""
        self.exec_name = "geopmread"
        path = os.path.join(
            os.path.dirname(
                os.path.dirname(
                    os.path.realpath(__file__))),
            'examples/custom_msr/')
        custom_env = os.environ.copy()
        custom_env['GEOPM_PLUGIN_PATH'] = path
        all_signals = []
        try:
            proc = subprocess.Popen([self.exec_name], env=custom_env,
                                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            for line in proc.stdout:
                if self.skip_warning_string.encode() not in line:
                    all_signals.append(line.strip())
        except subprocess.CalledProcessError as ex:
            sys.stderr.write('{}\n'.format(ex.output))
        self.assertIn(b'MSR::CORE_PERF_LIMIT_REASONS#', all_signals)

    def test_geopmwrite_command_line(self):
        """Check geopmwrite usage output, domain listing, and error messages."""
        self.exec_name = "geopmwrite"
        self.check_no_error([])
        self.check_output(['--domain'], ['board', 'package', 'core', 'cpu',
                                        'board_memory', 'package_memory',
                                        'board_nic', 'package_nic',
                                        'board_accelerator', 'package_accelerator'])
        self.check_no_error(['--domain', 'FREQUENCY'])
        self.check_no_error(['--info'])
        self.check_output(['--info', 'FREQUENCY'], ['processor frequency'])
        write_err = 'domain type, domain index, and value are required'
        self.check_output(['FREQUENCY'], [write_err])
        self.check_output(['FREQUENCY', 'board'], [write_err])
        self.check_output(['FREQUENCY', 'board', '0'], [write_err])
        self.check_output(['FREQUENCY', 'board', 'bad', '0'], ['invalid domain index'])
        self.check_output(['FREQUENCY', 'board', '0', 'bad'], ['invalid write value'])
        self.check_output(['FREQUENCY', 'package', '111', '0'], ['cannot write control'])
        self.check_output(['FREQUENCY', 'board_nic', '0', '0'], ['cannot write control'])
        self.check_output(['INVALID', 'board', '0', '0'], ['cannot write control'])
        self.check_output(['--domain', 'INVALID'], ['unable to determine control type'])
        self.check_output(['--domain', '--info'], ['info about domain not implemented'])

    @util.skip_unless_batch()
    def test_geopmwrite_set_freq(self):
        """Write min and max CPU frequency, confirm each via geopmread,
        then restore the original setting."""
        def read_stdout_line(stdout):
            # Skip warning lines and return the next payload line, stripped.
            line = stdout.readline()
            while self.skip_warning_string.encode() in line:
                line = stdout.readline()
            return line.strip()

        def read_current_freq(domain, signal='FREQUENCY'):
            read_proc = subprocess.Popen(['geopmread', signal, domain, '0'],
                                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            freq = read_stdout_line(read_proc.stdout)
            freq = float(freq)
            return freq

        def read_min_max_freq():
            # Truncate both bounds to a whole multiple of 100 MHz.
            read_proc = subprocess.Popen(['geopmread', 'CPUINFO::FREQ_MIN', 'board', '0'],
                                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            min_freq = read_stdout_line(read_proc.stdout)
            min_freq = float(int(float(min_freq)/1e8)*1e8)
            read_proc = subprocess.Popen(['geopmread', 'CPUINFO::FREQ_MAX', 'board', '0'],
                                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            max_freq = read_stdout_line(read_proc.stdout)
            max_freq = float(int(float(max_freq)/1e8)*1e8)
            return min_freq, max_freq

        self.exec_name = "geopmwrite"
        read_proc = subprocess.Popen(['geopmread', '--domain', 'FREQUENCY'],
                                     stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        read_domain = read_stdout_line(read_proc.stdout).decode()
        write_proc = subprocess.Popen([self.exec_name, '--domain', 'FREQUENCY'],
                                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        write_domain = read_stdout_line(write_proc.stdout).decode()
        min_freq, max_freq = read_min_max_freq()
        # Sanity-check the current setting before modifying it.
        old_freq = read_current_freq(write_domain, 'MSR::PERF_CTL:FREQ')
        self.assertLess(old_freq, max_freq * 2)
        self.assertGreater(old_freq, min_freq - 1e8)
        self.check_no_error(['FREQUENCY', write_domain, '0', str(min_freq)])
        result = read_current_freq(read_domain)
        self.assertEqual(min_freq, result)
        self.check_no_error(['FREQUENCY', write_domain, '0', str(max_freq)])
        result = read_current_freq(read_domain)
        self.assertEqual(max_freq, result)
        # Restore the frequency that was in effect before the test.
        self.check_no_error(['FREQUENCY', write_domain, '0', str(old_freq)])
class TestIntegrationGeopmagent(unittest.TestCase):
    """Command-line integration tests for the geopmagent tool."""
    def setUp(self):
        self.exec_name = 'geopmagent'
        # Warning emitted on unsupported hardware; lines containing it are
        # ignored by all output checks below.
        self.skip_warning_string = 'Incompatible CPU frequency driver/governor'
    def check_output(self, args, expected):
        """Run geopmagent with ``args``; assert each entry of ``expected``
        appears on successive non-warning, non-blank output lines and that
        no later line contains 'Error'."""
        try:
            proc = subprocess.Popen([self.exec_name] + args,
                                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            for exp in expected:
                line = proc.stdout.readline()
                while self.skip_warning_string.encode() in line or line == b'\n':
                    line = proc.stdout.readline()
                self.assertIn(exp.encode(), line)
            for line in proc.stdout:
                if self.skip_warning_string.encode() not in line:
                    self.assertNotIn(b'Error', line)
        except subprocess.CalledProcessError as ex:
            # NOTE(review): Popen never raises CalledProcessError (only
            # check_call/check_output/run do); this handler appears dead.
            sys.stderr.write('{}\n'.format(ex.output))
    def check_json_output(self, args, expected):
        """Run geopmagent with ``args`` and assert its first payload line
        parses as JSON equal to ``expected``."""
        try:
            proc = subprocess.Popen([self.exec_name] + args,
                                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as ex:
            sys.stderr.write('{}\n'.format(ex.output))
        line = proc.stdout.readline()
        while self.skip_warning_string.encode() in line or line == b'\n':
            line = proc.stdout.readline()
        try:
            out_json = json.loads(line.decode())
        except ValueError:
            self.fail('Could not convert json string: {}\n'.format(line))
        self.assertEqual(expected, out_json)
        for line in proc.stdout:
            if self.skip_warning_string.encode() not in line:
                self.assertNotIn(b'Error', line)
    def check_no_error(self, args):
        """Run geopmagent with ``args`` and assert no non-warning output
        line contains 'Error'."""
        try:
            proc = subprocess.Popen([self.exec_name] + args,
                                    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            for line in proc.stdout:
                if self.skip_warning_string.encode() not in line:
                    self.assertNotIn(b'Error', line)
        except subprocess.CalledProcessError as ex:
            sys.stderr.write('{}\n'.format(ex.output))
    def test_geopmagent_command_line(self):
        """Check agent listing, per-agent Policy/Sample output, JSON policy
        generation, and error messages for malformed policies."""
        # All built-in agents must be listed when run with no arguments.
        agent_names = ['monitor', 'power_balancer', 'power_governor',
                       'energy_efficient', 'frequency_map']
        self.check_output([], agent_names)
        self.check_output(['--help'], ['Usage'])
        self.check_no_error(['--version'])
        for agent in agent_names:
            self.check_output(['--agent', agent],
                              ['Policy', 'Sample'])
        # Policy generation: numeric values pass through, 'nan' (any case)
        # is normalized to the string 'NAN'.
        self.check_json_output(['--agent', 'monitor', '--policy', 'None'],
                               {})
        self.check_json_output(['--agent', 'power_governor', '--policy', '150'],
                               {'POWER_PACKAGE_LIMIT_TOTAL': 150})
        self.check_json_output(['--agent', 'power_governor', '--policy', 'NAN'],
                               {'POWER_PACKAGE_LIMIT_TOTAL': 'NAN'})
        self.check_json_output(['--agent', 'power_governor', '--policy', 'nan'],
                               {'POWER_PACKAGE_LIMIT_TOTAL': 'NAN'})
        self.check_json_output(['--agent', 'energy_efficient', '--policy', 'nan,nan'],
                               {'FREQ_MIN': 'NAN', 'FREQ_MAX': 'NAN'})
        self.check_json_output(['--agent', 'energy_efficient', '--policy', '1.2e9,nan'],
                               {'FREQ_MIN': 1.2e9, 'FREQ_MAX': 'NAN'})
        self.check_json_output(['--agent', 'energy_efficient', '--policy', 'nan,1.3e9'],
                               {'FREQ_MIN': 'NAN', 'FREQ_MAX': 1.3e9})
        self.check_json_output(['--agent', 'power_balancer', '--policy', '150'],
                               {'POWER_PACKAGE_LIMIT_TOTAL': 150})
        # Malformed policies must produce specific error text.
        self.check_output(['--agent', 'power_governor', '--policy', 'None'],
                          ['not a valid floating-point number', 'Invalid argument'])
        self.check_output(['--agent', 'monitor', '--policy', '300'],
                          ['agent takes no parameters', 'Invalid argument'])
        self.check_output(['--agent', 'energy_efficient', '--policy', '2.0e9,5.0e9,4.5e9,6.7,4.2'],
                          ['Number of policies', 'Invalid argument'])
# Allow running this integration-test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| true | true |
f7302e0b103de32216051a22e30483430b67f84e | 940 | py | Python | running_modes/utils/general.py | lilleswing/Reinvent-1 | ac4e3e6fa6379c6f4af883478dfd1b3407933ada | [
"Apache-2.0"
] | 183 | 2020-04-04T02:01:15.000Z | 2022-03-30T21:56:56.000Z | running_modes/utils/general.py | lilleswing/Reinvent-1 | ac4e3e6fa6379c6f4af883478dfd1b3407933ada | [
"Apache-2.0"
] | 39 | 2020-04-05T15:19:56.000Z | 2022-03-09T12:58:21.000Z | running_modes/utils/general.py | lilleswing/Reinvent-1 | ac4e3e6fa6379c6f4af883478dfd1b3407933ada | [
"Apache-2.0"
] | 70 | 2020-04-05T19:25:43.000Z | 2022-02-22T12:04:39.000Z | import time
import numpy as np
import torch
def to_tensor(tensor):
    """Convert a numpy array (or pass through a torch tensor) to a torch
    tensor, moved to the GPU when CUDA is available.

    torch.autograd.Variable has been a no-op shim since the Variable/Tensor
    merge in PyTorch 0.4, so the tensor is returned directly.
    """
    if isinstance(tensor, np.ndarray):
        tensor = torch.from_numpy(tensor)
    if torch.cuda.is_available():
        return tensor.cuda()
    return tensor
def set_default_device_cuda():
    """Set the default torch tensor type to CUDA when available.

    Returns:
        bool: True when the CUDA tensor type was selected, False when the
        CPU tensor type was selected.
    """
    if not torch.cuda.is_available():
        torch.set_default_tensor_type(torch.FloatTensor)
        return False
    # device_name == "cuda"
    torch.set_default_tensor_type(torch.cuda.FloatTensor)  # pylint: disable=E1101
    return True
def estimate_run_time(start_time, n_steps, step):
    """Estimate elapsed and remaining wall-clock time for an iterative run.

    Args:
        start_time: Timestamp (time.time()) when the run began.
        n_steps: Total number of steps in the run.
        step: Zero-based index of the step just completed.

    Returns:
        dict: {"elapsed": whole seconds elapsed so far,
               "left": linear-extrapolation estimate of seconds remaining}.
    """
    # Fixed: the original return line had extraction junk fused onto it,
    # which broke the module at import time.
    time_elapsed = int(time.time() - start_time)
    time_left = (time_elapsed * ((n_steps - step) / (step + 1)))
    summary = {"elapsed": time_elapsed, "left": time_left}
    return summary
import numpy as np
import torch
def to_tensor(tensor):
    """Convert a numpy array (or pass through a torch tensor) to a torch
    tensor, moved to the GPU when CUDA is available.

    NOTE(review): torch.autograd.Variable is a deprecated no-op shim since
    PyTorch 0.4; Variable(tensor) simply returns the tensor.
    """
    if isinstance(tensor, np.ndarray):
        tensor = torch.from_numpy(tensor)
    if torch.cuda.is_available():
        return torch.autograd.Variable(tensor).cuda()
    return torch.autograd.Variable(tensor)
def set_default_device_cuda():
    """Set the default torch tensor type to CUDA when available.

    Returns:
        bool: True when the CUDA tensor type was selected, else False.
    """
    if torch.cuda.is_available() == False:  # noqa: E712 -- prefer 'not torch.cuda.is_available()'
        tensor = torch.FloatTensor
        torch.set_default_tensor_type(tensor)
        return False
    else:
        tensor = torch.cuda.FloatTensor  # pylint: disable=E1101
        torch.set_default_tensor_type(tensor)
        return True
def estimate_run_time(start_time, n_steps, step):
    """Estimate elapsed and remaining wall-clock time for an iterative run.

    Returns:
        dict: {"elapsed": whole seconds elapsed,
               "left": linearly extrapolated seconds remaining}.
    """
    # Fixed: the original return line had extraction junk fused onto it,
    # which broke the module at import time.
    time_elapsed = int(time.time() - start_time)
    time_left = (time_elapsed * ((n_steps - step) / (step + 1)))
    summary = {"elapsed": time_elapsed, "left": time_left}
    return summary
f7302e9cf28bc426a294342f8db30d4a7364613d | 370 | py | Python | multiple-languages/python/ros-cdk-cas-1.0.3/src/ros_cdk_cas/_jsii/__init__.py | aliyun/Resource-Orchestration-Service-Cloud-Development-K | 2b81e135002ed81cb72f7d07be7ff497ea39e2e1 | [
"Apache-2.0"
] | 15 | 2020-11-10T02:00:28.000Z | 2022-02-07T19:28:10.000Z | multiple-languages/python/ros-cdk-cas-1.0.3/src/ros_cdk_cas/_jsii/__init__.py | aliyun/Resource-Orchestration-Service-Cloud-Development-K | 2b81e135002ed81cb72f7d07be7ff497ea39e2e1 | [
"Apache-2.0"
] | 23 | 2021-02-02T04:37:02.000Z | 2022-03-31T06:41:06.000Z | multiple-languages/python/ros-cdk-cas-1.0.3/src/ros_cdk_cas/_jsii/__init__.py | aliyun/Resource-Orchestration-Service-Cloud-Development-K | 2b81e135002ed81cb72f7d07be7ff497ea39e2e1 | [
"Apache-2.0"
] | 4 | 2021-01-13T05:48:43.000Z | 2022-03-15T11:26:48.000Z | import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
import constructs._jsii
import ros_cdk_core._jsii
# Load this package's bundled jsii assembly; __name__[0:-6] strips the
# trailing "._jsii" suffix to recover the parent package name.
__jsii_assembly__ = jsii.JSIIAssembly.load(
    "@alicloud/ros-cdk-cas", "1.0.3", __name__[0:-6], "ros-cdk-cas@1.0.3.jsii.tgz"
)
# Only the assembly handle is part of this module's public API.
__all__ = [
    "__jsii_assembly__",
]
# NOTE(review): publication.publish() presumably prunes non-public module
# attributes -- confirm against the 'publication' package documentation.
publication.publish()
| 16.086957 | 82 | 0.759459 | import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
import constructs._jsii
import ros_cdk_core._jsii
# Load this package's bundled jsii assembly; __name__[0:-6] strips the
# trailing "._jsii" suffix to recover the parent package name.
__jsii_assembly__ = jsii.JSIIAssembly.load(
    "@alicloud/ros-cdk-cas", "1.0.3", __name__[0:-6], "ros-cdk-cas@1.0.3.jsii.tgz"
)
# Only the assembly handle is part of this module's public API.
__all__ = [
    "__jsii_assembly__",
]
publication.publish()
| true | true |
f7302ec626d9babefa67e7f7cd70358eb037d937 | 1,552 | py | Python | src/pyri/webui_browser/plugins/panel.py | pyri-project/pyri-webui-browser | 57f20bef7af357a8d051c700aff95fef389a3be0 | [
"Apache-2.0"
] | null | null | null | src/pyri/webui_browser/plugins/panel.py | pyri-project/pyri-webui-browser | 57f20bef7af357a8d051c700aff95fef389a3be0 | [
"Apache-2.0"
] | null | null | null | src/pyri/webui_browser/plugins/panel.py | pyri-project/pyri-webui-browser | 57f20bef7af357a8d051c700aff95fef389a3be0 | [
"Apache-2.0"
] | null | null | null | from typing import List, Dict, Callable, Any, NamedTuple, TYPE_CHECKING
from pyri.plugins import util as plugin_util
if TYPE_CHECKING:
from .. import PyriWebUIBrowser
class PyriWebUIBrowserPanelInfo(NamedTuple):
title: str
panel_type: str
priority: int
class PyriWebUIBrowserPanelBase:
pass
class PyriWebUIBrowserPanelPluginFactory:
def __init__(self):
super().__init__()
def get_plugin_name(self) -> str:
return ""
def get_panels_infos(self) -> Dict[str,PyriWebUIBrowserPanelInfo]:
return []
async def add_panel(self, panel_type: str, core: "PyriWebUIBrowser", parent_element: Any) -> PyriWebUIBrowserPanelBase:
raise NotImplementedError()
def get_webui_browser_panel_factories() -> List[PyriWebUIBrowserPanelPluginFactory]:
return plugin_util.get_plugin_factories("pyri.plugins.webui_browser_panel")
def get_all_webui_browser_panels_infos() -> Dict[str,Any]:
ret = dict()
factories = get_webui_browser_panel_factories()
for factory in factories:
ret[factory.get_plugin_name()] = factory.get_panels_infos()
return ret
async def add_webui_browser_panel(panel_type: str, core: "PyriWebUIBrowser", parent_element: Any) -> Dict[str,Any]:
factories = get_webui_browser_panel_factories()
for factory in factories:
infos = factory.get_panels_infos()
if panel_type in infos:
return await factory.add_panel(panel_type, core, parent_element)
assert False, f"Unknown panel_type \"{panel_type}\" specified"
| 33.021277 | 123 | 0.73518 | from typing import List, Dict, Callable, Any, NamedTuple, TYPE_CHECKING
from pyri.plugins import util as plugin_util
if TYPE_CHECKING:
from .. import PyriWebUIBrowser
class PyriWebUIBrowserPanelInfo(NamedTuple):
title: str
panel_type: str
priority: int
class PyriWebUIBrowserPanelBase:
pass
class PyriWebUIBrowserPanelPluginFactory:
def __init__(self):
super().__init__()
def get_plugin_name(self) -> str:
return ""
def get_panels_infos(self) -> Dict[str,PyriWebUIBrowserPanelInfo]:
return []
async def add_panel(self, panel_type: str, core: "PyriWebUIBrowser", parent_element: Any) -> PyriWebUIBrowserPanelBase:
raise NotImplementedError()
def get_webui_browser_panel_factories() -> List[PyriWebUIBrowserPanelPluginFactory]:
return plugin_util.get_plugin_factories("pyri.plugins.webui_browser_panel")
def get_all_webui_browser_panels_infos() -> Dict[str,Any]:
ret = dict()
factories = get_webui_browser_panel_factories()
for factory in factories:
ret[factory.get_plugin_name()] = factory.get_panels_infos()
return ret
async def add_webui_browser_panel(panel_type: str, core: "PyriWebUIBrowser", parent_element: Any) -> Dict[str,Any]:
factories = get_webui_browser_panel_factories()
for factory in factories:
infos = factory.get_panels_infos()
if panel_type in infos:
return await factory.add_panel(panel_type, core, parent_element)
assert False, f"Unknown panel_type \"{panel_type}\" specified"
| true | true |
f7302fb44428bb7330b8cbab6d2be8a127232a1b | 1,451 | py | Python | monk/system_unit_tests/pytorch/test_block_resnet_v2.py | Shreyashwaghe/monk_v1 | 4ee4d9483e8ffac9b73a41f3c378e5abf5fc799b | [
"Apache-2.0"
] | 7 | 2020-07-26T08:37:29.000Z | 2020-10-30T10:23:11.000Z | monk/system_unit_tests/pytorch/test_block_resnet_v2.py | mursalfk/monk_v1 | 62f34a52f242772186ffff7e56764e958fbcd920 | [
"Apache-2.0"
] | null | null | null | monk/system_unit_tests/pytorch/test_block_resnet_v2.py | mursalfk/monk_v1 | 62f34a52f242772186ffff7e56764e958fbcd920 | [
"Apache-2.0"
] | 1 | 2020-10-07T12:57:44.000Z | 2020-10-07T12:57:44.000Z | import os
import sys
sys.path.append("../../../monk/");
import psutil
from pytorch_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
import torch
import numpy as np
from pytorch.losses.return_loss import load_loss
def test_block_resnet_v2(system_dict):
    """Forward-pass smoke test for gtf.resnet_v2_block (downsample and
    identity variants); records pass/fail/skip in ``system_dict`` and
    returns it so the suite can aggregate results."""
    forward = True;
    test = "test_block_resnet_v2";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            gtf = prototype(verbose=0);
            gtf.Prototype("sample-project-1", "sample-experiment-1");
            network = [];
            # One block with downsampling enabled, one plain residual block.
            network.append(gtf.resnet_v2_block(output_channels=32, stride=1, downsample=True));
            network.append(gtf.resnet_v2_block(output_channels=32, stride=1, downsample=False));
            gtf.Compile_Network(network, data_shape=(1, 64, 64), use_gpu=False);
            # Single-sample forward pass through the compiled model.
            x = torch.randn(1, 1, 64, 64);
            y = gtf.system_dict["local"]["model"](x);
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            # Any failure is recorded rather than propagated so that the
            # remaining tests in the suite can still run.
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");
    return system_dict
| 29.612245 | 96 | 0.644383 | import os
import sys
sys.path.append("../../../monk/");
import psutil
from pytorch_prototype import prototype
from compare_prototype import compare
from common import print_start
from common import print_status
import torch
import numpy as np
from pytorch.losses.return_loss import load_loss
def test_block_resnet_v2(system_dict):
    """Forward-pass smoke test for gtf.resnet_v2_block; records pass/fail/
    skip in ``system_dict`` and returns it."""
    forward = True;
    test = "test_block_resnet_v2";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            gtf = prototype(verbose=0);
            gtf.Prototype("sample-project-1", "sample-experiment-1");
            network = [];
            # One block with downsampling enabled, one plain residual block.
            network.append(gtf.resnet_v2_block(output_channels=32, stride=1, downsample=True));
            network.append(gtf.resnet_v2_block(output_channels=32, stride=1, downsample=False));
            gtf.Compile_Network(network, data_shape=(1, 64, 64), use_gpu=False);
            x = torch.randn(1, 1, 64, 64);
            y = gtf.system_dict["local"]["model"](x);
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            # Failures are recorded rather than propagated so the suite
            # can continue with later tests.
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");
    return system_dict
| true | true |
f7302ffbe807ed5672868941d35075e072212e37 | 2,694 | py | Python | sgnlp/models/span_extraction/train.py | raymondng76/sgnlp | f09eada90ef5b1ee979901e5c14413d32e758049 | [
"MIT"
] | 14 | 2021-08-02T01:52:18.000Z | 2022-01-14T10:16:02.000Z | sgnlp/models/span_extraction/train.py | raymondng76/sgnlp | f09eada90ef5b1ee979901e5c14413d32e758049 | [
"MIT"
] | 29 | 2021-08-02T01:53:46.000Z | 2022-03-30T05:40:46.000Z | sgnlp/models/span_extraction/train.py | raymondng76/sgnlp | f09eada90ef5b1ee979901e5c14413d32e758049 | [
"MIT"
] | 7 | 2021-08-02T01:54:19.000Z | 2022-01-07T06:37:45.000Z | import json
import math
from transformers import Trainer
from transformers import TrainingArguments
from .config import RecconSpanExtractionConfig
from .data_class import RecconSpanExtractionArguments
from .modeling import RecconSpanExtractionModel
from .tokenization import RecconSpanExtractionTokenizer
from .utils import parse_args_and_load_config, load_examples, RecconSpanExtractionData
def train_model(cfg: RecconSpanExtractionArguments):
    """
    Train a RecconSpanExtractionModel with the HuggingFace Trainer.

    Args:
        cfg (:obj:`RecconSpanExtractionArguments`):
            RecconSpanExtractionArguments config loaded from the config file.

    Example::
        import json
        from sgnlp.models.span_extraction import train
        from sgnlp.models.span_extraction.utils import parse_args_and_load_config
        cfg = parse_args_and_load_config('config/span_extraction_config.json')
        train(cfg)
    """
    # Config, tokenizer and model are all derived from the same checkpoint.
    config = RecconSpanExtractionConfig.from_pretrained(cfg.model_name)
    tokenizer = RecconSpanExtractionTokenizer.from_pretrained(cfg.model_name)
    model = RecconSpanExtractionModel.from_pretrained(cfg.model_name, config=config)
    with open(cfg.train_data_path, "r") as train_file:
        train_json = json.load(train_file)
    with open(cfg.val_data_path, "r") as val_file:
        val_json = json.load(val_file)
    # Identical featurization settings for both splits.
    load_train_exp_args = {
        "examples": train_json,
        "tokenizer": tokenizer,
        "max_seq_length": cfg.max_seq_length,
        "doc_stride": cfg.doc_stride,
        "max_query_length": cfg.max_query_length,
    }
    load_valid_exp_args = {
        "examples": val_json,
        "tokenizer": tokenizer,
        "max_seq_length": cfg.max_seq_length,
        "doc_stride": cfg.doc_stride,
        "max_query_length": cfg.max_query_length,
    }
    train_dataset = load_examples(**load_train_exp_args)
    val_dataset = load_examples(**load_valid_exp_args)
    # Total optimizer steps over the run; used only to convert warmup_ratio
    # into an absolute warmup step count below.
    t_total = (
        len(train_dataset)
        // cfg.train_args["gradient_accumulation_steps"]
        * cfg.train_args["num_train_epochs"]
    )
    # Batches per device per epoch -- presumably so evaluation runs about
    # once per epoch; confirm against the Trainer eval_steps semantics.
    cfg.train_args["eval_steps"] = int(
        len(train_dataset) / cfg.train_args["per_device_train_batch_size"]
    )
    cfg.train_args["warmup_steps"] = math.ceil(t_total * cfg.train_args["warmup_ratio"])
    training_args = TrainingArguments(**cfg.train_args)
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=RecconSpanExtractionData(train_dataset),
        eval_dataset=RecconSpanExtractionData(val_dataset),
    )
    trainer.train()
    trainer.save_model()
if __name__ == "__main__":
cfg = parse_args_and_load_config()
train_model(cfg)
| 32.071429 | 88 | 0.717892 | import json
import math
from transformers import Trainer
from transformers import TrainingArguments
from .config import RecconSpanExtractionConfig
from .data_class import RecconSpanExtractionArguments
from .modeling import RecconSpanExtractionModel
from .tokenization import RecconSpanExtractionTokenizer
from .utils import parse_args_and_load_config, load_examples, RecconSpanExtractionData
def train_model(cfg: RecconSpanExtractionArguments):
    """Train a RecconSpanExtractionModel with the HuggingFace Trainer using
    settings from ``cfg``; saves the final model via trainer.save_model()."""
    # Config, tokenizer and model are all derived from the same checkpoint.
    config = RecconSpanExtractionConfig.from_pretrained(cfg.model_name)
    tokenizer = RecconSpanExtractionTokenizer.from_pretrained(cfg.model_name)
    model = RecconSpanExtractionModel.from_pretrained(cfg.model_name, config=config)
    with open(cfg.train_data_path, "r") as train_file:
        train_json = json.load(train_file)
    with open(cfg.val_data_path, "r") as val_file:
        val_json = json.load(val_file)
    # Identical featurization settings for both splits.
    load_train_exp_args = {
        "examples": train_json,
        "tokenizer": tokenizer,
        "max_seq_length": cfg.max_seq_length,
        "doc_stride": cfg.doc_stride,
        "max_query_length": cfg.max_query_length,
    }
    load_valid_exp_args = {
        "examples": val_json,
        "tokenizer": tokenizer,
        "max_seq_length": cfg.max_seq_length,
        "doc_stride": cfg.doc_stride,
        "max_query_length": cfg.max_query_length,
    }
    train_dataset = load_examples(**load_train_exp_args)
    val_dataset = load_examples(**load_valid_exp_args)
    # Total optimizer steps; used to derive warmup_steps from warmup_ratio.
    t_total = (
        len(train_dataset)
        // cfg.train_args["gradient_accumulation_steps"]
        * cfg.train_args["num_train_epochs"]
    )
    # Batches per device per epoch -- presumably one evaluation per epoch.
    cfg.train_args["eval_steps"] = int(
        len(train_dataset) / cfg.train_args["per_device_train_batch_size"]
    )
    cfg.train_args["warmup_steps"] = math.ceil(t_total * cfg.train_args["warmup_ratio"])
    training_args = TrainingArguments(**cfg.train_args)
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=RecconSpanExtractionData(train_dataset),
        eval_dataset=RecconSpanExtractionData(val_dataset),
    )
    trainer.train()
    trainer.save_model()
if __name__ == "__main__":
cfg = parse_args_and_load_config()
train_model(cfg)
| true | true |
f73030990cc126d9327feaa150791a0d22622092 | 1,387 | py | Python | tests/update_test_outputs.py | UUDigitalHumanitieslab/folia2alpino | 4181955ccecadd02e311a548d67b17c09bfd81d4 | [
"MIT"
] | null | null | null | tests/update_test_outputs.py | UUDigitalHumanitieslab/folia2alpino | 4181955ccecadd02e311a548d67b17c09bfd81d4 | [
"MIT"
] | null | null | null | tests/update_test_outputs.py | UUDigitalHumanitieslab/folia2alpino | 4181955ccecadd02e311a548d67b17c09bfd81d4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Script for updating the output files using the current behavior.
"""
import sys
sys.path.append("..")
sys.path.append(".")
from glob import glob
import unittest
import re
from typing import cast, List, Sequence
from os import path
from corpus2alpino.converter import Converter
from corpus2alpino.collectors.filesystem import FilesystemCollector
from corpus2alpino.targets.memory import MemoryTarget
from corpus2alpino.writers.paqu import PaQuWriter
# Which input files to regenerate expected outputs for: a comma-separated
# list of glob patterns may be given as the first CLI argument.
args = sys.argv[1:]
if args:
    patterns = args[0].split(',')
else:
    patterns = ['example*.xml', 'example*.txt', 'example*.cha']
paqu_writer = PaQuWriter()
test_files = cast(List[str], [])
for pattern in patterns:
    # Skip files that are themselves expected-output fixtures.
    test_files += (f for f in glob(path.join(path.dirname(__file__), pattern))
                   if '_expected' not in f)
converter = Converter(
    FilesystemCollector(test_files),
    target=MemoryTarget(),
    writer=paqu_writer)
converted = list(converter.convert())
for test_file, output in zip(test_files, converted):
    # Raw string: the original '\.' in a non-raw literal is an invalid
    # escape (DeprecationWarning, SyntaxError in future Python versions).
    expected_filename = re.sub(r'\.(txt|xml|cha)$', '_expected.txt', test_file)
    with open(expected_filename, mode='w', encoding='utf-8') as expected_file:
        expected_file.write(output)
# Regenerate the enrichment fixture from the current enrichment behavior.
from test_enrich_lassy import get_enriched
with open('enrichment_expected.xml', mode='w', encoding='utf-8') as expected_file:
    expected_file.write(get_enriched())
| 29.510638 | 82 | 0.733237 |
import sys
sys.path.append("..")
sys.path.append(".")
from glob import glob
import unittest
import re
from typing import cast, List, Sequence
from os import path
from corpus2alpino.converter import Converter
from corpus2alpino.collectors.filesystem import FilesystemCollector
from corpus2alpino.targets.memory import MemoryTarget
from corpus2alpino.writers.paqu import PaQuWriter
args = sys.argv[1:]
if args:
patterns = args[0].split(',')
else:
patterns = ['example*.xml', 'example*.txt', 'example*.cha']
paqu_writer = PaQuWriter()
test_files = cast(List[str], [])
for pattern in patterns:
test_files += (f for f in glob(path.join(path.dirname(__file__), pattern))
if '_expected' not in f)
converter = Converter(
FilesystemCollector(test_files),
target=MemoryTarget(),
writer=paqu_writer)
converted = list(converter.convert())
for test_file, output in zip(test_files, converted):
expected_filename = re.sub('\.(txt|xml|cha)$', '_expected.txt', test_file)
with open(expected_filename, mode='w', encoding='utf-8') as expected_file:
expected_file.write(output)
from test_enrich_lassy import get_enriched
with open('enrichment_expected.xml', mode='w', encoding='utf-8') as expected_file:
expected_file.write(get_enriched())
| true | true |
f730313f1a3cfec6df0cdc426961abe92340dec7 | 2,873 | py | Python | spiketoolkit/preprocessing/center.py | teristam/spiketoolk | 0ae7adabce46cf620c3627ee0093d890996ef355 | [
"MIT"
] | null | null | null | spiketoolkit/preprocessing/center.py | teristam/spiketoolk | 0ae7adabce46cf620c3627ee0093d890996ef355 | [
"MIT"
] | null | null | null | spiketoolkit/preprocessing/center.py | teristam/spiketoolk | 0ae7adabce46cf620c3627ee0093d890996ef355 | [
"MIT"
] | null | null | null | from spikeextractors import RecordingExtractor
from .transform import TransformRecording
import numpy as np
class CenterRecording(TransformRecording):
    """Transform recording that removes the per-channel offset (mean or median)
    from the traces.

    The offset is estimated from `n_snippets` equally spaced snippets whose
    combined duration is `seconds`; if `seconds` covers the whole recording,
    the full traces are used instead.
    """
    preprocessor_name = 'Center'
    def __init__(self, recording, mode, seconds, n_snippets):
        # mode: 'mean' or 'median'; seconds: total duration used for the
        # estimate; n_snippets: number of snippets the duration is split into.
        if not isinstance(recording, RecordingExtractor):
            raise ValueError("'recording' must be a RecordingExtractor")
        self._scalar = 1  # centering only scales by 1; we just shift by an offset
        self._mode = mode
        self._seconds = seconds
        self._n_snippets = n_snippets
        assert self._mode in ['mean', 'median'], "'mode' can be 'mean' or 'median'"
        # Use n_snippets of equal duration equally distributed over the recording.
        n_snippets = int(n_snippets)
        assert n_snippets > 0, "'n_snippets' must be positive"
        # Snippet length in frames (float; see NOTE below).
        snip_len = seconds / n_snippets * recording.get_sampling_frequency()
        if seconds * recording.get_sampling_frequency() >= recording.get_num_frames():
            # Requested duration covers the whole recording: use all traces.
            traces = recording.get_traces()
        else:
            # Skip the initial and final part of the recording.
            # NOTE(review): np.linspace yields float frame indices and snip_len
            # is a float; get_snippets presumably accepts/casts them — confirm
            # against the spikeextractors API.
            snip_start = np.linspace(snip_len // 2, recording.get_num_frames()-int(1.5*snip_len), n_snippets)
            traces_snippets = recording.get_snippets(reference_frames=snip_start, snippet_len=snip_len)
            traces_snippets = traces_snippets.swapaxes(0, 1)
            # Concatenate all snippets per channel: (n_channels, n_snippets * snip_len).
            traces = traces_snippets.reshape((traces_snippets.shape[0],
                                              traces_snippets.shape[1] * traces_snippets.shape[2]))
        # Offset is the negated per-channel statistic, so that adding it centers the traces.
        if self._mode == 'mean':
            self._offset = -np.mean(traces, axis=1)
        else:
            self._offset = -np.median(traces, axis=1)
        dtype = str(recording.get_dtype())
        if 'uint' in dtype:
            # Centering can produce negative values, so switch an unsigned
            # dtype to its signed counterpart (e.g. 'uint16' -> 'int16').
            if 'numpy' in dtype:
                dtype = str(dtype).replace("<class '", "").replace("'>", "")
                # Drop the 'numpy.' prefix, keeping only the dtype name.
                dtype = dtype.split('.')[1]
            dtype = dtype[1:]
        TransformRecording.__init__(self, recording, scalar=self._scalar, offset=self._offset, dtype=dtype)
        # Serialized constructor arguments for provenance/caching.
        self._kwargs = {'recording': recording.make_serialized_dict(), 'mode': mode, 'seconds': seconds,
                        'n_snippets': n_snippets}
def center(recording, mode='median', seconds=10., n_snippets=10):
    '''
    Subtract the per-channel offset (mean or median) from the traces.

    Parameters
    ----------
    recording: RecordingExtractor
        The recording extractor to be transformed
    mode: str
        'median' (default) or 'mean'
    seconds: float
        Total duration (in seconds) of the data used to estimate the offset
    n_snippets: int
        Number of equally spaced snippets over which 'seconds' is divided

    Returns
    -------
    center: CenterRecording
        The output recording extractor object
    '''
    return CenterRecording(recording=recording, mode=mode,
                           seconds=seconds, n_snippets=n_snippets)
| 41.637681 | 109 | 0.637313 | from spikeextractors import RecordingExtractor
from .transform import TransformRecording
import numpy as np
class CenterRecording(TransformRecording):
preprocessor_name = 'Center'
def __init__(self, recording, mode, seconds, n_snippets):
if not isinstance(recording, RecordingExtractor):
raise ValueError("'recording' must be a RecordingExtractor")
self._scalar = 1
self._mode = mode
self._seconds = seconds
self._n_snippets = n_snippets
assert self._mode in ['mean', 'median'], "'mode' can be 'mean' or 'median'"
n_snippets = int(n_snippets)
assert n_snippets > 0, "'n_snippets' must be positive"
snip_len = seconds / n_snippets * recording.get_sampling_frequency()
if seconds * recording.get_sampling_frequency() >= recording.get_num_frames():
traces = recording.get_traces()
else:
snip_start = np.linspace(snip_len // 2, recording.get_num_frames()-int(1.5*snip_len), n_snippets)
traces_snippets = recording.get_snippets(reference_frames=snip_start, snippet_len=snip_len)
traces_snippets = traces_snippets.swapaxes(0, 1)
traces = traces_snippets.reshape((traces_snippets.shape[0],
traces_snippets.shape[1] * traces_snippets.shape[2]))
if self._mode == 'mean':
self._offset = -np.mean(traces, axis=1)
else:
self._offset = -np.median(traces, axis=1)
dtype = str(recording.get_dtype())
if 'uint' in dtype:
if 'numpy' in dtype:
dtype = str(dtype).replace("<class '", "").replace("'>", "")
dtype = dtype.split('.')[1]
dtype = dtype[1:]
TransformRecording.__init__(self, recording, scalar=self._scalar, offset=self._offset, dtype=dtype)
self._kwargs = {'recording': recording.make_serialized_dict(), 'mode': mode, 'seconds': seconds,
'n_snippets': n_snippets}
def center(recording, mode='median', seconds=10., n_snippets=10):
return CenterRecording(recording=recording, mode=mode, seconds=seconds, n_snippets=n_snippets)
| true | true |
f7303176a948e5cc9fbd74ef4d04e4f617797080 | 3,283 | py | Python | dcgan/mnist/InceptionScore.py | DoubleE1/Keras-GAN | 775eb82b18cb146203295f19c937d4290de2953f | [
"MIT"
] | null | null | null | dcgan/mnist/InceptionScore.py | DoubleE1/Keras-GAN | 775eb82b18cb146203295f19c937d4290de2953f | [
"MIT"
] | null | null | null | dcgan/mnist/InceptionScore.py | DoubleE1/Keras-GAN | 775eb82b18cb146203295f19c937d4290de2953f | [
"MIT"
] | null | null | null | # calculate inception score for cifar-10 in Keras
import numpy as np
import matplotlib.pyplot as plt
from math import floor
from numpy import ones, expand_dims, log, mean, std, exp
from numpy.random import shuffle
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.datasets import cifar10
from skimage.transform import resize
from numpy import asarray
from PIL import Image
import os.path
from os import path
from IPython.display import clear_output
# scale an array of images to a new size
def scale_images(images, new_shape):
    """Resize every image in *images* to *new_shape* using nearest-neighbor
    interpolation (order 0) and return the results as one array."""
    return asarray([resize(image, new_shape, 0) for image in images])
def crop_center(img, box=(143, 58, 513, 427)):
    """Crop *img* to *box* and return the pixels as a numpy array.

    The box defaults to the previously hard-coded (left, top, right, bottom)
    region, so existing callers are unaffected; pass a different 4-tuple to
    crop another region.
    """
    left, top, right, bottom = box
    return np.asarray(img.crop((left, top, right, bottom)))
# assumes images have any shape and pixels in [0,255]
def calculate_inception_score(images, n_split=10, eps=1E-16):
    """Compute the Inception Score of *images* (pixels in [0, 255]).

    The images are split into `n_split` batches; for each batch the InceptionV3
    class posteriors p(y|x) are predicted and the per-batch score
    exp(mean KL(p(y|x) || p(y))) is computed.  Returns (mean, std) of the
    per-batch scores.  `eps` guards the logarithms against zero probabilities.
    """
    # Load the pretrained Inception v3 classifier (downloads weights on first use).
    model = InceptionV3()
    # Enumerate splits of images/predictions.
    scores = list()
    n_part = floor(images.shape[0] / n_split)
    for i in range(n_split):
        # Retrieve this batch of images.
        ix_start, ix_end = i * n_part, (i+1) * n_part
        subset = images[ix_start:ix_end]
        # Convert from uint8 to float32.
        print(i, ix_end, ix_start, n_part)
        subset = subset.astype('float32')
        # Scale images to the network input size.
        # NOTE(review): target shape (299, 299, 1) is single-channel, while
        # InceptionV3 expects 3-channel input — confirm this works for the
        # grayscale data being fed here.
        subset = scale_images(subset, (299,299,1))
        # Pre-process images: scale pixel values to [-1, 1].
        subset = preprocess_input(subset)
        # Predict class probabilities p(y|x).
        p_yx = model.predict(subset)
        # Marginal p(y), averaged over the batch.
        p_y = expand_dims(p_yx.mean(axis=0), 0)
        # KL divergence per image, computed via log probabilities.
        kl_d = p_yx * (log(p_yx + eps) - log(p_y + eps))
        # Sum over classes.
        sum_kl_d = kl_d.sum(axis=1)
        # Average over images in the batch.
        avg_kl_d = mean(sum_kl_d)
        # Undo the log to get the score.
        is_score = exp(avg_kl_d)
        # Store the per-batch score.
        scores.append(is_score)
    # Average and spread across batches.
    is_avg, is_std = mean(scores), std(scores)
    return is_avg, is_std
# Load generated MNIST images from disk and report their Inception Score.
image_path = "Keras-GAN/dcgan/mnist/single_mnist_images"
if path.exists(image_path):
    images = []
    # Walk two levels up the path to recover an ancestor directory name,
    # used only for the report line below.
    head_tail = path.split(image_path)
    for i in range(2):
        head_tail = head_tail[0]
        head_tail = path.split(head_tail)
    # FIX: the original used `if ~image_path.endswith('/')`. `~` is the
    # bitwise NOT, so `~True == -2` and `~False == -1` — both truthy — and
    # the branch always ran. Use boolean `not` instead.
    if not image_path.endswith('/'):
        image_path = image_path + '/'
    print(image_path)
    for i in range(5000):
        if path.exists(image_path + f"{i}.png"):
            new_image_path = image_path + f"{i}.png"
            print("Loaded image: ", f"{i}.png")
            img = Image.open(new_image_path)
            img = crop_center(img)
            # Append the cropped pixel array to the list.
            images.append(img)
            clear_output()
    # Stack the list into one array of shape (n_images, h, w[, c]).
    images = np.asarray(images)
    print(images.shape)
    # Average and standard deviation of the per-batch inception scores.
    is_avg, is_std = calculate_inception_score(images)
    print(f"The inception score for {head_tail[1]}")
    print('average inception score:', is_avg, 'standard deviation inception scores:', is_std)
else:
    print("Image path not found")
import numpy as np
import matplotlib.pyplot as plt
from math import floor
from numpy import ones, expand_dims, log, mean, std, exp
from numpy.random import shuffle
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.datasets import cifar10
from skimage.transform import resize
from numpy import asarray
from PIL import Image
import os.path
from os import path
from IPython.display import clear_output
def scale_images(images, new_shape):
images_list = list()
for image in images:
new_image = resize(image, new_shape, 0)
images_list.append(new_image)
return asarray(images_list)
def crop_center(img):
left = 143
top = 58
right = 513
bottom = 427
return np.asarray(img.crop((left, top, right, bottom)))
def calculate_inception_score(images, n_split=10, eps=1E-16):
model = InceptionV3()
scores = list()
n_part = floor(images.shape[0] / n_split)
for i in range(n_split):
ix_start, ix_end = i * n_part, (i+1) * n_part
subset = images[ix_start:ix_end]
print(i, ix_end, ix_start, n_part)
subset = subset.astype('float32')
subset = scale_images(subset, (299,299,1))
subset = preprocess_input(subset)
p_yx = model.predict(subset)
p_y = expand_dims(p_yx.mean(axis=0), 0)
kl_d = p_yx * (log(p_yx + eps) - log(p_y + eps))
sum_kl_d = kl_d.sum(axis=1)
avg_kl_d = mean(sum_kl_d)
is_score = exp(avg_kl_d)
scores.append(is_score)
is_avg, is_std = mean(scores), std(scores)
return is_avg, is_std
image_path = "Keras-GAN/dcgan/mnist/single_mnist_images"
if path.exists(image_path):
images = []
head_tail = path.split(image_path)
for i in range(2):
head_tail = head_tail[0]
head_tail = path.split(head_tail)
if ~image_path.endswith('/'):
image_path = image_path + '/'
print(image_path)
for i in range(5000):
if path.exists(image_path + str(f"{i}.png")):
new_image_path = image_path + str(f"{i}.png")
print("Loaded image: ", str(f"{i}.png"))
img = Image.open(new_image_path)
img = crop_center(img)
images.append(img)
clear_output()
images = np.asarray(images)
print(images.shape)
is_avg, is_std = calculate_inception_score(images)
print(f"The inception score for {head_tail[1]}")
print('average inception score:', is_avg, 'standard deviation inception scores:', is_std)
else:
print("Image path not found") | true | true |
f73032ee4c4acdb0ede3dd7e43679cf1876d488e | 2,431 | py | Python | theseus/utilities/cuda.py | kaylode/Custom-Template | b2f11bfacf2b03b793476a19781f9046fab6fd82 | [
"MIT"
] | 2 | 2022-02-18T04:41:29.000Z | 2022-03-12T09:04:14.000Z | theseus/utilities/cuda.py | kaylode/Custom-Template | b2f11bfacf2b03b793476a19781f9046fab6fd82 | [
"MIT"
] | 8 | 2022-02-16T17:01:28.000Z | 2022-03-28T02:53:45.000Z | theseus/utilities/cuda.py | kaylode/Custom-Template | b2f11bfacf2b03b793476a19781f9046fab6fd82 | [
"MIT"
] | 3 | 2022-02-13T05:00:13.000Z | 2022-03-02T00:11:27.000Z | """ CUDA / AMP utils
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from typing import Any
from theseus.utilities.loggers.observer import LoggerObserver
LOGGER = LoggerObserver.getLogger('main')
def get_devices_info(device_names="0"):
    """Return a human-readable description of the requested devices.

    Accepts either a comma-separated list of CUDA device ids (optionally
    prefixed with 'cuda:') or a name starting with 'cpu', for which the
    constant string "CPU" is returned.
    """
    if device_names.startswith('cpu'):
        return "CPU"
    if device_names.startswith('cuda'):
        device_names = device_names.split('cuda:')[1]
    lines = []
    for idx, dev_id in enumerate(device_names.split(',')):
        props = torch.cuda.get_device_properties(idx)
        # bytes -> MB
        lines.append(f"CUDA:{dev_id} ({props.name}, {props.total_memory / 1024 ** 2}MB)\n")
    return "".join(lines)
def get_device(name='cpu') -> torch.device:
    """Resolve *name* into a torch.device, falling back to the CPU (with a
    warning) when CUDA is requested but unavailable."""
    wants_cuda = name.startswith('cuda')
    if wants_cuda and not torch.cuda.is_available():
        LOGGER.text("CUDA is not available. Using CPU...", level=LoggerObserver.WARN)
        name = 'cpu'
    return torch.device(name)
def move_to(obj: Any, device: torch.device):
    """Recursively move tensors/modules inside *obj* to *device*.

    Credit: https://discuss.pytorch.org/t/pytorch-tensor-to-device-for-a-list-of-dict/66283

    Dicts, lists and tuples are rebuilt with every element moved; tensors and
    modules are moved via `.to(device)`; anything else is returned unchanged.
    """
    if torch.is_tensor(obj) or isinstance(obj, torch.nn.Module):
        return obj.to(device)
    if isinstance(obj, dict):
        return {key: move_to(val, device) for key, val in obj.items()}
    if isinstance(obj, list):
        return [move_to(item, device) for item in obj]
    if isinstance(obj, tuple):
        # Reuse the list branch, then restore the tuple type.
        return tuple(move_to(list(obj), device))
    return obj
def detach(obj: Any):
    """Recursively detach every tensor inside *obj* from the autograd graph.

    Credit: https://discuss.pytorch.org/t/pytorch-tensor-to-device-for-a-list-of-dict/66283

    Dicts, lists and tuples are rebuilt with every element detached.

    Raises
    ------
    TypeError
        If *obj* (or a nested element) is neither a tensor nor a supported
        container.
    """
    if torch.is_tensor(obj):
        return obj.detach()
    if isinstance(obj, dict):
        return {key: detach(val) for key, val in obj.items()}
    if isinstance(obj, list):
        return [detach(item) for item in obj]
    if isinstance(obj, tuple):
        # Reuse the list branch, then restore the tuple type.
        return tuple(detach(list(obj)))
    raise TypeError("Invalid type for detach")
from typing import Any
from theseus.utilities.loggers.observer import LoggerObserver
LOGGER = LoggerObserver.getLogger('main')
def get_devices_info(device_names="0"):
if device_names.startswith('cuda'):
device_names = device_names.split('cuda:')[1]
elif device_names.startswith('cpu'):
return "CPU"
devices_info = ""
for i, device_id in enumerate(device_names.split(',')):
p = torch.cuda.get_device_properties(i)
devices_info += f"CUDA:{device_id} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n"
return devices_info
def get_device(name='cpu') -> torch.device:
if name.startswith('cuda'):
if not torch.cuda.is_available():
LOGGER.text("CUDA is not available. Using CPU...", level=LoggerObserver.WARN)
name = 'cpu'
return torch.device(name)
def move_to(obj: Any, device: torch.device):
if torch.is_tensor(obj) or isinstance(obj, torch.nn.Module):
return obj.to(device)
if isinstance(obj, dict):
res = {k: move_to(v, device) for k, v in obj.items()}
return res
if isinstance(obj, list):
return [move_to(v, device) for v in obj]
if isinstance(obj, tuple):
return tuple(move_to(list(obj), device))
return obj
def detach(obj: Any):
if torch.is_tensor(obj):
return obj.detach()
if isinstance(obj, dict):
res = {k: detach(v) for k, v in obj.items()}
return res
if isinstance(obj, list):
return [detach(v) for v in obj]
if isinstance(obj, tuple):
return tuple(detach(list(obj)))
raise TypeError("Invalid type for detach") | true | true |
f73033cb691eb8edb9ba077278124a516a5d48f4 | 15,257 | py | Python | bingraphvis/angr/annotator.py | fanyao/bingraphvis | 72f3f6abc0c30bc14916325c886e43dbbd853a97 | [
"BSD-2-Clause"
] | 1 | 2018-11-19T11:03:29.000Z | 2018-11-19T11:03:29.000Z | bingraphvis/angr/annotator.py | fanyao/bingraphvis | 72f3f6abc0c30bc14916325c886e43dbbd853a97 | [
"BSD-2-Clause"
] | null | null | null | bingraphvis/angr/annotator.py | fanyao/bingraphvis | 72f3f6abc0c30bc14916325c886e43dbbd853a97 | [
"BSD-2-Clause"
] | 1 | 2018-11-19T11:03:30.000Z | 2018-11-19T11:03:30.000Z |
from ..base import *
import capstone
import pyvex
class AngrColorSimprocedures(NodeAnnotator):
    """Fill simprocedure nodes: reddish for terminator/unresolved stubs,
    grey for all other simprocedures."""
    _RED_NAMES = ('PathTerminator', 'ReturnUnconstrained', 'UnresolvableTarget')

    def __init__(self):
        super(AngrColorSimprocedures, self).__init__()

    def annotate_node(self, node):
        if not node.obj.is_simprocedure:
            return
        node.style = 'filled'
        if node.obj.simprocedure_name in self._RED_NAMES:
            node.fillcolor = '#ffcccc'
        else:
            node.fillcolor = '#dddddd'
class AngrColorExit(NodeAnnotator):
    """Fill non-simprocedure nodes light green when they are function exits:
    either they have an outgoing 'Ijk_Ret' edge, or no outgoing edge at all."""
    def __init__(self):
        super(AngrColorExit, self).__init__()

    def annotate_node(self, node):
        if node.obj.is_simprocedure:
            return
        has_out_edge = False
        for edge in self.graph.edges:
            if edge.src == node:
                has_out_edge = True
                if 'jumpkind' in edge.meta and edge.meta['jumpkind'] == 'Ijk_Ret':
                    node.style = 'filled'
                    node.fillcolor = '#ddffdd'
        if not has_out_edge:
            node.style = 'filled'
            node.fillcolor = '#ddffdd'
class AngrColorEntry(NodeAnnotator):
    """Fill non-simprocedure nodes light yellow when they are a function's
    entry block (node address equals its function address)."""
    def __init__(self):
        super(AngrColorEntry, self).__init__()

    def annotate_node(self, node):
        if node.obj.is_simprocedure:
            return
        is_entry = hasattr(node.obj, 'function_address') and node.obj.addr == node.obj.function_address
        if is_entry:
            node.style = 'filled'
            node.fillcolor = '#ffffcc'
class AngrColorEdgesVex(EdgeAnnotator):
    """Color CFG edges by their VEX jumpkind.

    Returns are grey (fake returns dashed), calls black (dotted when the call
    does not fall through to the edge target), conditional branches green
    (taken) / red (fallthrough), unconditional jumps blue, and anything that
    cannot be classified yellow.
    """
    EDGECOLOR_CONDITIONAL_TRUE = 'green'
    EDGECOLOR_CONDITIONAL_FALSE = 'red'
    EDGECOLOR_UNCONDITIONAL = 'blue'
    EDGECOLOR_CALL = 'black'
    EDGECOLOR_RET = 'grey'
    EDGECOLOR_UNKNOWN = 'yellow'
    def __init__(self):
        super(AngrColorEdgesVex, self).__init__()
    def annotate_edge(self, edge):
        """Set edge color/style from the 'jumpkind' metadata and, where
        available, the source node's VEX IR (edge.src.content['vex'])."""
        vex = None
        if 'jumpkind' in edge.meta:
            jk = edge.meta['jumpkind']
            if jk == 'Ijk_Ret':
                edge.color = self.EDGECOLOR_RET
            elif jk == 'Ijk_FakeRet':
                # Fake return (fallthrough after a call): same color, dashed.
                edge.color = self.EDGECOLOR_RET
                edge.style = 'dashed'
            elif jk == 'Ijk_Call':
                edge.color = self.EDGECOLOR_CALL
                if 'vex' in edge.src.content:
                    vex = edge.src.content['vex']['vex']
                    # Dotted when the block's single known successor is not
                    # this edge's target (i.e. the edge is not the direct call).
                    if len (vex.next.constants) == 1 and vex.next.constants[0].value != edge.dst.obj.addr:
                        edge.style='dotted'
            elif jk == 'Ijk_Boring':
                if 'vex' in edge.src.content:
                    vex = edge.src.content['vex']['vex']
                    if len(vex.constant_jump_targets) > 1:
                        # Conditional branch: vex.next is the fallthrough; the
                        # other constant target is the taken branch.
                        if len (vex.next.constants) == 1:
                            if edge.dst.obj.addr == vex.next.constants[0].value:
                                edge.color=self.EDGECOLOR_CONDITIONAL_FALSE
                            else:
                                edge.color=self.EDGECOLOR_CONDITIONAL_TRUE
                        else:
                            # Multiple/unknown next targets: cannot classify.
                            edge.color=self.EDGECOLOR_UNKNOWN
                    else:
                        edge.color=self.EDGECOLOR_UNCONDITIONAL
                else:
                    # No VEX available: assume an unconditional jump.
                    edge.color=self.EDGECOLOR_UNCONDITIONAL
            else:
                # Unhandled jumpkind (TODO: emit a warning).
                edge.color = self.EDGECOLOR_UNKNOWN
        else:
            edge.color = self.EDGECOLOR_UNKNOWN
class AngrPathAnnotator(EdgeAnnotator, NodeAnnotator):
    """Highlight (red, width 3) the nodes and edges traversed by a concrete
    angr path, based on its address trace."""
    def __init__(self, path):
        super(AngrPathAnnotator, self).__init__()
        self.path = path
        self.trace = list(path.addr_trace)

    def set_graph(self, graph):
        super(AngrPathAnnotator, self).set_graph(graph)
        self.vaddr = self.valid_addrs()
        # FIX: materialize the filtered trace as a list. On Python 3,
        # filter() returns a one-shot iterator that does not support the
        # slicing below, which raised TypeError.
        ftrace = [addr for addr in self.trace if addr in self.vaddr]
        # Consecutive address pairs = edges the path traversed.
        self.edges_hit = set(zip(ftrace[:-1], ftrace[1:]))

    def valid_addrs(self):
        """Return the set of basic-block addresses present in the graph."""
        vaddr = set()
        for n in self.graph.nodes:
            vaddr.add(n.obj.addr)
        return vaddr

    # TODO add caching
    def node_hit(self, node):
        """Heuristically check whether *node* (with its callstack context) was
        hit by the trace, by matching callstack entries against the reversed
        trace.

        NOTE(review): `si` is an index relative to the previously sliced list,
        not the full reversed trace — the original behavior is preserved here,
        but confirm this matching is intended.
        """
        ck = list(node.callstack_key)
        ck.append(node.addr)
        rtrace = list(reversed(self.trace))
        found = True
        si = 0
        for c in reversed(ck):
            if c is None:  # FIX: identity check instead of '== None'
                break
            try:
                si = rtrace[si:].index(c)
            except ValueError:  # FIX: catch only list.index's failure, not everything
                found = False
                break
        return found

    def annotate_edge(self, edge):
        key = (edge.src.obj.addr, edge.dst.obj.addr)
        if key in self.edges_hit and self.node_hit(edge.src.obj) and self.node_hit(edge.dst.obj):
            edge.width = 3
            edge.color = 'red'

    def annotate_node(self, node):
        if self.node_hit(node.obj):
            node.width = 3
            node.color = 'red'
class AngrBackwardSliceAnnotatorVex(ContentAnnotator):
    """Bold the VEX statements chosen by a backward slice, tag them with a
    '[*]' taint column, and color slice targets red."""
    def __init__(self, bs):
        super(AngrBackwardSliceAnnotatorVex, self).__init__('vex')
        self.bs = bs
        self.targets = set(self.bs._targets)

    def register(self, content):
        content.add_column_before('taint')

    def annotate_content(self, node, content):
        if node.obj.is_simprocedure or node.obj.is_syscall:
            return
        chosen = self.bs.chosen_statements[node.obj.addr]
        for idx, row in enumerate(content['data']):
            if idx not in chosen:
                continue
            row['addr']['style'] = 'B'
            row['statement']['style'] = 'B'
            row['taint'] = {'content': '[*]', 'style': 'B'}
            if (node.obj, idx) in self.targets:
                row['addr']['color'] = 'red'
                row['statement']['color'] = 'red'
class AngrBackwardSliceAnnotatorAsm(ContentAnnotator):
    """Bold the disassembly rows whose instructions contain statements chosen
    by a backward slice, and tag them with a '[*]' taint column."""
    def __init__(self, bs):
        super(AngrBackwardSliceAnnotatorAsm, self).__init__('asm')
        self.bs = bs
        self.targets = set(self.bs._targets)

    def register(self, content):
        content.add_column_before('taint')

    def annotate_content(self, node, content):
        if node.obj.is_simprocedure or node.obj.is_syscall:
            return
        chosen_stmts = self.bs.chosen_statements[node.obj.addr]
        # Map chosen VEX statement indices back to instruction addresses
        # by tracking the most recent IMark.
        vex_block = self.bs.project.factory.block(addr=node.obj.addr, size=node.obj.size).vex
        marked_addrs = set()
        current_addr = None
        for stmt_idx, stmt in enumerate(vex_block.statements):
            if isinstance(stmt, pyvex.stmt.IMark):
                current_addr = stmt.addr
            if stmt_idx in chosen_stmts:
                marked_addrs.add(current_addr)
        for row in content['data']:
            if row['_addr'] not in marked_addrs:
                continue
            row['addr']['style'] = 'B'
            row['mnemonic']['style'] = 'B'
            row['operands']['style'] = 'B'
            row['taint'] = {'content': '[*]', 'style': 'B'}
class AngrColorDDGStmtEdges(EdgeAnnotator):
    """Color statement-level DDG edges by dependency kind: tmp (blue),
    register (green), memory (red); other kinds are dotted and labeled."""
    def __init__(self, project=None):
        super(AngrColorDDGStmtEdges, self).__init__()
        self.project = project

    def annotate_edge(self, edge):
        if 'type' not in edge.meta:
            return
        kind = edge.meta['type']
        if kind == 'tmp':
            edge.color = 'blue'
            edge.label = 't' + str(edge.meta['data'])
        elif kind == 'reg':
            edge.color = 'green'
            data = edge.meta['data']
            # Prefer the architecture's register name when a project is available.
            if self.project:
                reg_name = self.project.arch.register_names[data.reg]
            else:
                reg_name = 'reg' + str(data.reg)
            edge.label = reg_name + " " + str(data.size)
        elif kind == 'mem':
            edge.color = 'red'
            edge.label = str(edge.meta['data'])
        else:
            edge.label = kind
            edge.style = 'dotted'
class AngrColorDDGData(EdgeAnnotator, NodeAnnotator):
    """Color data-dependence edges by type (kill=red, mem_addr=dotted blue,
    mem_data=blue, other=yellow) and fill initial nodes light green."""
    def __init__(self, project=None, labels=False):
        super(AngrColorDDGData, self).__init__()
        self.project = project
        self.labels = labels

    def annotate_edge(self, edge):
        if 'type' not in edge.meta:
            return
        kind = edge.meta['type']
        if kind == 'kill':
            edge.color = 'red'
        elif kind == 'mem_addr':
            edge.color = 'blue'
            edge.style = 'dotted'
        elif kind == 'mem_data':
            edge.color = 'blue'
        else:
            edge.color = 'yellow'
        if self.labels:
            edge.label = kind

    def annotate_node(self, node):
        if node.obj.initial:
            node.style = 'filled'
            node.fillcolor = '#ccffcc'
class AngrActionAnnotatorVex(ContentAnnotator):
    """Annotate VEX statement rows with the SimActionData recorded for them in
    the node's first final state (action type/size, address, data)."""
    def __init__(self):
        super(AngrActionAnnotatorVex, self).__init__('vex')
    def register(self, content):
        content.add_column_after('action_type')
        content.add_column_after('action_addr')
        content.add_column_after('action_data')
    def annotate_content(self, node, content):
        # Imported lazily so the module loads without simuvex installed.
        from simuvex.s_action import SimActionData
        if node.obj.is_simprocedure or node.obj.is_syscall:
            return
        # NOTE(review): only the first final state is inspected; actions from
        # other successor states are ignored — confirm this is intended.
        if len(node.obj.final_states) > 0:
            state = node.obj.final_states[0]
            for action in state.log.actions:
                if isinstance(action, SimActionData):
                    # Attach the action to the VEX statement row it belongs to.
                    c = content['data'][action.stmt_idx]
                    c['action_type'] = {
                        'content': action.type+"/"+action.action+"("+str(action.size.ast)+")",
                        'align': 'LEFT'
                    }
                    # TODO: string-compare against 'None' is fragile; a proper
                    # `is not None` check on the action attribute would be cleaner.
                    if str(action.addr) != 'None':
                        c['action_addr'] = {
                            'content': str(action.addr.ast),
                            'align': 'LEFT'
                        }
                    if str(action.data) != 'None':
                        c['action_data'] = {
                            'content': str(action.data.ast),
                            'align': 'LEFT'
                        }
#EXPERIMENTAL
class AngrCodelocLogAnnotator(ContentAnnotator):
    """EXPERIMENTAL: append a 'log' column to VEX statement rows, filled from
    a code-location log keyed by (block address, statement index)."""
    def __init__(self, cllog):
        super(AngrCodelocLogAnnotator, self).__init__('vex')
        self.cllog = cllog

    def register(self, content):
        content.add_column_after('log')

    def annotate_content(self, node, content):
        if node.obj.is_simprocedure or node.obj.is_syscall:
            return
        for stmt_idx, row in enumerate(content['data']):
            key = (node.obj.addr, stmt_idx)
            if key in self.cllog:
                row['log'] = {'content': self.cllog[key], 'align': 'LEFT'}
class AngrCommentsAsm(ContentAnnotator):
    """Add assembler-style comments to disassembly rows, resolving concrete
    data/address/jump-target values of recorded actions against the project's
    knowledge-base labels."""
    def __init__(self, project):
        super(AngrCommentsAsm, self).__init__('asm')
        self.project = project
    def register(self, content):
        content.add_column_after('comment')
    def annotate_content(self, node, content):
        if node.obj.is_simprocedure or node.obj.is_syscall:
            return
        # Map instruction address -> comment text built from the actions of
        # the node's first final state.
        comments_by_addr = {}
        if len(node.obj.final_states) > 0:
            state = node.obj.final_states[0]
            for action in state.log.actions:
                label = ''
                if action.type == 'mem' or action.type == 'reg':
                    # Resolve concrete data values against known labels.
                    if isinstance(action.data.ast, int) or action.data.ast.concrete:
                        d = state.se.any_int(action.data.ast)
                        if d in self.project.kb.labels:
                            label += 'data=' + self.project.kb.labels[d] + ' '
                    # Resolve concrete addresses against known labels.
                    if isinstance(action.addr.ast, int) or action.addr.ast.concrete:
                        a = state.se.any_int(action.addr.ast)
                        if a in self.project.kb.labels:
                            label += 'addr=' + self.project.kb.labels[a] + ' '
                if action.type == 'exit':
                    # Resolve concrete jump targets against known labels.
                    if action.target.ast.concrete:
                        a = state.se.any_int(action.target.ast)
                        if a in self.project.kb.labels:
                            label += self.project.kb.labels[a] + ' '
                if label != '':
                    comments_by_addr[action.ins_addr] = label
        # Attach the collected comments to the matching disassembly rows,
        # truncated to 100 characters and appended to any existing comment.
        for k in content['data']:
            ins = k['_ins']
            if ins.address in comments_by_addr:
                if not ('comment' in k and 'content' in k['comment']):
                    k['comment'] = {
                        'content': "; " + comments_by_addr[ins.address][:100]
                    }
                else:
                    k['comment']['content'] += ", " + comments_by_addr[ins.address][:100]
                k['comment']['color'] = 'gray'
                k['comment']['align'] = 'LEFT'
class AngrCommentsDataRef(ContentAnnotator):
    """Comment disassembly rows with the contents of string data references
    accessed by the corresponding instruction."""
    def __init__(self, project):
        super(AngrCommentsDataRef, self).__init__('asm')
        self.project = project

    def register(self, content):
        content.add_column_after('comment')

    def annotate_content(self, node, content):
        if node.obj.is_simprocedure or node.obj.is_syscall:
            return
        # Map instruction address -> referenced string content.
        comments = {}
        for ref in node.obj.accessed_data_references:
            if ref.sort == 'string':
                comments[ref.insn_addr] = ref.content
        for row in content['data']:
            addr = row['_ins'].address
            if addr not in comments:
                continue
            snippet = comments[addr][:100]
            if 'comment' in row and 'content' in row['comment']:
                row['comment']['content'] += ", " + snippet
            else:
                row['comment'] = {'content': "; " + snippet}
            row['comment']['color'] = 'gray'
            row['comment']['align'] = 'LEFT'
class AngrVariables(ContentAnnotator):
    """Prepend a 'variables' column listing the memory variables the knowledge
    base associates with each instruction (identifiers included in debug mode)."""
    def __init__(self, project, debug=False):
        super(AngrVariables, self).__init__('asm')
        self.project = project
        self.debug = debug

    def register(self, content):
        content.add_column_before('variables')

    def annotate_content(self, node, content):
        if node.obj.is_simprocedure or node.obj.is_syscall:
            return
        var_mgr = self.project.kb.variables[node.obj.function_address]
        for row in content['data']:
            ins = row['_ins']
            found = var_mgr.find_variables_by_insn(ins.address, 'memory')
            if not found:
                continue
            for var in found:
                if 'variables' not in row:
                    row['variables'] = {'content': ''}
                suffix = ' (' + var[0].ident + ')' if self.debug else ''
                row['variables']['content'] += repr(var[0].name + suffix)
                row['variables']['color'] = 'lightblue'
                row['variables']['align'] = 'LEFT'
| 35.235566 | 124 | 0.509274 |
from ..base import *
import capstone
import pyvex
class AngrColorSimprocedures(NodeAnnotator):
def __init__(self):
super(AngrColorSimprocedures, self).__init__()
def annotate_node(self, node):
if node.obj.is_simprocedure:
if node.obj.simprocedure_name in ['PathTerminator','ReturnUnconstrained','UnresolvableTarget']:
node.style = 'filled'
node.fillcolor = '#ffcccc'
else:
node.style = 'filled'
node.fillcolor = '#dddddd'
class AngrColorExit(NodeAnnotator):
def __init__(self):
super(AngrColorExit, self).__init__()
def annotate_node(self, node):
if not node.obj.is_simprocedure:
found = False
for e in self.graph.edges:
if e.src == node:
found = True
if 'jumpkind' in e.meta and e.meta['jumpkind'] == 'Ijk_Ret':
node.style = 'filled'
node.fillcolor = '#ddffdd'
if not found:
node.style = 'filled'
node.fillcolor = '#ddffdd'
class AngrColorEntry(NodeAnnotator):
def __init__(self):
super(AngrColorEntry, self).__init__()
def annotate_node(self, node):
if not node.obj.is_simprocedure:
if hasattr(node.obj, 'function_address') and node.obj.addr == node.obj.function_address:
node.style = 'filled'
node.fillcolor = '#ffffcc'
class AngrColorEdgesVex(EdgeAnnotator):
EDGECOLOR_CONDITIONAL_TRUE = 'green'
EDGECOLOR_CONDITIONAL_FALSE = 'red'
EDGECOLOR_UNCONDITIONAL = 'blue'
EDGECOLOR_CALL = 'black'
EDGECOLOR_RET = 'grey'
EDGECOLOR_UNKNOWN = 'yellow'
def __init__(self):
super(AngrColorEdgesVex, self).__init__()
def annotate_edge(self, edge):
vex = None
if 'jumpkind' in edge.meta:
jk = edge.meta['jumpkind']
if jk == 'Ijk_Ret':
edge.color = self.EDGECOLOR_RET
elif jk == 'Ijk_FakeRet':
edge.color = self.EDGECOLOR_RET
edge.style = 'dashed'
elif jk == 'Ijk_Call':
edge.color = self.EDGECOLOR_CALL
if 'vex' in edge.src.content:
vex = edge.src.content['vex']['vex']
if len (vex.next.constants) == 1 and vex.next.constants[0].value != edge.dst.obj.addr:
edge.style='dotted'
elif jk == 'Ijk_Boring':
if 'vex' in edge.src.content:
vex = edge.src.content['vex']['vex']
if len(vex.constant_jump_targets) > 1:
if len (vex.next.constants) == 1:
if edge.dst.obj.addr == vex.next.constants[0].value:
edge.color=self.EDGECOLOR_CONDITIONAL_FALSE
else:
edge.color=self.EDGECOLOR_CONDITIONAL_TRUE
else:
edge.color=self.EDGECOLOR_UNKNOWN
else:
edge.color=self.EDGECOLOR_UNCONDITIONAL
else:
edge.color=self.EDGECOLOR_UNCONDITIONAL
else:
edge.color = self.EDGECOLOR_UNKNOWN
else:
edge.color = self.EDGECOLOR_UNKNOWN
class AngrPathAnnotator(EdgeAnnotator, NodeAnnotator):
def __init__(self, path):
super(AngrPathAnnotator, self).__init__()
self.path = path
self.trace = list(path.addr_trace)
def set_graph(self, graph):
super(AngrPathAnnotator, self).set_graph(graph)
self.vaddr = self.valid_addrs()
ftrace = filter(lambda _: _ in self.vaddr, self.trace)
self.edges_hit = set(zip(ftrace[:-1], ftrace[1:]))
def valid_addrs(self):
vaddr = set()
for n in self.graph.nodes:
vaddr.add(n.obj.addr)
return vaddr
def node_hit(self, node):
ck = list(node.callstack_key)
ck.append(node.addr)
rtrace = list(reversed(self.trace))
found = True
si = 0
for c in reversed(ck):
if c == None:
break
try:
si = rtrace[si:].index(c)
except:
found = False
break
return found
def annotate_edge(self, edge):
key = (edge.src.obj.addr, edge.dst.obj.addr)
if key in self.edges_hit and self.node_hit(edge.src.obj) and self.node_hit(edge.dst.obj):
edge.width = 3
edge.color = 'red'
def annotate_node(self, node):
if self.node_hit(node.obj):
node.width = 3
node.color = 'red'
class AngrBackwardSliceAnnotatorVex(ContentAnnotator):
def __init__(self, bs):
super(AngrBackwardSliceAnnotatorVex, self).__init__('vex')
self.bs = bs
self.targets = set(self.bs._targets)
def register(self, content):
content.add_column_before('taint')
def annotate_content(self, node, content):
if node.obj.is_simprocedure or node.obj.is_syscall:
return
st = self.bs.chosen_statements[node.obj.addr]
for k in range(len(content['data'])):
c = content['data'][k]
if k in st:
c['addr']['style'] = 'B'
c['statement']['style'] = 'B'
c['taint'] = {
'content':'[*]',
'style':'B'
}
if (node.obj, k) in self.targets:
c['addr']['color'] = 'red'
c['statement']['color'] = 'red'
class AngrBackwardSliceAnnotatorAsm(ContentAnnotator):
def __init__(self, bs):
super(AngrBackwardSliceAnnotatorAsm, self).__init__('asm')
self.bs = bs
self.targets = set(self.bs._targets)
def register(self, content):
content.add_column_before('taint')
def annotate_content(self, node, content):
if node.obj.is_simprocedure or node.obj.is_syscall:
return
st = self.bs.chosen_statements[node.obj.addr]
staddr = set()
vex = self.bs.project.factory.block(addr=node.obj.addr, size=node.obj.size).vex
caddr = None
for j, s in enumerate(vex.statements):
if isinstance(s, pyvex.stmt.IMark):
caddr = s.addr
if j in st:
staddr.add(caddr)
for c in content['data']:
if c['_addr'] in staddr:
c['addr']['style'] = 'B'
c['mnemonic']['style'] = 'B'
c['operands']['style'] = 'B'
c['taint'] = {
'content':'[*]',
'style':'B'
}
class AngrColorDDGStmtEdges(EdgeAnnotator):
    """Colour statement-level DDG edges by dependency type (tmp/reg/mem)."""

    def __init__(self, project=None):
        super(AngrColorDDGStmtEdges, self).__init__()
        self.project = project  # optional; enables symbolic register names

    def annotate_edge(self, edge):
        # Edges without a recorded dependency type are left untouched.
        if 'type' not in edge.meta:
            return
        dep_type = edge.meta['type']
        if dep_type == 'tmp':
            edge.color = 'blue'
            edge.label = 't' + str(edge.meta['data'])
        elif dep_type == 'reg':
            edge.color = 'green'
            data = edge.meta['data']
            if self.project:
                reg_name = self.project.arch.register_names[data.reg]
            else:
                reg_name = "reg" + str(data.reg)
            edge.label = reg_name + " " + str(data.size)
        elif dep_type == 'mem':
            edge.color = 'red'
            edge.label = str(edge.meta['data'])
        else:
            # Unknown dependency kind: show it verbatim, dotted.
            edge.label = dep_type
            edge.style = 'dotted'
class AngrColorDDGData(EdgeAnnotator, NodeAnnotator):
    """Colour data-dependence edges and highlight initial nodes of a data DDG."""

    def __init__(self, project=None, labels=False):
        super(AngrColorDDGData, self).__init__()
        self.project = project
        self.labels = labels  # when True, label every typed edge

    def annotate_edge(self, edge):
        if 'type' not in edge.meta:
            return
        dep_type = edge.meta['type']
        if dep_type == 'kill':
            edge.color = 'red'
        elif dep_type == 'mem_addr':
            edge.color = 'blue'
            edge.style = 'dotted'
        elif dep_type == 'mem_data':
            edge.color = 'blue'
        else:
            edge.color = 'yellow'
        if self.labels:
            edge.label = dep_type

    def annotate_node(self, node):
        # Mark initial definitions with a light green fill.
        if node.obj.initial:
            node.fillcolor = '#ccffcc'
            node.style = 'filled'
class AngrActionAnnotatorVex(ContentAnnotator):
    """Annotate VEX rows with SimActionData details (type, address, data).

    Reads actions from the first final state of each node, if one exists.
    """

    def __init__(self):
        super(AngrActionAnnotatorVex, self).__init__('vex')

    def register(self, content):
        content.add_column_after('action_type')
        content.add_column_after('action_addr')
        content.add_column_after('action_data')

    def annotate_content(self, node, content):
        from simuvex.s_action import SimActionData

        if node.obj.is_simprocedure or node.obj.is_syscall:
            return
        if len(node.obj.final_states) == 0:
            return

        state = node.obj.final_states[0]
        for action in state.log.actions:
            if not isinstance(action, SimActionData):
                continue
            row = content['data'][action.stmt_idx]
            row['action_type'] = {
                'content': action.type + "/" + action.action + "(" + str(action.size.ast) + ")",
                'align': 'LEFT'
            }
            # addr/data may be absent; their str() is 'None' in that case.
            if str(action.addr) != 'None':
                row['action_addr'] = {
                    'content': str(action.addr.ast),
                    'align': 'LEFT'
                }
            if str(action.data) != 'None':
                row['action_data'] = {
                    'content': str(action.data.ast),
                    'align': 'LEFT'
                }
class AngrCodelocLogAnnotator(ContentAnnotator):
    """Append a log column to VEX rows, keyed by (block addr, statement index)."""

    def __init__(self, cllog):
        super(AngrCodelocLogAnnotator, self).__init__('vex')
        self.cllog = cllog  # mapping (addr, stmt_idx) -> log text

    def register(self, content):
        content.add_column_after('log')

    def annotate_content(self, node, content):
        if node.obj.is_simprocedure or node.obj.is_syscall:
            return
        # Idiomatic enumerate() instead of indexing via range(len(...)).
        for k, c in enumerate(content['data']):
            key = (node.obj.addr, k)
            if key in self.cllog:
                c['log'] = {
                    'content': self.cllog[key],
                    'align': 'LEFT'
                }
class AngrCommentsAsm(ContentAnnotator):
    """Add a comment column to disassembly rows for known data/address labels.

    Concrete values observed in the first final state's actions are resolved
    through ``project.kb.labels`` and rendered as ``; ...`` comments.
    """

    def __init__(self, project):
        super(AngrCommentsAsm, self).__init__('asm')
        self.project = project

    def register(self, content):
        content.add_column_after('comment')

    def annotate_content(self, node, content):
        if node.obj.is_simprocedure or node.obj.is_syscall:
            return

        comments_by_addr = {}
        if len(node.obj.final_states) > 0:
            state = node.obj.final_states[0]
            for action in state.log.actions:
                label = ''
                # Idiom: membership test instead of chained `==` comparisons.
                if action.type in ('mem', 'reg'):
                    # Resolve concrete data/address values to known labels.
                    if isinstance(action.data.ast, int) or action.data.ast.concrete:
                        d = state.se.any_int(action.data.ast)
                        if d in self.project.kb.labels:
                            label += 'data=' + self.project.kb.labels[d] + ' '
                    if isinstance(action.addr.ast, int) or action.addr.ast.concrete:
                        a = state.se.any_int(action.addr.ast)
                        if a in self.project.kb.labels:
                            label += 'addr=' + self.project.kb.labels[a] + ' '
                if action.type == 'exit':
                    if action.target.ast.concrete:
                        a = state.se.any_int(action.target.ast)
                        if a in self.project.kb.labels:
                            label += self.project.kb.labels[a] + ' '
                if label != '':
                    comments_by_addr[action.ins_addr] = label

        self._apply_comments(content, comments_by_addr)

    @staticmethod
    def _apply_comments(content, comments_by_addr):
        """Write collected labels into each row's comment cell (capped at 100 chars)."""
        for k in content['data']:
            ins = k['_ins']
            if ins.address in comments_by_addr:
                if not ('comment' in k and 'content' in k['comment']):
                    k['comment'] = {
                        'content': "; " + comments_by_addr[ins.address][:100]
                    }
                else:
                    k['comment']['content'] += ", " + comments_by_addr[ins.address][:100]
                k['comment']['color'] = 'gray'
                k['comment']['align'] = 'LEFT'
class AngrCommentsDataRef(ContentAnnotator):
    """Comment disassembly rows with string contents of accessed data references."""

    def __init__(self, project):
        super(AngrCommentsDataRef, self).__init__('asm')
        self.project = project

    def register(self, content):
        content.add_column_after('comment')

    def annotate_content(self, node, content):
        if node.obj.is_simprocedure or node.obj.is_syscall:
            return

        # Collect string data refs keyed by the referencing instruction address.
        comments_by_addr = {
            dr.insn_addr: dr.content
            for dr in node.obj.accessed_data_references
            if dr.sort == 'string'
        }

        for row in content['data']:
            ins = row['_ins']
            if ins.address not in comments_by_addr:
                continue
            text = comments_by_addr[ins.address][:100]
            if not ('comment' in row and 'content' in row['comment']):
                row['comment'] = {
                    'content': "; " + text
                }
            else:
                row['comment']['content'] += ", " + text
            row['comment']['color'] = 'gray'
            row['comment']['align'] = 'LEFT'
class AngrVariables(ContentAnnotator):
    """Prepend a column listing memory variables attached to each instruction."""

    def __init__(self, project, debug=False):
        super(AngrVariables, self).__init__('asm')
        self.project = project
        self.debug = debug  # when True, also show each variable's internal ident

    def register(self, content):
        content.add_column_before('variables')

    def annotate_content(self, node, content):
        if node.obj.is_simprocedure or node.obj.is_syscall:
            return
        vm = self.project.kb.variables[node.obj.function_address]
        for k in content['data']:
            ins = k['_ins']
            # Renamed from `vars` to avoid shadowing the builtin.
            found_vars = vm.find_variables_by_insn(ins.address, 'memory')
            if found_vars:
                for var in found_vars:
                    if 'variables' not in k:
                        k['variables'] = {'content': ''}
                    k['variables']['content'] += repr(var[0].name + (' (' + var[0].ident + ')' if self.debug else ''))
                k['variables']['color'] = 'lightblue'
                k['variables']['align'] = 'LEFT'
| true | true |
f73033d88a29a228548a8549a992ff3da490ca17 | 3,826 | py | Python | .history/functions_20211223153653.py | tjxj/pdf2docx | 3338a95f1a971de8caf0369fa3ce794d2d6d57cd | [
"Apache-2.0"
] | null | null | null | .history/functions_20211223153653.py | tjxj/pdf2docx | 3338a95f1a971de8caf0369fa3ce794d2d6d57cd | [
"Apache-2.0"
] | null | null | null | .history/functions_20211223153653.py | tjxj/pdf2docx | 3338a95f1a971de8caf0369fa3ce794d2d6d57cd | [
"Apache-2.0"
] | null | null | null | import streamlit as st
import base64
import os
import time
from pdf2docx import Converter
import tempfile
from pathlib import Path
import streamlit as st
from pdf2image import convert_from_path
def show_pdf(uploaded_file):
    """Render an uploaded PDF inline inside an expander and show its metadata."""
    with st.expander("Original PDF file"):
        # Embed the PDF as a base64 data URI so the browser can display it.
        encoded = base64.b64encode(uploaded_file.read()).decode("utf-8")
        viewer = f'<embed src="data:application/pdf;base64,{encoded}" width="100%" height="600" type="application/pdf">'
        st.markdown(viewer, unsafe_allow_html=True)

        st.write({
            "filename": uploaded_file.name,
            "filetype": uploaded_file.type,
            "filesize": uploaded_file.size,
        })
        st.write(uploaded_file.name)
## Converted images from PDF -- pdf转图片
def pdf2pic(uploaded_file):
    """Convert an uploaded PDF to images and display them in an expander.

    The upload is spooled to a named temporary file because pdf2image's
    ``convert_from_path`` needs a real filesystem path.
    """
    # delete=False so the file can be re-opened by path on all platforms
    # (Windows cannot reopen a NamedTemporaryFile while it is still open).
    with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
        fp = Path(tmp_file.name)
        fp.write_bytes(uploaded_file.getvalue())
    try:
        imgs = convert_from_path(tmp_file.name)
    finally:
        # Fix: the original leaked the temporary file on disk.
        os.unlink(tmp_file.name)

    with st.expander("Converted images from PDF"):
        st.image(imgs)
class FileDownloader(object):
    """Build a base64 download link for textual data and render it via Streamlit.

    Usage: ``FileDownloader(data, filename, file_ext).download()``
    """

    def __init__(self, data, filename="myfile", file_ext="pdf"):
        super(FileDownloader, self).__init__()
        self.data = data          # textual payload; encoded to bytes on download
        self.filename = filename
        self.file_ext = file_ext

    def download(self):
        """Emit an HTML anchor that downloads the data as a timestamped file."""
        b64 = base64.b64encode(self.data.encode()).decode()
        # Fix: `timestr` was an undefined global (NameError on every call);
        # derive the timestamp here instead.
        timestr = time.strftime("%Y%m%d-%H%M%S")
        new_filename = "{}_{}_.{}".format(self.filename, timestr, self.file_ext)
        st.markdown("#### Download File ###")
        href = f'<a href="data:file/{self.file_ext};base64,{b64}" download="{new_filename}">Click Here!!</a>'
        st.markdown(href, unsafe_allow_html=True)
def save_uploadedfile(uploadedfile):
    """Persist a Streamlit upload under ./tempDir and report success in the UI."""
    # Fix: create the target directory if it is missing instead of crashing
    # with FileNotFoundError on the open() below.
    os.makedirs("tempDir", exist_ok=True)
    with open(os.path.join("tempDir", uploadedfile.name), "wb") as f:
        f.write(uploadedfile.getbuffer())
    return st.success("Saved File:{} to tempDir".format(uploadedfile.name))
def convert2docx(uploaded_file):
    """Convert an uploaded PDF into a .docx document using pdf2docx."""
    cv = Converter(uploaded_file)
    try:
        cv.convert(start=0, end=None)
    finally:
        # Fix: release the underlying PDF handle; the original never closed
        # the Converter, leaking the open file.
        cv.close()
# tables = Converter.extract_tables(pdf_file, start=0, end=1)
# for table in tables:
# print(table)
# streamlit.download_button(label, data, file_name=None, mime=None, key=None, help=None, on_click=None, args=None, kwargs=None)
# 这是官方示例
# @st.cache
# def convert_df(df):
# # IMPORTANT: Cache the conversion to prevent computation on every rerun
# return df.to_csv().encode('utf-8')
# csv = convert_df(my_large_df)
# st.download_button(
# label="Download data as CSV",
# data=csv,
# file_name='large_df.csv',
# mime='text/csv',
# )
import os
from PyPDF2 import PdfFileWriter, PdfFileReader, PdfFileMerger
def pdf_split(pdf_in, pdf_out, start, end):
    """Extract pages [start, end) from ``pdf_in`` and write them to ``pdf_out``.

    Page indices are zero-based and ``end`` is exclusive.
    """
    output = PdfFileWriter()
    with open(pdf_in, "rb") as in_pdf:
        pdf_file = PdfFileReader(in_pdf)
        for i in range(start, end):
            output.addPage(pdf_file.getPage(i))
        # Fix: open for (over)write. The original used append mode ("ab"),
        # which concatenates a second full PDF onto an existing file and
        # produces a corrupt document.
        with open(pdf_out, "wb") as out_pdf:
            output.write(out_pdf)
# if __name__ == '__main__':
# pdf_in = '待分割pdf'
# pdf_out = '分割后pdf'
# s,e = pi,po
# pdf_split(pi, po, s, e)
def pdf_merger(in_pdfs, out_pdf):
    """Concatenate the PDFs listed in ``in_pdfs`` (in order) into ``out_pdf``."""
    merger = PdfFileMerger()
    for path in in_pdfs:
        with open(path, "rb") as pdf:
            merger.append(PdfFileReader(pdf))
    merger.write(out_pdf)
# if __name__ == "__main__":
# in_pdfs = ["放要合并的PDF文件名称,注意顺序"]
# out_pdf = "输出文件"
# pdf_merger(in_pdfs, out_pdf)
def body():
    """Render the sidebar feature selector and dispatch to the chosen tool."""
    st.sidebar.subheader("请选择功能")
    feature = st.sidebar.selectbox(
        "", ("PDF转word", "PDF转图片", "PDF分割", "PDF合并", "从PDF抽取图片", "从PDF抽取表格")
    )
    # Fix: compare string values with `==`, not identity (`is`), which is
    # implementation-dependent for strings and not guaranteed to match.
    if feature == "PDF转图片":
        pdf2pic()
    # NOTE(review): pdf2pic() takes an uploaded_file argument elsewhere in this
    # module -- this call looks incomplete; confirm the intended wiring.
| 28.552239 | 128 | 0.65081 | import streamlit as st
import base64
import os
import time
from pdf2docx import Converter
import tempfile
from pathlib import Path
import streamlit as st
from pdf2image import convert_from_path
def show_pdf(uploaded_file):
with st.expander("Original PDF file"):
base64_pdf = base64.b64encode(uploaded_file.read()).decode("utf-8")
pdf_display = f'<embed src="data:application/pdf;base64,{base64_pdf}" width="100%" height="600" type="application/pdf">'
st.markdown(pdf_display, unsafe_allow_html=True)
file_details = {
"filename": uploaded_file.name,
"filetype": uploaded_file.type,
"filesize": uploaded_file.size,
}
st.write(file_details)
st.write(uploaded_file.name)
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
fp = Path(tmp_file.name)
fp.write_bytes(uploaded_file.getvalue())
imgs = convert_from_path(tmp_file.name)
with st.expander("Converted images from PDF"):
st.image(imgs)
class FileDownloader(object):
def __init__(self, data, filename="myfile", file_ext="pdf"):
super(FileDownloader, self).__init__()
self.data = data
self.filename = filename
self.file_ext = file_ext
def download(self):
b64 = base64.b64encode(self.data.encode()).decode()
new_filename = "{}_{}_.{}".format(self.filename, timestr, self.file_ext)
st.markdown("#### Download File ###")
href = f'<a href="data:file/{self.file_ext};base64,{b64}" download="{new_filename}">Click Here!!</a>'
st.markdown(href, unsafe_allow_html=True)
def save_uploadedfile(uploadedfile):
with open(os.path.join("tempDir", uploadedfile.name), "wb") as f:
f.write(uploadedfile.getbuffer())
return st.success("Saved File:{} to tempDir".format(uploadedfile.name))
def convert2docx(uploaded_file):
cv = Converter(uploaded_file)
docx_file = cv.convert(start=0, end=None)
PdfFileMerger
def pdf_split(pdf_in, pdf_out, start, end):
output = PdfFileWriter()
with open(pdf_in, "rb") as in_pdf:
pdf_file = PdfFileReader(in_pdf)
for i in range(start, end):
output.addPage(pdf_file.getPage(i))
with open(pdf_out, "ab") as out_pdf:
output.write(out_pdf)
def pdf_merger(in_pdfs, out_pdf):
merger = PdfFileMerger()
for in_pdf in in_pdfs:
with open(in_pdf, "rb") as pdf:
merger.append(PdfFileReader(pdf))
merger.write(out_pdf)
def body():
st.sidebar.subheader("请选择功能")
feature = st.sidebar.selectbox(
"", ("PDF转word", "PDF转图片", "PDF分割", "PDF合并", "从PDF抽取图片", "从PDF抽取表格")
)
if feature is "PDF转图片":
pdf2pic()
| true | true |
f730342ab5ef9de61150bbe987c938e724c98ab0 | 793 | py | Python | taxcli/helper/invoice_files.py | Nukesor/taxcli | f313acd2f1c9a551361a535f8428a17c53e6b468 | [
"MIT"
] | null | null | null | taxcli/helper/invoice_files.py | Nukesor/taxcli | f313acd2f1c9a551361a535f8428a17c53e6b468 | [
"MIT"
] | null | null | null | taxcli/helper/invoice_files.py | Nukesor/taxcli | f313acd2f1c9a551361a535f8428a17c53e6b468 | [
"MIT"
] | null | null | null | import os
def get_invoice_files(invoices, year=False):
    """Export the stored file blob of each invoice into a per-type folder.

    Files are written relative to the current working directory into
    ``<invoice_type>/`` (or ``afa/`` for afa invoices), named
    ``<alias>-<number>-<isodate>.<ext>``.

    :param invoices: iterable of invoice records carrying ``invoice_file``.
    :param year: unused; kept for backward compatibility with existing callers.
    """
    for invoice in invoices:
        if invoice.invoice_file:
            # Get folder for this invoice and create it if it doesn't exist
            if not invoice.afa:
                folder = invoice.invoice_type.name
            else:
                folder = 'afa'
            # Fix: makedirs(exist_ok=True) avoids the check-then-create race
            # and also creates missing parent directories.
            os.makedirs(folder, exist_ok=True)

            invoice_name = '{}-{}-{}.{}'.format(
                invoice.contact_alias,
                invoice.invoice_number,
                invoice.date.isoformat(),
                invoice.invoice_file_type,
            )
            path = os.path.join(folder, invoice_name)
            with open(path, "wb") as invoice_file:
                invoice_file.write(invoice.invoice_file)
| 33.041667 | 75 | 0.538462 | import os
def get_invoice_files(invoices, year=False):
for invoice in invoices:
if invoice.invoice_file:
if not invoice.afa:
folder = invoice.invoice_type.name
else:
folder = 'afa'
if not os.path.exists(folder):
os.mkdir(folder)
invoice_name = '{}-{}-{}.{}'.format(
invoice.contact_alias,
invoice.invoice_number,
invoice.date.isoformat(),
invoice.invoice_file_type,
)
path = os.path.join(folder, invoice_name)
with open(path, "wb") as invoice_file:
invoice_file.write(invoice.invoice_file)
| true | true |
f73034f20da654c6f6022c6e0ce37e0082277f05 | 8,692 | py | Python | src/config/train_config.py | wangx1996/CenterPillarNet | 4be3d53265b8ecb1f9572612fa87f7acd8c57669 | [
"MIT"
] | 22 | 2021-03-19T03:13:16.000Z | 2022-03-31T03:05:07.000Z | src/config/train_config.py | wangx1996/CenterPillarNet | 4be3d53265b8ecb1f9572612fa87f7acd8c57669 | [
"MIT"
] | 4 | 2021-04-18T02:23:13.000Z | 2021-08-25T13:21:08.000Z | src/config/train_config.py | wangx1996/CenterPillarNet | 4be3d53265b8ecb1f9572612fa87f7acd8c57669 | [
"MIT"
] | 7 | 2021-06-04T06:54:21.000Z | 2022-01-17T09:18:50.000Z | """
# -*- coding: utf-8 -*-
-----------------------------------------------------------------------------------
# Author: Nguyen Mau Dung
# DoC: 2020.08.17
# email: nguyenmaudung93.kstn@gmail.com
-----------------------------------------------------------------------------------
# Description: The configurations of the project will be defined here
"""
import os
import argparse
import torch
from easydict import EasyDict as edict
import kitti_config as cnf
def parse_train_configs():
    """Parse command-line arguments and derive the full training configuration.

    Returns an ``easydict.EasyDict`` combining CLI options with hard-coded
    model/dataset settings (BEV sizes from ``kitti_config``, head layout,
    voxelization parameters, and output directories, which are created
    if missing).
    """
    parser = argparse.ArgumentParser(description='The Implementation using PyTorch')
    parser.add_argument('--seed', type=int, default=2020,
                        help='re-produce the results with seed random')
    parser.add_argument('--saved_fn', type=str, default='fpn_resnet_18', metavar='FN',
                        help='The name using for saving logs, models,...')
    parser.add_argument('--root-dir', type=str, default='../', metavar='PATH',
                        help='The ROOT working directory')
    ####################################################################
    ############## Model configs ########################
    ####################################################################
    parser.add_argument('--arch', type=str, default='fpn_resnet_18', metavar='ARCH',
                        help='The name of the model architecture')
    parser.add_argument('--pretrained_path', type=str, default=None, metavar='PATH',
                        help='the path of the pretrained checkpoint')
    ####################################################################
    ############## Dataloader and Running configs #######
    ####################################################################
    parser.add_argument('--hflip_prob', type=float, default=0.5,
                        help='The probability of horizontal flip')
    parser.add_argument('--no-val', action='store_true',
                        help='If true, dont evaluate the model on the val set')
    parser.add_argument('--num_samples', type=int, default=None,
                        help='Take a subset of the dataset to run and debug')
    parser.add_argument('--num_workers', type=int, default=4,
                        help='Number of threads for loading data')
    parser.add_argument('--batch_size', type=int, default=16,
                        help='mini-batch size (default: 16), this is the total'
                             'batch size of all GPUs on the current node when using'
                             'Data Parallel or Distributed Data Parallel')
    parser.add_argument('--print_freq', type=int, default=50, metavar='N',
                        help='print frequency (default: 50)')
    parser.add_argument('--tensorboard_freq', type=int, default=50, metavar='N',
                        help='frequency of saving tensorboard (default: 50)')
    parser.add_argument('--checkpoint_freq', type=int, default=2, metavar='N',
                        help='frequency of saving checkpoints (default: 5)')
    ####################################################################
    ############## Training strategy ####################
    ####################################################################
    parser.add_argument('--start_epoch', type=int, default=1, metavar='N',
                        help='the starting epoch')
    parser.add_argument('--num_epochs', type=int, default=300, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('--lr_type', type=str, default='cosin',
                        help='the type of learning rate scheduler (cosin or multi_step or one_cycle)')
    parser.add_argument('--lr', type=float, default=0.003, metavar='LR',
                        help='initial learning rate')
    parser.add_argument('--minimum_lr', type=float, default=1e-7, metavar='MIN_LR',
                        help='minimum learning rate during training')
    parser.add_argument('--momentum', type=float, default=0.949, metavar='M',
                        help='momentum')
    parser.add_argument('-wd', '--weight_decay', type=float, default=0., metavar='WD',
                        help='weight decay (default: 0.)')
    parser.add_argument('--optimizer_type', type=str, default='adam', metavar='OPTIMIZER',
                        help='the type of optimizer, it can be sgd or adam')
    parser.add_argument('--steps', nargs='*', default=[150, 180],
                        help='number of burn in step')
    ####################################################################
    ############## Loss weight ##########################
    ####################################################################
    ####################################################################
    ############## Distributed Data Parallel ############
    ####################################################################
    parser.add_argument('--world-size', default=-1, type=int, metavar='N',
                        help='number of nodes for distributed training')
    parser.add_argument('--rank', default=-1, type=int, metavar='N',
                        help='node rank for distributed training')
    parser.add_argument('--dist-url', default='tcp://127.0.0.1:29500', type=str,
                        help='url used to set up distributed training')
    parser.add_argument('--dist-backend', default='nccl', type=str,
                        help='distributed backend')
    parser.add_argument('--gpu_idx', default=0, type=int,
                        help='GPU index to use.')
    parser.add_argument('--no_cuda', action='store_true',
                        help='If true, cuda is not used.')
    parser.add_argument('--multiprocessing-distributed', action='store_true',
                        help='Use multi-processing distributed training to launch '
                             'N processes per node, which has N GPUs. This is the '
                             'fastest way to use PyTorch for either single node or '
                             'multi node data parallel training')
    ####################################################################
    ############## Evaluation configurations ###################
    ####################################################################
    parser.add_argument('--evaluate', action='store_true',
                        help='only evaluate the model, not training')
    parser.add_argument('--resume_path', type=str, default=None, metavar='PATH',
                        help='the path of the resumed checkpoint')
    parser.add_argument('--K', type=int, default=50,
                        help='the number of top K')

    configs = edict(vars(parser.parse_args()))

    ####################################################################
    ############## Hardware configurations #############################
    ####################################################################
    configs.device = torch.device('cpu' if configs.no_cuda else 'cuda')
    configs.ngpus_per_node = torch.cuda.device_count()

    configs.pin_memory = True

    # Bird's-eye-view input resolution comes from the KITTI config module.
    configs.input_size = (cnf.BEV_WIDTH, cnf.BEV_HEIGHT)
    configs.down_ratio = 2
    # NOTE(review): true division yields a float tuple for hm_size; downstream
    # code presumably casts to int when building heatmaps -- confirm.
    configs.hm_size = (cnf.BEV_WIDTH/configs.down_ratio, cnf.BEV_HEIGHT/configs.down_ratio)
    configs.max_objects = 50

    configs.imagenet_pretrained = True
    configs.head_conv = 256
    configs.num_classes = 1
    configs.num_center_offset = 2
    configs.num_z = 1
    configs.num_dim = 3
    configs.num_direction = 2 # sin, cos (8 would be used for a bin/cos/sin encoding)

    # Voxelization parameters: [x, y, z] voxel size and the point-cloud crop
    # range as [x_min, y_min, z_min, x_max, y_max, z_max] in metres.
    configs.voxel_size = [0.16, 0.16, 4]
    configs.point_cloud_range =[0, -34.56, -2.73, 69.12, 34.56, 1.27]
    configs.max_number_of_points_per_voxel = 100

    # Output heads of the detection network and their channel counts.
    configs.heads = {
        'hm_cen': configs.num_classes,
        'cen_offset': configs.num_center_offset,
        'direction': configs.num_direction,
        'z_coor': configs.num_z,
        'dim': configs.num_dim
    }

    configs.num_input_features = 4

    ####################################################################
    ############## Dataset, logs, Checkpoints dir ######################
    ####################################################################
    # NOTE(review): hard-coded machine-specific dataset path; consider making
    # this a CLI argument.
    configs.dataset_dir = '/media/wx/File/data/kittidata'
    configs.checkpoints_dir = os.path.join(configs.root_dir, 'checkpoints', configs.saved_fn)
    configs.logs_dir = os.path.join(configs.root_dir, 'logs', configs.saved_fn)

    if not os.path.isdir(configs.checkpoints_dir):
        os.makedirs(configs.checkpoints_dir)
    if not os.path.isdir(configs.logs_dir):
        os.makedirs(configs.logs_dir)

    return configs
| 53.654321 | 102 | 0.499655 |
import os
import argparse
import torch
from easydict import EasyDict as edict
import kitti_config as cnf
def parse_train_configs():
parser = argparse.ArgumentParser(description='The Implementation using PyTorch')
parser.add_argument('--seed', type=int, default=2020,
help='re-produce the results with seed random')
parser.add_argument('--saved_fn', type=str, default='fpn_resnet_18', metavar='FN',
help='The name using for saving logs, models,...')
parser.add_argument('--root-dir', type=str, default='../', metavar='PATH',
help='The ROOT working directory')
| true | true |
f73035c8e8d37029f07a95dd13aa78a0f9696623 | 22,403 | py | Python | scipy/special/__init__.py | alimuldal/scipy | 713cf7df7b759e2aaeef0f81eb632f48c9b4bae0 | [
"BSD-3-Clause"
] | 1 | 2019-07-29T02:53:51.000Z | 2019-07-29T02:53:51.000Z | scipy/special/__init__.py | alimuldal/scipy | 713cf7df7b759e2aaeef0f81eb632f48c9b4bae0 | [
"BSD-3-Clause"
] | 1 | 2021-09-11T14:30:32.000Z | 2021-09-11T14:30:32.000Z | scipy/special/__init__.py | alimuldal/scipy | 713cf7df7b759e2aaeef0f81eb632f48c9b4bae0 | [
"BSD-3-Clause"
] | 2 | 2016-12-19T02:27:46.000Z | 2019-07-29T02:53:54.000Z | """
========================================
Special functions (:mod:`scipy.special`)
========================================
.. module:: scipy.special
Nearly all of the functions below are universal functions and follow
broadcasting and automatic array-looping rules. Exceptions are noted.
Error handling
==============
Errors are handled by returning nans, or other appropriate values.
Some of the special function routines will emit warnings when an error
occurs. By default this is disabled. To enable such messages use
``errprint(1)``, and to disable such messages use ``errprint(0)``.
Example:
>>> print(scipy.special.bdtr(-1, 10, 0.3))
>>> scipy.special.errprint(1)
>>> print(scipy.special.bdtr(-1, 10, 0.3))
.. autosummary::
:toctree: generated/
errprint
SpecialFunctionWarning -- Warning that can be issued with ``errprint(True)``
Available functions
===================
Airy functions
--------------
.. autosummary::
:toctree: generated/
airy -- Airy functions and their derivatives.
airye -- Exponentially scaled Airy functions
ai_zeros -- [+]Zeros of Airy functions Ai(x) and Ai'(x)
bi_zeros -- [+]Zeros of Airy functions Bi(x) and Bi'(x)
itairy --
Elliptic Functions and Integrals
--------------------------------
.. autosummary::
:toctree: generated/
ellipj -- Jacobian elliptic functions
ellipk -- Complete elliptic integral of the first kind.
ellipkm1 -- ellipkm1(x) == ellipk(1 - x)
ellipkinc -- Incomplete elliptic integral of the first kind.
ellipe -- Complete elliptic integral of the second kind.
ellipeinc -- Incomplete elliptic integral of the second kind.
Bessel Functions
----------------
.. autosummary::
:toctree: generated/
jv -- Bessel function of real-valued order and complex argument.
jn -- Alias for jv
jve -- Exponentially scaled Bessel function.
yn -- Bessel function of second kind (integer order).
yv -- Bessel function of the second kind (real-valued order).
yve -- Exponentially scaled Bessel function of the second kind.
kn -- Modified Bessel function of the second kind (integer order).
kv -- Modified Bessel function of the second kind (real order).
kve -- Exponentially scaled modified Bessel function of the second kind.
iv -- Modified Bessel function.
ive -- Exponentially scaled modified Bessel function.
hankel1 -- Hankel function of the first kind.
hankel1e -- Exponentially scaled Hankel function of the first kind.
hankel2 -- Hankel function of the second kind.
hankel2e -- Exponentially scaled Hankel function of the second kind.
The following is not an universal function:
.. autosummary::
:toctree: generated/
lmbda -- [+]Sequence of lambda functions with arbitrary order v.
Zeros of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
jnjnp_zeros -- [+]Zeros of integer-order Bessel functions and derivatives sorted in order.
jnyn_zeros -- [+]Zeros of integer-order Bessel functions and derivatives as separate arrays.
jn_zeros -- [+]Zeros of Jn(x)
jnp_zeros -- [+]Zeros of Jn'(x)
yn_zeros -- [+]Zeros of Yn(x)
ynp_zeros -- [+]Zeros of Yn'(x)
y0_zeros -- [+]Complex zeros: Y0(z0)=0 and values of Y0'(z0)
y1_zeros -- [+]Complex zeros: Y1(z1)=0 and values of Y1'(z1)
y1p_zeros -- [+]Complex zeros of Y1'(z1')=0 and values of Y1(z1')
Faster versions of common Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
j0 -- Bessel function of order 0.
j1 -- Bessel function of order 1.
y0 -- Bessel function of second kind of order 0.
y1 -- Bessel function of second kind of order 1.
i0 -- Modified Bessel function of order 0.
i0e -- Exponentially scaled modified Bessel function of order 0.
i1 -- Modified Bessel function of order 1.
i1e -- Exponentially scaled modified Bessel function of order 1.
k0 -- Modified Bessel function of the second kind of order 0.
k0e -- Exponentially scaled modified Bessel function of the second kind of order 0.
k1 -- Modified Bessel function of the second kind of order 1.
k1e -- Exponentially scaled modified Bessel function of the second kind of order 1.
Integrals of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
itj0y0 -- Basic integrals of j0 and y0 from 0 to x.
it2j0y0 -- Integrals of (1-j0(t))/t from 0 to x and y0(t)/t from x to inf.
iti0k0 -- Basic integrals of i0 and k0 from 0 to x.
it2i0k0 -- Integrals of (i0(t)-1)/t from 0 to x and k0(t)/t from x to inf.
   besselpoly   -- Integral of a Bessel function: Jv(2*a*x) * x**lambda from x=0 to 1.
Derivatives of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
jvp -- Nth derivative of Jv(v,z)
yvp -- Nth derivative of Yv(v,z)
kvp -- Nth derivative of Kv(v,z)
ivp -- Nth derivative of Iv(v,z)
h1vp -- Nth derivative of H1v(v,z)
h2vp -- Nth derivative of H2v(v,z)
Spherical Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
sph_jn -- [+]Sequence of spherical Bessel functions, jn(z)
sph_yn -- [+]Sequence of spherical Bessel functions, yn(z)
sph_jnyn -- [+]Sequence of spherical Bessel functions, jn(z) and yn(z)
sph_in -- [+]Sequence of spherical Bessel functions, in(z)
sph_kn -- [+]Sequence of spherical Bessel functions, kn(z)
sph_inkn -- [+]Sequence of spherical Bessel functions, in(z) and kn(z)
Riccati-Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
riccati_jn -- [+]Sequence of Ricatti-Bessel functions of first kind.
riccati_yn -- [+]Sequence of Ricatti-Bessel functions of second kind.
Struve Functions
----------------
.. autosummary::
:toctree: generated/
struve -- Struve function --- Hv(x)
modstruve -- Modified Struve function --- Lv(x)
itstruve0 -- Integral of H0(t) from 0 to x
it2struve0 -- Integral of H0(t)/t from x to Inf.
itmodstruve0 -- Integral of L0(t) from 0 to x.
Raw Statistical Functions
-------------------------
.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
.. autosummary::
:toctree: generated/
bdtr -- Sum of terms 0 through k of the binomial pdf.
bdtrc -- Sum of terms k+1 through n of the binomial pdf.
bdtri -- Inverse of bdtr
bdtrik --
bdtrin --
btdtr -- Integral from 0 to x of beta pdf.
btdtri -- Quantiles of beta distribution
btdtria --
btdtrib --
fdtr -- Integral from 0 to x of F pdf.
fdtrc -- Integral from x to infinity under F pdf.
fdtri -- Inverse of fdtrc
fdtridfd --
gdtr -- Integral from 0 to x of gamma pdf.
gdtrc -- Integral from x to infinity under gamma pdf.
gdtria -- Inverse with respect to `a` of gdtr.
gdtrib -- Inverse with respect to `b` of gdtr.
gdtrix -- Inverse with respect to `x` of gdtr.
nbdtr -- Sum of terms 0 through k of the negative binomial pdf.
nbdtrc -- Sum of terms k+1 to infinity under negative binomial pdf.
nbdtri -- Inverse of nbdtr
nbdtrik --
nbdtrin --
ncfdtr -- CDF of non-central t distribution.
ncfdtridfd -- Find degrees of freedom (denominator) of noncentral F distribution.
ncfdtridfn -- Find degrees of freedom (numerator) of noncentral F distribution.
ncfdtri -- Inverse CDF of noncentral F distribution.
ncfdtrinc -- Find noncentrality parameter of noncentral F distribution.
nctdtr -- CDF of noncentral t distribution.
nctdtridf -- Find degrees of freedom of noncentral t distribution.
nctdtrit -- Inverse CDF of noncentral t distribution.
nctdtrinc -- Find noncentrality parameter of noncentral t distribution.
nrdtrimn -- Find mean of normal distribution from cdf and std.
nrdtrisd -- Find std of normal distribution from cdf and mean.
pdtr -- Sum of terms 0 through k of the Poisson pdf.
pdtrc -- Sum of terms k+1 to infinity of the Poisson pdf.
pdtri -- Inverse of pdtr
pdtrik --
stdtr -- Integral from -infinity to t of the Student-t pdf.
stdtridf --
stdtrit --
chdtr -- Integral from 0 to x of the Chi-square pdf.
chdtrc -- Integral from x to infnity of Chi-square pdf.
chdtri -- Inverse of chdtrc.
chdtriv --
ndtr -- Integral from -infinity to x of standard normal pdf
log_ndtr -- Logarithm of integral from -infinity to x of standard normal pdf
ndtri -- Inverse of ndtr (quantiles)
chndtr --
chndtridf --
chndtrinc --
chndtrix --
smirnov -- Kolmogorov-Smirnov complementary CDF for one-sided test statistic (Dn+ or Dn-)
smirnovi -- Inverse of smirnov.
kolmogorov -- The complementary CDF of the (scaled) two-sided test statistic (Kn*) valid for large n.
kolmogi -- Inverse of kolmogorov
tklmbda -- Tukey-Lambda CDF
logit --
expit --
boxcox -- Compute the Box-Cox transformation.
boxcox1p -- Compute the Box-Cox transformation of 1 + x.
inv_boxcox -- Compute the inverse of the Box-Cox tranformation.
inv_boxcox1p -- Compute the inverse of the Box-Cox transformation of 1 + x.
Information Theory Functions
----------------------------
.. autosummary::
:toctree: generated/
entr -- entr(x) = -x*log(x)
rel_entr -- rel_entr(x, y) = x*log(x/y)
kl_div -- kl_div(x, y) = x*log(x/y) - x + y
huber -- Huber loss function.
pseudo_huber -- Pseudo-Huber loss function.
Gamma and Related Functions
---------------------------
.. autosummary::
:toctree: generated/
gamma -- Gamma function.
gammaln -- Log transformation of the gamma function.
gammasgn -- Sign of the gamma function.
gammainc -- Incomplete gamma integral.
gammaincinv -- Inverse of gammainc.
gammaincc -- Complemented incomplete gamma integral.
gammainccinv -- Inverse of gammaincc.
beta -- Beta function.
betaln -- Log of the absolute value of the beta function.
betainc -- Incomplete beta integral.
betaincinv -- Inverse of betainc.
psi -- Logarithmic derivative of the gamma function.
rgamma -- One divided by the gamma function.
polygamma -- Nth derivative of psi function.
multigammaln -- Log of the multivariate gamma.
digamma -- Digamma function (derivative of the logarithm of gamma).
poch -- The Pochhammer symbol (rising factorial).
Error Function and Fresnel Integrals
------------------------------------
.. autosummary::
:toctree: generated/
erf -- Error function.
erfc -- Complemented error function (1- erf(x))
erfcx -- Scaled complemented error function exp(x**2)*erfc(x)
erfi -- Imaginary error function, -i erf(i x)
erfinv -- Inverse of error function
erfcinv -- Inverse of erfc
wofz -- Faddeeva function.
dawsn -- Dawson's integral.
fresnel -- Fresnel sine and cosine integrals.
fresnel_zeros -- Complex zeros of both Fresnel integrals
modfresnelp -- Modified Fresnel integrals F_+(x) and K_+(x)
modfresnelm -- Modified Fresnel integrals F_-(x) and K_-(x)
These are not universal functions:
.. autosummary::
:toctree: generated/
erf_zeros -- [+]Complex zeros of erf(z)
fresnelc_zeros -- [+]Complex zeros of Fresnel cosine integrals
fresnels_zeros -- [+]Complex zeros of Fresnel sine integrals
Legendre Functions
------------------
.. autosummary::
:toctree: generated/
lpmv -- Associated Legendre Function of arbitrary non-negative degree v.
sph_harm -- Spherical Harmonics (complex-valued) Y^m_n(theta,phi)
These are not universal functions:
.. autosummary::
:toctree: generated/
clpmn -- [+]Associated Legendre Function of the first kind for complex arguments.
lpn -- [+]Legendre Functions (polynomials) of the first kind
lqn -- [+]Legendre Functions of the second kind.
lpmn -- [+]Associated Legendre Function of the first kind for real arguments.
lqmn -- [+]Associated Legendre Function of the second kind.
Ellipsoidal Harmonics
---------------------
.. autosummary::
:toctree: generated/
ellip_harm -- Ellipsoidal harmonic E
ellip_harm_2 -- Ellipsoidal harmonic F
ellip_normal -- Ellipsoidal normalization constant
Orthogonal polynomials
----------------------
The following functions evaluate values of orthogonal polynomials:
.. autosummary::
:toctree: generated/
assoc_laguerre
eval_legendre
eval_chebyt
eval_chebyu
eval_chebyc
eval_chebys
eval_jacobi
eval_laguerre
eval_genlaguerre
eval_hermite
eval_hermitenorm
eval_gegenbauer
eval_sh_legendre
eval_sh_chebyt
eval_sh_chebyu
eval_sh_jacobi
The functions below, in turn, return the polynomial coefficients in
:class:`~.orthopoly1d` objects, which function similarly as :ref:`numpy.poly1d`.
The :class:`~.orthopoly1d` class also has an attribute ``weights`` which returns
the roots, weights, and total weights for the appropriate form of Gaussian
quadrature. These are returned in an ``n x 3`` array with roots in the first
column, weights in the second column, and total weights in the final column.
Note that :class:`~.orthopoly1d` objects are converted to ``poly1d`` when doing
arithmetic, and lose information of the original orthogonal polynomial.
.. autosummary::
:toctree: generated/
legendre -- [+]Legendre polynomial P_n(x) (lpn -- for function).
chebyt -- [+]Chebyshev polynomial T_n(x)
chebyu -- [+]Chebyshev polynomial U_n(x)
chebyc -- [+]Chebyshev polynomial C_n(x)
chebys -- [+]Chebyshev polynomial S_n(x)
jacobi -- [+]Jacobi polynomial P^(alpha,beta)_n(x)
laguerre -- [+]Laguerre polynomial, L_n(x)
genlaguerre -- [+]Generalized (Associated) Laguerre polynomial, L^alpha_n(x)
hermite -- [+]Hermite polynomial H_n(x)
hermitenorm -- [+]Normalized Hermite polynomial, He_n(x)
gegenbauer -- [+]Gegenbauer (Ultraspherical) polynomials, C^(alpha)_n(x)
sh_legendre -- [+]shifted Legendre polynomial, P*_n(x)
sh_chebyt -- [+]shifted Chebyshev polynomial, T*_n(x)
sh_chebyu -- [+]shifted Chebyshev polynomial, U*_n(x)
sh_jacobi -- [+]shifted Jacobi polynomial, J*_n(x) = G^(p,q)_n(x)
.. warning::
Computing values of high-order polynomials (around ``order > 20``) using
polynomial coefficients is numerically unstable. To evaluate polynomial
values, the ``eval_*`` functions should be used instead.
Roots and weights for orthogonal polynomials
.. autosummary::
:toctree: generated/
c_roots
cg_roots
h_roots
he_roots
j_roots
js_roots
l_roots
la_roots
p_roots
ps_roots
s_roots
t_roots
ts_roots
u_roots
us_roots
Hypergeometric Functions
------------------------
.. autosummary::
:toctree: generated/
hyp2f1 -- Gauss hypergeometric function (2F1)
hyp1f1 -- Confluent hypergeometric function (1F1)
hyperu -- Confluent hypergeometric function (U)
hyp0f1 -- Confluent hypergeometric limit function (0F1)
hyp2f0 -- Hypergeometric function (2F0)
hyp1f2 -- Hypergeometric function (1F2)
hyp3f0 -- Hypergeometric function (3F0)
Parabolic Cylinder Functions
----------------------------
.. autosummary::
:toctree: generated/
pbdv -- Parabolic cylinder function Dv(x) and derivative.
pbvv -- Parabolic cylinder function Vv(x) and derivative.
pbwa -- Parabolic cylinder function W(a,x) and derivative.
These are not universal functions:
.. autosummary::
:toctree: generated/
pbdv_seq -- [+]Sequence of parabolic cylinder functions Dv(x)
pbvv_seq -- [+]Sequence of parabolic cylinder functions Vv(x)
pbdn_seq -- [+]Sequence of parabolic cylinder functions Dn(z), complex z
Mathieu and Related Functions
-----------------------------
.. autosummary::
:toctree: generated/
mathieu_a -- Characteristic values for even solution (ce_m)
mathieu_b -- Characteristic values for odd solution (se_m)
These are not universal functions:
.. autosummary::
:toctree: generated/
mathieu_even_coef -- [+]sequence of expansion coefficients for even solution
mathieu_odd_coef -- [+]sequence of expansion coefficients for odd solution
The following return both function and first derivative:
.. autosummary::
:toctree: generated/
mathieu_cem -- Even Mathieu function
mathieu_sem -- Odd Mathieu function
mathieu_modcem1 -- Even modified Mathieu function of the first kind
mathieu_modcem2 -- Even modified Mathieu function of the second kind
mathieu_modsem1 -- Odd modified Mathieu function of the first kind
mathieu_modsem2 -- Odd modified Mathieu function of the second kind
Spheroidal Wave Functions
-------------------------
.. autosummary::
:toctree: generated/
pro_ang1 -- Prolate spheroidal angular function of the first kind
pro_rad1 -- Prolate spheroidal radial function of the first kind
pro_rad2 -- Prolate spheroidal radial function of the second kind
obl_ang1 -- Oblate spheroidal angular function of the first kind
obl_rad1 -- Oblate spheroidal radial function of the first kind
obl_rad2 -- Oblate spheroidal radial function of the second kind
pro_cv -- Compute characteristic value for prolate functions
obl_cv -- Compute characteristic value for oblate functions
pro_cv_seq -- Compute sequence of prolate characteristic values
obl_cv_seq -- Compute sequence of oblate characteristic values
The following functions require pre-computed characteristic value:
.. autosummary::
:toctree: generated/
pro_ang1_cv -- Prolate spheroidal angular function of the first kind
pro_rad1_cv -- Prolate spheroidal radial function of the first kind
pro_rad2_cv -- Prolate spheroidal radial function of the second kind
obl_ang1_cv -- Oblate spheroidal angular function of the first kind
obl_rad1_cv -- Oblate spheroidal radial function of the first kind
obl_rad2_cv -- Oblate spheroidal radial function of the second kind
Kelvin Functions
----------------
.. autosummary::
:toctree: generated/
kelvin -- All Kelvin functions (order 0) and derivatives.
kelvin_zeros -- [+]Zeros of All Kelvin functions (order 0) and derivatives
ber -- Kelvin function ber x
bei -- Kelvin function bei x
berp -- Derivative of Kelvin function ber x
beip -- Derivative of Kelvin function bei x
ker -- Kelvin function ker x
kei -- Kelvin function kei x
kerp -- Derivative of Kelvin function ker x
keip -- Derivative of Kelvin function kei x
These are not universal functions:
.. autosummary::
:toctree: generated/
ber_zeros -- [+]Zeros of Kelvin function ber x
bei_zeros -- [+]Zeros of Kelvin function bei x
berp_zeros -- [+]Zeros of derivative of Kelvin function ber x
beip_zeros -- [+]Zeros of derivative of Kelvin function bei x
ker_zeros -- [+]Zeros of Kelvin function ker x
kei_zeros -- [+]Zeros of Kelvin function kei x
kerp_zeros -- [+]Zeros of derivative of Kelvin function ker x
keip_zeros -- [+]Zeros of derivative of Kelvin function kei x
Combinatorics
-------------
.. autosummary::
:toctree: generated/
comb -- [+]Combinations of N things taken k at a time, "N choose k"
perm -- [+]Permutations of N things taken k at a time, "k-permutations of N"
Other Special Functions
-----------------------
.. autosummary::
:toctree: generated/
agm -- Arithmetic-Geometric Mean
bernoulli -- Bernoulli numbers
binom -- Binomial coefficient.
diric -- Dirichlet function (periodic sinc)
euler -- Euler numbers
expn -- Exponential integral.
exp1 -- Exponential integral of order 1 (for complex argument)
expi -- Another exponential integral -- Ei(x)
factorial -- The factorial function, n! = special.gamma(n+1)
factorial2 -- Double factorial, n!!
factorialk -- [+](...((n!)!)!...)! where there are k '!'
shichi -- Hyperbolic sine and cosine integrals.
sici -- Integral of the sinc and "cosinc" functions.
spence -- Dilogarithm integral.
lambertw -- Lambert W function
zeta -- Riemann zeta function of two arguments.
zetac -- Standard Riemann zeta function minus 1.
Convenience Functions
---------------------
.. autosummary::
:toctree: generated/
cbrt -- Cube root.
exp10 -- 10 raised to the x power.
exp2 -- 2 raised to the x power.
radian -- radian angle given degrees, minutes, and seconds.
cosdg -- cosine of the angle given in degrees.
sindg -- sine of the angle given in degrees.
tandg -- tangent of the angle given in degrees.
cotdg -- cotangent of the angle given in degrees.
log1p -- log(1+x)
expm1 -- exp(x)-1
cosm1 -- cos(x)-1
round -- round the argument to the nearest integer. If argument ends in 0.5 exactly, pick the nearest even integer.
xlogy -- x*log(y)
xlog1py -- x*log1p(y)
exprel -- (exp(x)-1)/x
sinc -- sin(x)/x
.. [+] in the description indicates a function which is not a universal
.. function and does not follow broadcasting and automatic
.. array-looping rules.
"""
from __future__ import division, print_function, absolute_import
# Re-export the public special-function API into this package's namespace.
from ._ufuncs import *
from .basic import *
from . import specfun
from . import orthogonal
from .orthogonal import *
from .spfun_stats import multigammaln
from ._ellip_harm import ellip_harm, ellip_harm_2, ellip_normal
from .lambertw import lambertw
# Public API: every name imported above that is not private.
__all__ = [s for s in dir() if not s.startswith('_')]
# Register this package's i0 with numpy.dual so NumPy dispatches to it.
# NOTE(review): numpy.dual and numpy.testing.Tester are removed in modern
# NumPy -- confirm the supported NumPy range before upgrading.
from numpy.dual import register_func
register_func('i0',i0)
del register_func
# Expose a nose-based test runner as <package>.test().
from numpy.testing import Tester
test = Tester().test
| 34.679567 | 121 | 0.658796 |
from __future__ import division, print_function, absolute_import
from ._ufuncs import *
from .basic import *
from . import specfun
from . import orthogonal
from .orthogonal import *
from .spfun_stats import multigammaln
from ._ellip_harm import ellip_harm, ellip_harm_2, ellip_normal
from .lambertw import lambertw
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.dual import register_func
register_func('i0',i0)
del register_func
from numpy.testing import Tester
test = Tester().test
| true | true |
f730363349853b93aa06cd6ec0467353dfec8ae9 | 2,024 | py | Python | config/settings/test.py | seankim84/twitter | 71cbcd821effc4e77588b195d770ef003887d322 | [
"MIT"
] | null | null | null | config/settings/test.py | seankim84/twitter | 71cbcd821effc4e77588b195d770ef003887d322 | [
"MIT"
] | null | null | null | config/settings/test.py | seankim84/twitter | 71cbcd821effc4e77588b195d770ef003887d322 | [
"MIT"
] | null | null | null | """
With these settings, tests run faster.
"""
from .base import *  # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
# DEBUG stays off so tests exercise production-like error handling.
DEBUG = False
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Hard-coded fallback is acceptable only because this module is test-only.
SECRET_KEY = env("DJANGO_SECRET_KEY", default="uaAkGkerZ7vi0VWITpJheK17oRcfIACMfcTmeZhyrF5IG4jJO3ougsdusXKzpyF0")
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
# Local-memory cache: fast and isolated per test process.
CACHES = {
    "default": {
        "BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": ""
    }
}
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
# MD5 is insecure but fast; acceptable only to speed up user creation in tests.
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
# Mutates the TEMPLATES list imported from base settings via the star import.
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG # noqa F405
# Cached loader avoids re-reading templates from disk on every render.
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
    (
        "django.template.loaders.cached.Loader",
        [
            "django.template.loaders.filesystem.Loader",
            "django.template.loaders.app_directories.Loader",
        ],
    )
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# locmem backend stores sent messages in django.core.mail.outbox for assertions.
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = "localhost"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# Your stuff...
# ------------------------------------------------------------------------------
| 36.142857 | 113 | 0.546443 |
from .base import *
from .base import env
= False
= env("DJANGO_SECRET_KEY", default="uaAkGkerZ7vi0VWITpJheK17oRcfIACMfcTmeZhyrF5IG4jJO3ougsdusXKzpyF0")
= "django.test.runner.DiscoverRunner"
= {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": ""
}
}
= ["django.contrib.auth.hashers.MD5PasswordHasher"]
[0]["OPTIONS"]["debug"] = DEBUG
TEMPLATES[0]["OPTIONS"]["loaders"] = [
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
= "django.core.mail.backends.locmem.EmailBackend"
= "localhost"
= 1025
| true | true |
f73036fe32a7f58510cda8b92c49c85b9b6d8b44 | 1,195 | py | Python | src/scrape/organisation_model.py | younginnovations/iati-organisations-cleanup | 6a073abbb43957632d988880fc96319da2265e13 | [
"MIT"
] | 1 | 2017-09-07T11:44:56.000Z | 2017-09-07T11:44:56.000Z | src/scrape/organisation_model.py | younginnovations/iati-organisations-cleanup | 6a073abbb43957632d988880fc96319da2265e13 | [
"MIT"
] | null | null | null | src/scrape/organisation_model.py | younginnovations/iati-organisations-cleanup | 6a073abbb43957632d988880fc96319da2265e13 | [
"MIT"
] | null | null | null | from peewee import *
import datetime
from config import *
database = PostgresqlDatabase(POSTGRES_DATABASE, user=POSTGRES_USER, password=POSTGRES_PASSWORD, host=POSTGRES_HOST)
class TblOrganisation(Model):
    """Peewee model for one organisation record (table ``organisations``)."""

    id = PrimaryKeyField()
    identifier = CharField()
    type = IntegerField()
    country = CharField()
    is_org_file = BooleanField(default=False)
    is_publisher = BooleanField(default=False)
    # BUG FIX: the default must be a callable.  The original code passed
    # datetime.datetime.now().strftime('%Y-%m-%d'), which was evaluated once
    # at import time, so every row created later was stamped with the date
    # the module was loaded instead of the row's creation date.  Peewee
    # invokes a callable default once per new row.
    last_updated = DateTimeField(
        null=True,
        default=lambda: datetime.datetime.now().strftime('%Y-%m-%d'))

    class Meta:
        db_table = "organisations"
        database = database
class TblName(Model):
    # One row per name of an organisation; maps to table ``names``.
    # Back-reference: organisation.names yields all TblName rows for it.
    organisation = ForeignKeyField(TblOrganisation, to_field="id", related_name='names')
    name = TextField()
    # Whether this is the organisation's primary name (defaults to True).
    is_primary = BooleanField(default=True)
    # Language of the name -- presumably a language code; confirm against
    # the values produced by getLanguages() below.
    language = CharField()

    class Meta:
        db_table = "names"
        database = database
def getLanguages(row):
    """Return the language column names present in *row*.

    Every key of *row* (stripped of surrounding whitespace) that is not one
    of the known non-language header columns is treated as a language
    column.  Duplicates are dropped while preserving first-seen order.

    :param row: mapping of CSV header name -> cell value
    :return: list of language column names, in first-seen order
    """
    # Set membership is O(1) versus O(n) for the original list lookups.
    known_headers = {"name", "identifier", "type", "country", "countrycode",
                     "is_org_file", "is_publisher", "last_updated"}
    languages = []
    for key in row.keys():
        key = key.strip()
        # `languages` stays a list (not a set) to preserve encounter order.
        if key not in known_headers and key not in languages:
            languages.append(key)
    return languages
| 33.194444 | 121 | 0.68954 | from peewee import *
import datetime
from config import *
database = PostgresqlDatabase(POSTGRES_DATABASE, user=POSTGRES_USER, password=POSTGRES_PASSWORD, host=POSTGRES_HOST)
class TblOrganisation(Model):
id = PrimaryKeyField()
identifier = CharField()
type = IntegerField()
country = CharField()
is_org_file = BooleanField(default=False)
is_publisher = BooleanField(default=False)
last_updated = DateTimeField(null=True, default=datetime.datetime.now().strftime('%Y-%m-%d'))
class Meta:
db_table = "organisations"
database = database
class TblName(Model):
organisation = ForeignKeyField(TblOrganisation, to_field="id", related_name='names')
name = TextField()
is_primary = BooleanField(default=True)
language = CharField()
class Meta:
db_table = "names"
database = database
def getLanguages(row):
knownheader = ["name", "identifier", "type", "country", "countrycode", "is_org_file", "is_publisher", "last_updated"]
languages = []
for key in row.keys():
key = key.strip()
if not key in knownheader and not key in languages:
languages.append(key)
return languages
| true | true |
f730371b81367b93cefe310f9c09565e96aac8e7 | 1,124 | py | Python | edit/deleteaward.py | lokal-profil/isfdb_site | 0ce20d6347849926d4eda961ea9249c31519eea5 | [
"BSD-3-Clause"
] | null | null | null | edit/deleteaward.py | lokal-profil/isfdb_site | 0ce20d6347849926d4eda961ea9249c31519eea5 | [
"BSD-3-Clause"
] | null | null | null | edit/deleteaward.py | lokal-profil/isfdb_site | 0ce20d6347849926d4eda961ea9249c31519eea5 | [
"BSD-3-Clause"
] | null | null | null | #!_PYTHONLOC
#
# (C) COPYRIGHT 2004-2021 Al von Ruff and Ahasuerus
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
from isfdb import *
from isfdblib import *
from awardClass import *
from SQLparsing import *
if __name__ == '__main__':
    # CGI entry point: render a confirmation form for deleting one award.
    # The award record ID arrives as the first positional CGI parameter.
    award_id = SESSION.Parameter(0, 'int')
    award = SQLloadAwards(award_id)
    if not award:
        # NOTE(review): this assumes DisplayError terminates the request;
        # if it returns, the code below would index an empty result --
        # confirm against SESSION.DisplayError's implementation.
        SESSION.DisplayError('Record Does Not Exist')
    PrintPreSearch('Delete Award Submission')
    PrintNavBar('edit/deleteaward.cgi', award_id)
    # Show the title of the award row about to be deleted.
    print '<b>Request to Delete:</b> <i>%s</i>' % award[0][AWARD_TITLE]
    # The form posts to the handler that queues the deletion submission.
    print '<form METHOD="POST" ACTION="/cgi-bin/edit/submitdelaward.cgi">'
    print '<p>'
    print '<b>Deletion Reason</b><br>'
    print '<textarea name="reason" rows="4" cols="45"></textarea>'
    print '<p>'
    # Carry the award ID through to the submission handler.
    print '<input name="award_id" value="%d" type="HIDDEN">' % award_id
    print '<input type="SUBMIT" value="Delete">'
    print '</form>'
    PrintPostSearch(0, 0, 0, 0, 0, 0)
| 28.1 | 78 | 0.636121 |
from isfdb import *
from isfdblib import *
from awardClass import *
from SQLparsing import *
if __name__ == '__main__':
award_id = SESSION.Parameter(0, 'int')
award = SQLloadAwards(award_id)
if not award:
SESSION.DisplayError('Record Does Not Exist')
PrintPreSearch('Delete Award Submission')
PrintNavBar('edit/deleteaward.cgi', award_id)
print '<b>Request to Delete:</b> <i>%s</i>' % award[0][AWARD_TITLE]
print '<form METHOD="POST" ACTION="/cgi-bin/edit/submitdelaward.cgi">'
print '<p>'
print '<b>Deletion Reason</b><br>'
print '<textarea name="reason" rows="4" cols="45"></textarea>'
print '<p>'
print '<input name="award_id" value="%d" type="HIDDEN">' % award_id
print '<input type="SUBMIT" value="Delete">'
print '</form>'
PrintPostSearch(0, 0, 0, 0, 0, 0)
| false | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.