code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'prototype_activity_about.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """Auto-generated "About" dialog layout.

    Produced by pyuic5 from prototype_activity_about.ui; regenerate from the
    .ui file instead of editing by hand (see the header warning).
    """

    def setupUi(self, Form):
        """Create and lay out all child widgets of ``Form``."""
        Form.setObjectName("Form")
        Form.resize(400, 336)
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(Form)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # Top bar: a label on the left, a spacer, and a (text-less) button.
        self.widget = QtWidgets.QWidget(Form)
        self.widget.setObjectName("widget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label = QtWidgets.QLabel(self.widget)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        spacerItem = QtWidgets.QSpacerItem(281, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.pushButton_2 = QtWidgets.QPushButton(self.widget)
        self.pushButton_2.setText("")
        self.pushButton_2.setObjectName("pushButton_2")
        self.horizontalLayout.addWidget(self.pushButton_2)
        self.verticalLayout_2.addWidget(self.widget)
        # Centered square button (70x70 minimum); text is set later (empty).
        self.pushButton = QtWidgets.QPushButton(Form)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
        self.pushButton.setSizePolicy(sizePolicy)
        self.pushButton.setMinimumSize(QtCore.QSize(70, 70))
        self.pushButton.setText("")
        self.pushButton.setObjectName("pushButton")
        self.verticalLayout_2.addWidget(self.pushButton, 0, QtCore.Qt.AlignHCenter)
        # Two centered text labels (app title and version, see retranslateUi).
        self.label_2 = QtWidgets.QLabel(Form)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        self.verticalLayout_2.addWidget(self.label_2)
        self.label_3 = QtWidgets.QLabel(Form)
        self.label_3.setAlignment(QtCore.Qt.AlignCenter)
        self.label_3.setObjectName("label_3")
        self.verticalLayout_2.addWidget(self.label_3)
        # "Thanks For" group: a 2x5 grid of name labels.
        self.groupBox = QtWidgets.QGroupBox(Form)
        self.groupBox.setObjectName("groupBox")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.label_9 = QtWidgets.QLabel(self.groupBox)
        self.label_9.setObjectName("label_9")
        self.gridLayout_2.addWidget(self.label_9, 0, 0, 1, 1)
        self.label_10 = QtWidgets.QLabel(self.groupBox)
        self.label_10.setObjectName("label_10")
        self.gridLayout_2.addWidget(self.label_10, 0, 1, 1, 1)
        self.label_11 = QtWidgets.QLabel(self.groupBox)
        self.label_11.setObjectName("label_11")
        self.gridLayout_2.addWidget(self.label_11, 0, 2, 1, 1)
        self.label_12 = QtWidgets.QLabel(self.groupBox)
        self.label_12.setObjectName("label_12")
        self.gridLayout_2.addWidget(self.label_12, 0, 3, 1, 1)
        self.label_13 = QtWidgets.QLabel(self.groupBox)
        self.label_13.setObjectName("label_13")
        self.gridLayout_2.addWidget(self.label_13, 0, 4, 1, 1)
        self.label_8 = QtWidgets.QLabel(self.groupBox)
        self.label_8.setObjectName("label_8")
        self.gridLayout_2.addWidget(self.label_8, 1, 0, 1, 1)
        self.label_14 = QtWidgets.QLabel(self.groupBox)
        self.label_14.setObjectName("label_14")
        self.gridLayout_2.addWidget(self.label_14, 1, 1, 1, 1)
        self.label_15 = QtWidgets.QLabel(self.groupBox)
        self.label_15.setObjectName("label_15")
        self.gridLayout_2.addWidget(self.label_15, 1, 2, 1, 1)
        self.label_16 = QtWidgets.QLabel(self.groupBox)
        self.label_16.setObjectName("label_16")
        self.gridLayout_2.addWidget(self.label_16, 1, 3, 1, 1)
        self.label_17 = QtWidgets.QLabel(self.groupBox)
        self.label_17.setObjectName("label_17")
        self.gridLayout_2.addWidget(self.label_17, 1, 4, 1, 1)
        self.verticalLayout_2.addWidget(self.groupBox)
        # "About Author" group: a 2x2 grid of centered labels.
        self.groupBox_2 = QtWidgets.QGroupBox(Form)
        self.groupBox_2.setObjectName("groupBox_2")
        self.gridLayout = QtWidgets.QGridLayout(self.groupBox_2)
        self.gridLayout.setObjectName("gridLayout")
        self.label_4 = QtWidgets.QLabel(self.groupBox_2)
        self.label_4.setObjectName("label_4")
        self.gridLayout.addWidget(self.label_4, 0, 0, 1, 1, QtCore.Qt.AlignHCenter)
        self.label_6 = QtWidgets.QLabel(self.groupBox_2)
        self.label_6.setObjectName("label_6")
        self.gridLayout.addWidget(self.label_6, 0, 1, 1, 1, QtCore.Qt.AlignHCenter)
        self.label_5 = QtWidgets.QLabel(self.groupBox_2)
        self.label_5.setObjectName("label_5")
        self.gridLayout.addWidget(self.label_5, 1, 0, 1, 1, QtCore.Qt.AlignHCenter)
        self.label_7 = QtWidgets.QLabel(self.groupBox_2)
        self.label_7.setObjectName("label_7")
        self.gridLayout.addWidget(self.label_7, 1, 1, 1, 1, QtCore.Qt.AlignHCenter)
        self.verticalLayout_2.addWidget(self.groupBox_2)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Set all user-visible strings (wrapped in Qt translate calls)."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.label.setText(_translate("Form", "Sherry"))
        self.label_2.setText(_translate("Form", "社会通用主题"))
        self.label_3.setText(_translate("Form", "version: 1.0.0"))
        self.groupBox.setTitle(_translate("Form", "Thanks For"))
        self.label_9.setText(_translate("Form", "· 黄良心"))
        self.label_10.setText(_translate("Form", "· 付诗句"))
        self.label_11.setText(_translate("Form", "· 韩暑假"))
        self.label_12.setText(_translate("Form", "· 刘搜索"))
        self.label_13.setText(_translate("Form", "· 诸葛聪明"))
        self.label_8.setText(_translate("Form", "· 张开学"))
        self.label_14.setText(_translate("Form", "· 方语文"))
        self.label_15.setText(_translate("Form", "· 李键盘"))
        self.label_16.setText(_translate("Form", "· 吴马路"))
        self.label_17.setText(_translate("Form", "· 刘三万"))
        self.groupBox_2.setTitle(_translate("Form", "About Author"))
        self.label_4.setText(_translate("Form", "黄大胆"))
        self.label_6.setText(_translate("Form", "<EMAIL>"))
        self.label_5.setText(_translate("Form", "PyQt5"))
        self.label_7.setText(_translate("Form", "support"))
|
[
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QSizePolicy",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtCore.QSize",
"PyQt5.QtWidgets.QGroupBox",
"PyQt5.QtWidgets.QSpacerItem",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtCore.QMetaObject.connectSlotsByName"
] |
[((501, 528), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['Form'], {}), '(Form)\n', (522, 528), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((615, 638), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['Form'], {}), '(Form)\n', (632, 638), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((715, 749), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['self.widget'], {}), '(self.widget)\n', (736, 749), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((835, 864), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.widget'], {}), '(self.widget)\n', (851, 864), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((980, 1079), 'PyQt5.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(281)', '(20)', 'QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Minimum'], {}), '(281, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Minimum)\n', (1001, 1079), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1153, 1187), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.widget'], {}), '(self.widget)\n', (1174, 1187), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1420, 1447), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['Form'], {}), '(Form)\n', (1441, 1447), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1469, 1561), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Preferred', 'QtWidgets.QSizePolicy.Preferred'], {}), '(QtWidgets.QSizePolicy.Preferred, QtWidgets.\n QSizePolicy.Preferred)\n', (1490, 1561), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2034, 2056), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['Form'], {}), '(Form)\n', (2050, 2056), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2237, 2259), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['Form'], {}), '(Form)\n', (2253, 2259), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2441, 2466), 'PyQt5.QtWidgets.QGroupBox', 
'QtWidgets.QGroupBox', (['Form'], {}), '(Form)\n', (2460, 2466), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2543, 2579), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.groupBox'], {}), '(self.groupBox)\n', (2564, 2579), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2659, 2690), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox'], {}), '(self.groupBox)\n', (2675, 2690), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2823, 2854), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox'], {}), '(self.groupBox)\n', (2839, 2854), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2990, 3021), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox'], {}), '(self.groupBox)\n', (3006, 3021), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3157, 3188), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox'], {}), '(self.groupBox)\n', (3173, 3188), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3324, 3355), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox'], {}), '(self.groupBox)\n', (3340, 3355), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3490, 3521), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox'], {}), '(self.groupBox)\n', (3506, 3521), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3654, 3685), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox'], {}), '(self.groupBox)\n', (3670, 3685), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3821, 3852), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox'], {}), '(self.groupBox)\n', (3837, 3852), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3988, 4019), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox'], {}), '(self.groupBox)\n', (4004, 4019), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4155, 4186), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox'], {}), 
'(self.groupBox)\n', (4171, 4186), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4379, 4404), 'PyQt5.QtWidgets.QGroupBox', 'QtWidgets.QGroupBox', (['Form'], {}), '(Form)\n', (4398, 4404), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4483, 4521), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (4504, 4521), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4597, 4630), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (4613, 4630), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4784, 4817), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (4800, 4817), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4971, 5004), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (4987, 5004), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5158, 5191), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.groupBox_2'], {}), '(self.groupBox_2)\n', (5174, 5191), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5421, 5464), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['Form'], {}), '(Form)\n', (5458, 5464), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1817, 1837), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(70)', '(70)'], {}), '(70, 70)\n', (1829, 1837), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')]
|
import matrices_new_extended as mne
import numpy as np
import sympy as sp
from equality_check import Point
# Symbolic coordinates shared by every test below; Point.base_point is the
# homogeneous point [x, y, z, 1] that each candidate matrix is applied to.
x, y, z = sp.symbols("x y z")
Point.base_point = np.array([x, y, z, 1])
class Test_Axis_2_x0x:
    """Tests for the 2-fold rotation matrices about [x, 0, x]-type axes.

    Each test applies one matrix from matrices_new_extended to the symbolic
    base point and compares the image against the expected point.
    """

    def test_matrix_2_x0x(self):
        result = Point.calculate(mne._matrix_2_x0x)
        assert result == Point([z, -y, x, 1])

    def test_matrix_2_1_mqx0x_q0q(self):
        result = Point.calculate(mne._matrix_2_1_mqx0x_q0q)
        assert result == Point([z, -y, 1 + x, 1])

    def test_matrix_2_xqx(self):
        result = Point.calculate(mne._matrix_2_xqx)
        assert result == Point([z, 1 - y, x, 1])

    def test_matrix_2_1_qx0x_q0mq(self):
        result = Point.calculate(mne._matrix_2_1_qx0x_q0mq)
        assert result == Point([1 + z, -y, x, 1])

    def test_matrix_2_1_xqx_q0q(self):
        result = Point.calculate(mne._matrix_2_1_xqx_q0q)
        assert result == Point([1 + z, 1 - y, 1 + x, 1])

    def test_matrix_2_1_qx3ox_h0h(self):
        result = Point.calculate(mne._matrix_2_1_qx3ox_h0h)
        assert result == Point([1.5 + z, 1.5 - y, 0.5 + x, 1])

    def test_matrix_2_1_mqxox_h0h(self):
        result = Point.calculate(mne._matrix_2_1_mqxox_h0h)
        assert result == Point([0.5 + z, 0.5 - y, 1.5 + x, 1])

    def test_matrix_2_1_xox_q0q(self):
        result = Point.calculate(mne._matrix_2_1_xox_q0q)
        assert result == Point([0.5 + z, 0.5 - y, 0.5 + x, 1])

    def test_matrix_2_1_mqx3ox_h0h(self):
        result = Point.calculate(mne._matrix_2_1_mqx3ox_h0h)
        assert result == Point([0.5 + z, 1.5 - y, 1.5 + x, 1])

    def test_matrix_2_1_xox_3q03q(self):
        result = Point.calculate(mne._matrix_2_1_xox_3q03q)
        assert result == Point([1.5 + z, 0.5 - y, 1.5 + x, 1])
|
[
"sympy.symbols",
"numpy.array",
"equality_check.Point.calculate",
"equality_check.Point"
] |
[((118, 137), 'sympy.symbols', 'sp.symbols', (['"""x y z"""'], {}), "('x y z')\n", (128, 137), True, 'import sympy as sp\n'), ((157, 179), 'numpy.array', 'np.array', (['[x, y, z, 1]'], {}), '([x, y, z, 1])\n', (165, 179), True, 'import numpy as np\n'), ((258, 278), 'equality_check.Point', 'Point', (['[z, -y, x, 1]'], {}), '([z, -y, x, 1])\n', (263, 278), False, 'from equality_check import Point\n'), ((301, 335), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_x0x'], {}), '(mne._matrix_2_x0x)\n', (316, 335), False, 'from equality_check import Point\n'), ((435, 459), 'equality_check.Point', 'Point', (['[z, -y, 1 + x, 1]'], {}), '([z, -y, 1 + x, 1])\n', (440, 459), False, 'from equality_check import Point\n'), ((480, 522), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_1_mqx0x_q0q'], {}), '(mne._matrix_2_1_mqx0x_q0q)\n', (495, 522), False, 'from equality_check import Point\n'), ((614, 637), 'equality_check.Point', 'Point', (['[z, 1 - y, x, 1]'], {}), '([z, 1 - y, x, 1])\n', (619, 637), False, 'from equality_check import Point\n'), ((658, 692), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_xqx'], {}), '(mne._matrix_2_xqx)\n', (673, 692), False, 'from equality_check import Point\n'), ((792, 816), 'equality_check.Point', 'Point', (['[1 + z, -y, x, 1]'], {}), '([1 + z, -y, x, 1])\n', (797, 816), False, 'from equality_check import Point\n'), ((837, 879), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_1_qx0x_q0mq'], {}), '(mne._matrix_2_1_qx0x_q0mq)\n', (852, 879), False, 'from equality_check import Point\n'), ((977, 1008), 'equality_check.Point', 'Point', (['[1 + z, 1 - y, 1 + x, 1]'], {}), '([1 + z, 1 - y, 1 + x, 1])\n', (982, 1008), False, 'from equality_check import Point\n'), ((1025, 1065), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_1_xqx_q0q'], {}), '(mne._matrix_2_1_xqx_q0q)\n', (1040, 1065), False, 'from equality_check import Point\n'), ((1165, 
1202), 'equality_check.Point', 'Point', (['[1.5 + z, 1.5 - y, 0.5 + x, 1]'], {}), '([1.5 + z, 1.5 - y, 0.5 + x, 1])\n', (1170, 1202), False, 'from equality_check import Point\n'), ((1219, 1261), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_1_qx3ox_h0h'], {}), '(mne._matrix_2_1_qx3ox_h0h)\n', (1234, 1261), False, 'from equality_check import Point\n'), ((1361, 1398), 'equality_check.Point', 'Point', (['[0.5 + z, 0.5 - y, 1.5 + x, 1]'], {}), '([0.5 + z, 0.5 - y, 1.5 + x, 1])\n', (1366, 1398), False, 'from equality_check import Point\n'), ((1415, 1457), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_1_mqxox_h0h'], {}), '(mne._matrix_2_1_mqxox_h0h)\n', (1430, 1457), False, 'from equality_check import Point\n'), ((1555, 1592), 'equality_check.Point', 'Point', (['[0.5 + z, 0.5 - y, 0.5 + x, 1]'], {}), '([0.5 + z, 0.5 - y, 0.5 + x, 1])\n', (1560, 1592), False, 'from equality_check import Point\n'), ((1609, 1649), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_1_xox_q0q'], {}), '(mne._matrix_2_1_xox_q0q)\n', (1624, 1649), False, 'from equality_check import Point\n'), ((1750, 1787), 'equality_check.Point', 'Point', (['[0.5 + z, 1.5 - y, 1.5 + x, 1]'], {}), '([0.5 + z, 1.5 - y, 1.5 + x, 1])\n', (1755, 1787), False, 'from equality_check import Point\n'), ((1804, 1847), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_1_mqx3ox_h0h'], {}), '(mne._matrix_2_1_mqx3ox_h0h)\n', (1819, 1847), False, 'from equality_check import Point\n'), ((1947, 1984), 'equality_check.Point', 'Point', (['[1.5 + z, 0.5 - y, 1.5 + x, 1]'], {}), '([1.5 + z, 0.5 - y, 1.5 + x, 1])\n', (1952, 1984), False, 'from equality_check import Point\n'), ((2001, 2043), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_1_xox_3q03q'], {}), '(mne._matrix_2_1_xox_3q03q)\n', (2016, 2043), False, 'from equality_check import Point\n')]
|
# Copyright 2018 Google, Inc.,
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules for encoding and decoding observations."""
import sonnet as snt
import tensorflow as tf
from . import batch_dist
from . import dist_module
from . import util
class EncoderSequence(snt.Sequential):
  """snt.Sequential subclass that also exposes output_size.

  The output size of the whole sequence is, by definition, the output size
  of its last layer.
  """

  @property
  def output_size(self):
    """Returns the output size reported by the final layer."""
    final_layer = self.layers[-1]
    return final_layer.output_size
class FlattenEncoder(snt.AbstractModule):
  """Forwards the flattened input.

  Flattens each (possibly nested) input tensor past the batch dimension and
  concatenates the results into a single float tensor. Input shapes seen
  across calls are merged so output_size becomes progressively more precise.
  """

  def __init__(self, input_size=None, name=None):
    super(FlattenEncoder, self).__init__(name=name)
    # Nested structure of tf.TensorShape (sans batch dim), or None until
    # a shape has been observed via the constructor or _build.
    self._input_size = None
    if input_size is not None:
      self._merge_input_sizes(input_size)

  def _merge_input_sizes(self, input_size):
    # First observation: just record the shapes.
    if self._input_size is None:
      self._input_size = snt.nest.map(tf.TensorShape, input_size)
      return
    # Subsequent observations: merge element-wise; merge_with raises if the
    # new shapes are incompatible with what was seen before.
    self._input_size = snt.nest.map(
        lambda cur_size, inp_size: cur_size.merge_with(inp_size),
        self._input_size,
        input_size)

  @property
  def output_size(self):
    """Returns the output Tensor shapes."""
    if self._input_size is None:
      return tf.TensorShape([None])
    flattened_size = 0
    for inp_size in snt.nest.flatten(self._input_size):
      num_elements = inp_size.num_elements()
      # Any unknown dimension makes the flattened size unknown too.
      if num_elements is None:
        return tf.TensorShape([None])
      flattened_size += num_elements
    return tf.TensorShape([flattened_size])

  def _build(self, inp):
    # Record the per-example shapes (batch dim stripped) before flattening.
    input_sizes = snt.nest.map(lambda inp_i: inp_i.get_shape()[1:], inp)
    self._merge_input_sizes(input_sizes)
    flatten = snt.BatchFlatten(preserve_dims=1)
    flat_inp = snt.nest.map(lambda inp_i: tf.to_float(flatten(inp_i)), inp)
    ret = util.concat_features(flat_inp)
    util.set_tensor_shapes(ret, self.output_size, add_batch_dims=1)
    return ret
def MLPObsEncoder(hparams, name=None):
  """Observation -> encoded, flat observation.

  Builds an encoder that flattens observations and feeds them through an
  MLP whose layer sizes come from hparams.obs_encoder_fc_layers.
  """
  if not name:
    name = "mlp_obs_encoder"
  fc_net = util.make_mlp(
      hparams, hparams.obs_encoder_fc_layers, name=name + "/mlp")
  return EncoderSequence([FlattenEncoder(), fc_net], name=name)
class DecoderSequence(dist_module.DistModule):
  """A sequence of zero or more AbstractModules, followed by a DistModule.

  Inputs flow through the optional encoder modules and the final decoder;
  the distribution interface (event_dtype/event_size/dist) is delegated to
  that final decoder.
  """

  def __init__(self, input_encoders, decoder, name=None):
    super(DecoderSequence, self).__init__(name=name)
    self._input_encoders = input_encoders
    self._decoder = decoder

  @property
  def event_dtype(self):
    # Delegated: events are whatever the wrapped decoder produces.
    return self._decoder.event_dtype

  @property
  def event_size(self):
    return self._decoder.event_size

  def dist(self, params, name=None):
    return self._decoder.dist(params, name=name)

  def _build(self, inputs):
    encoded = inputs
    if self._input_encoders:
      encoded = snt.Sequential(self._input_encoders)(encoded)
    return self._decoder(encoded)
def MLPObsDecoder(hparams, decoder, param_size, name=None):
  """Inputs -> decoder(obs; mlp(inputs)).

  The MLP maps concatenated inputs to `param_size` distribution parameters,
  which are then handed to `decoder`.
  """
  if not name:
    name = "mlp_" + decoder.module_name
  layer_sizes = hparams.obs_decoder_fc_hidden_layers + [param_size]
  param_net = util.make_mlp(hparams, layer_sizes, name=name + "/mlp")
  return DecoderSequence([util.concat_features, param_net], decoder,
                         name=name)
class BernoulliDecoder(dist_module.DistModule):
  """Inputs -> Bernoulli(obs; logits=inputs).

  The raw network outputs are used directly as Bernoulli logits.
  """

  def __init__(self, dtype=tf.int32, squeeze_input=False, name=None):
    self._dtype = dtype
    self._squeeze_input = squeeze_input
    super(BernoulliDecoder, self).__init__(name=name)

  @property
  def event_dtype(self):
    return self._dtype

  @property
  def event_size(self):
    # Scalar events.
    return tf.TensorShape([])

  def _build(self, inputs):
    if not self._squeeze_input:
      return inputs
    # Drop the trailing singleton parameter dimension.
    return tf.squeeze(inputs, axis=-1)

  def dist(self, params, name=None):
    dist_name = name or self.module_name + "_dist"
    return tf.distributions.Bernoulli(
        logits=params,
        dtype=self._dtype,
        name=dist_name)
class BetaDecoder(dist_module.DistModule):
  """Inputs -> Beta(obs; conc1, conc0).

  The last input dimension is split in half to yield the two concentration
  parameters, optionally passed through a positivity projection.
  """

  def __init__(self, positive_projection=None, squeeze_input=False,
               name=None):
    self._positive_projection = positive_projection
    self._squeeze_input = squeeze_input
    super(BetaDecoder, self).__init__(name=name)

  @property
  def event_dtype(self):
    # Beta events are continuous.
    return tf.float32

  @property
  def event_size(self):
    return tf.TensorShape([])

  def _build(self, inputs):
    conc1, conc0 = tf.split(inputs, 2, axis=-1)
    project = self._positive_projection
    if project is not None:
      # Concentrations must be positive; project raw outputs accordingly.
      conc1 = project(conc1)
      conc0 = project(conc0)
    if self._squeeze_input:
      conc1 = tf.squeeze(conc1, axis=-1)
      conc0 = tf.squeeze(conc0, axis=-1)
    return (conc1, conc0)

  def dist(self, params, name=None):
    conc1, conc0 = params
    dist_name = name or self.module_name + "_dist"
    return tf.distributions.Beta(conc1, conc0, name=dist_name)
class _BinomialDist(tf.contrib.distributions.Binomial):
  """Work around missing functionality in Binomial.

  Adds float-coercing _log_prob and a sampler (the base class lacks one;
  sampling here is done by inverting the CDF over all possible counts).
  """

  def __init__(self, total_count, logits=None, probs=None, name=None):
    # Keep the integer total_count around; the base class only sees floats.
    self._total_count = total_count
    super(_BinomialDist, self).__init__(
        total_count=tf.to_float(total_count),
        logits=logits, probs=probs,
        name=name or "Binomial")

  def _log_prob(self, counts):
    # Base implementation expects float counts.
    return super(_BinomialDist, self)._log_prob(tf.to_float(counts))

  def _sample_n(self, n, seed=None):
    # Enumerate every possible count 0..total_count ...
    all_counts = tf.to_float(tf.range(self._total_count + 1))
    # ... broadcast against the batch shape ...
    for batch_dim in range(self.batch_shape.ndims):
      all_counts = tf.expand_dims(all_counts, axis=-1)
    # ... and evaluate the CDF at each count.
    all_cdfs = tf.map_fn(self.cdf, all_counts)
    shape = tf.concat([[n], self.batch_shape_tensor()], 0)
    uniform = tf.random_uniform(shape, seed=seed)
    # Inverse-CDF sampling: each sample is the number of CDF values that the
    # uniform draw exceeds (accumulated with foldl over the count axis).
    return tf.foldl(
        lambda acc, cdfs: tf.where(uniform > cdfs, acc + 1, acc),
        all_cdfs,
        initializer=tf.zeros(shape, dtype=tf.int32))
class BinomialDecoder(dist_module.DistModule):
  """Inputs -> Binomial(obs; total_count, logits).

  Raw network outputs are used as Binomial logits; total_count is fixed at
  construction time.
  """

  def __init__(self, total_count=None, squeeze_input=False, name=None):
    self._total_count = total_count
    self._squeeze_input = squeeze_input
    super(BinomialDecoder, self).__init__(name=name)

  @property
  def event_dtype(self):
    return tf.int32

  @property
  def event_size(self):
    # Scalar events.
    return tf.TensorShape([])

  def _build(self, inputs):
    if not self._squeeze_input:
      return inputs
    # Drop the trailing singleton parameter dimension.
    return tf.squeeze(inputs, axis=-1)

  def dist(self, params, name=None):
    dist_name = name or self.module_name + "_dist"
    return _BinomialDist(
        self._total_count,
        logits=params,
        name=dist_name)
class CategoricalDecoder(dist_module.DistModule):
  """Inputs -> Categorical(obs; logits=inputs).

  Raw network outputs serve directly as class logits.
  """

  def __init__(self, dtype=tf.int32, name=None):
    self._dtype = dtype
    super(CategoricalDecoder, self).__init__(name=name)

  @property
  def event_dtype(self):
    return self._dtype

  @property
  def event_size(self):
    # Scalar events (the class index).
    return tf.TensorShape([])

  def _build(self, inputs):
    # Logits are passed through unchanged.
    return inputs

  def dist(self, params, name=None):
    dist_name = name or self.module_name + "_dist"
    return tf.distributions.Categorical(
        logits=params,
        dtype=self._dtype,
        name=dist_name)
class NormalDecoder(dist_module.DistModule):
  """Inputs -> Normal(obs; loc=half(inputs), scale=project(half(inputs)))

  The last input dimension is split in half into (loc, raw_scale); the
  scale half is optionally passed through a positivity projection.
  """

  def __init__(self, positive_projection=None, name=None):
    self._positive_projection = positive_projection
    super(NormalDecoder, self).__init__(name=name)

  @property
  def event_dtype(self):
    return tf.float32

  @property
  def event_size(self):
    # Scalar events.
    return tf.TensorShape([])

  def _build(self, inputs):
    loc, scale = tf.split(inputs, 2, axis=-1)
    project = self._positive_projection
    if project is not None:
      # Scale must be positive; project the raw half accordingly.
      scale = project(scale)
    return loc, scale

  def dist(self, params, name=None):
    loc, scale = params
    dist_name = name or self.module_name + "_dist"
    return tf.distributions.Normal(
        loc=loc,
        scale=scale,
        name=dist_name)
class BatchDecoder(dist_module.DistModule):
  """Wrap a decoder to model batches of events.

  The wrapped decoder's scalar distribution is reinterpreted as a batch of
  independent events with shape `event_size`.
  """

  def __init__(self, decoder, event_size, name=None):
    self._decoder = decoder
    self._event_size = tf.TensorShape(event_size)
    super(BatchDecoder, self).__init__(name=name)

  @property
  def event_dtype(self):
    # Delegated to the wrapped decoder.
    return self._decoder.event_dtype

  @property
  def event_size(self):
    return self._event_size

  def _build(self, inputs):
    return self._decoder(inputs)

  def dist(self, params, name=None):
    inner = self._decoder.dist(params, name=name)
    return batch_dist.BatchDistribution(inner, ndims=self._event_size.ndims)
class GroupDecoder(dist_module.DistModule):
  """Group up decoders to model a set of independent of events.

  `decoders` is a nest of DistModules; inputs/params are matching nests and
  are dispatched element-wise to the corresponding decoder.
  """

  def __init__(self, decoders, name=None):
    self._decoders = decoders
    super(GroupDecoder, self).__init__(name=name)

  @property
  def event_dtype(self):
    # Nest of per-decoder dtypes, mirroring the structure of self._decoders.
    return snt.nest.map(lambda dec: dec.event_dtype, self._decoders)

  @property
  def event_size(self):
    return snt.nest.map(lambda dec: dec.event_size, self._decoders)

  def _build(self, inputs):
    # map_up_to lets each decoder consume a whole sub-nest of inputs.
    return snt.nest.map_up_to(
        self._decoders,
        lambda dec, input_: dec(input_),
        self._decoders, inputs)

  def dist(self, params, name=None):
    # Build sub-distributions inside this module's variable scope so their
    # ops/variables are grouped under a shared name scope.
    with self._enter_variable_scope():
      with tf.name_scope(name or "group"):
        dists = snt.nest.map_up_to(
            self._decoders,
            lambda dec, param: dec.dist(param),
            self._decoders, params)
        return batch_dist.GroupDistribution(dists, name=name)
|
[
"tensorflow.distributions.Bernoulli",
"sonnet.nest.flatten",
"sonnet.Sequential",
"tensorflow.split",
"tensorflow.distributions.Categorical",
"tensorflow.TensorShape",
"tensorflow.to_float",
"tensorflow.squeeze",
"tensorflow.map_fn",
"sonnet.nest.map",
"tensorflow.name_scope",
"tensorflow.range",
"tensorflow.distributions.Beta",
"tensorflow.distributions.Normal",
"tensorflow.where",
"tensorflow.expand_dims",
"tensorflow.random_uniform",
"tensorflow.zeros",
"sonnet.BatchFlatten"
] |
[((1820, 1854), 'sonnet.nest.flatten', 'snt.nest.flatten', (['self._input_size'], {}), '(self._input_size)\n', (1836, 1854), True, 'import sonnet as snt\n'), ((2048, 2080), 'tensorflow.TensorShape', 'tf.TensorShape', (['[flattened_size]'], {}), '([flattened_size])\n', (2062, 2080), True, 'import tensorflow as tf\n'), ((2250, 2283), 'sonnet.BatchFlatten', 'snt.BatchFlatten', ([], {'preserve_dims': '(1)'}), '(preserve_dims=1)\n', (2266, 2283), True, 'import sonnet as snt\n'), ((4338, 4356), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (4352, 4356), True, 'import tensorflow as tf\n'), ((4546, 4652), 'tensorflow.distributions.Bernoulli', 'tf.distributions.Bernoulli', ([], {'logits': 'params', 'dtype': 'self._dtype', 'name': "(name or self.module_name + '_dist')"}), "(logits=params, dtype=self._dtype, name=name or \n self.module_name + '_dist')\n", (4572, 4652), True, 'import tensorflow as tf\n'), ((5186, 5204), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (5200, 5204), True, 'import tensorflow as tf\n'), ((5259, 5287), 'tensorflow.split', 'tf.split', (['inputs', '(2)'], {'axis': '(-1)'}), '(inputs, 2, axis=-1)\n', (5267, 5287), True, 'import tensorflow as tf\n'), ((5685, 5761), 'tensorflow.distributions.Beta', 'tf.distributions.Beta', (['conc1', 'conc0'], {'name': "(name or self.module_name + '_dist')"}), "(conc1, conc0, name=name or self.module_name + '_dist')\n", (5706, 5761), True, 'import tensorflow as tf\n'), ((6537, 6568), 'tensorflow.map_fn', 'tf.map_fn', (['self.cdf', 'all_counts'], {}), '(self.cdf, all_counts)\n', (6546, 6568), True, 'import tensorflow as tf\n'), ((6650, 6685), 'tensorflow.random_uniform', 'tf.random_uniform', (['shape'], {'seed': 'seed'}), '(shape, seed=seed)\n', (6667, 6685), True, 'import tensorflow as tf\n'), ((7303, 7321), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (7317, 7321), True, 'import tensorflow as tf\n'), ((8007, 8025), 'tensorflow.TensorShape', 
'tf.TensorShape', (['[]'], {}), '([])\n', (8021, 8025), True, 'import tensorflow as tf\n'), ((8134, 8241), 'tensorflow.distributions.Categorical', 'tf.distributions.Categorical', ([], {'logits': 'params', 'dtype': 'self._dtype', 'name': "(name or self.module_name + '_dist')"}), "(logits=params, dtype=self._dtype, name=name or\n self.module_name + '_dist')\n", (8162, 8241), True, 'import tensorflow as tf\n'), ((8698, 8716), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (8712, 8716), True, 'import tensorflow as tf\n'), ((8769, 8797), 'tensorflow.split', 'tf.split', (['inputs', '(2)'], {'axis': '(-1)'}), '(inputs, 2, axis=-1)\n', (8777, 8797), True, 'import tensorflow as tf\n'), ((9010, 9100), 'tensorflow.distributions.Normal', 'tf.distributions.Normal', ([], {'loc': 'loc', 'scale': 'scale', 'name': "(name or self.module_name + '_dist')"}), "(loc=loc, scale=scale, name=name or self.module_name +\n '_dist')\n", (9033, 9100), True, 'import tensorflow as tf\n'), ((9349, 9375), 'tensorflow.TensorShape', 'tf.TensorShape', (['event_size'], {}), '(event_size)\n', (9363, 9375), True, 'import tensorflow as tf\n'), ((10138, 10195), 'sonnet.nest.map', 'snt.nest.map', (['(lambda dec: dec.event_dtype)', 'self._decoders'], {}), '(lambda dec: dec.event_dtype, self._decoders)\n', (10150, 10195), True, 'import sonnet as snt\n'), ((10252, 10308), 'sonnet.nest.map', 'snt.nest.map', (['(lambda dec: dec.event_size)', 'self._decoders'], {}), '(lambda dec: dec.event_size, self._decoders)\n', (10264, 10308), True, 'import sonnet as snt\n'), ((1375, 1415), 'sonnet.nest.map', 'snt.nest.map', (['tf.TensorShape', 'input_size'], {}), '(tf.TensorShape, input_size)\n', (1387, 1415), True, 'import sonnet as snt\n'), ((1746, 1768), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None]'], {}), '([None])\n', (1760, 1768), True, 'import tensorflow as tf\n'), ((4441, 4468), 'tensorflow.squeeze', 'tf.squeeze', (['inputs'], {'axis': '(-1)'}), '(inputs, axis=-1)\n', (4451, 4468), True, 
'import tensorflow as tf\n'), ((5496, 5522), 'tensorflow.squeeze', 'tf.squeeze', (['conc1'], {'axis': '(-1)'}), '(conc1, axis=-1)\n', (5506, 5522), True, 'import tensorflow as tf\n'), ((5543, 5569), 'tensorflow.squeeze', 'tf.squeeze', (['conc0'], {'axis': '(-1)'}), '(conc0, axis=-1)\n', (5553, 5569), True, 'import tensorflow as tf\n'), ((6274, 6293), 'tensorflow.to_float', 'tf.to_float', (['counts'], {}), '(counts)\n', (6285, 6293), True, 'import tensorflow as tf\n'), ((6368, 6399), 'tensorflow.range', 'tf.range', (['(self._total_count + 1)'], {}), '(self._total_count + 1)\n', (6376, 6399), True, 'import tensorflow as tf\n'), ((6482, 6517), 'tensorflow.expand_dims', 'tf.expand_dims', (['all_counts'], {'axis': '(-1)'}), '(all_counts, axis=-1)\n', (6496, 6517), True, 'import tensorflow as tf\n'), ((7406, 7433), 'tensorflow.squeeze', 'tf.squeeze', (['inputs'], {'axis': '(-1)'}), '(inputs, axis=-1)\n', (7416, 7433), True, 'import tensorflow as tf\n'), ((1967, 1989), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None]'], {}), '([None])\n', (1981, 1989), True, 'import tensorflow as tf\n'), ((3468, 3504), 'sonnet.Sequential', 'snt.Sequential', (['self._input_encoders'], {}), '(self._input_encoders)\n', (3482, 3504), True, 'import sonnet as snt\n'), ((6085, 6109), 'tensorflow.to_float', 'tf.to_float', (['total_count'], {}), '(total_count)\n', (6096, 6109), True, 'import tensorflow as tf\n'), ((6741, 6779), 'tensorflow.where', 'tf.where', (['(uniform > cdfs)', '(acc + 1)', 'acc'], {}), '(uniform > cdfs, acc + 1, acc)\n', (6749, 6779), True, 'import tensorflow as tf\n'), ((6827, 6858), 'tensorflow.zeros', 'tf.zeros', (['shape'], {'dtype': 'tf.int32'}), '(shape, dtype=tf.int32)\n', (6835, 6858), True, 'import tensorflow as tf\n'), ((10584, 10614), 'tensorflow.name_scope', 'tf.name_scope', (["(name or 'group')"], {}), "(name or 'group')\n", (10597, 10614), True, 'import tensorflow as tf\n')]
|
"""URLs for testing."""
from django.http import HttpResponse
from django.urls import path
from model_reviews import views
def homeview(request):
    """Render the home page."""
    body = "<h1>home page</h1>"
    return HttpResponse(body)
# Routes used by the test suite: a plain home page plus the two
# model_reviews class-based views under test.
urlpatterns = [
    path("", homeview),
    path("bulk", views.BulkReviewsView.as_view()),
    path("review/<int:pk>", views.ReviewView.as_view()),
]
|
[
"model_reviews.views.BulkReviewsView.as_view",
"model_reviews.views.ReviewView.as_view",
"django.http.HttpResponse",
"django.urls.path"
] |
[((187, 221), 'django.http.HttpResponse', 'HttpResponse', (['"""<h1>home page</h1>"""'], {}), "('<h1>home page</h1>')\n", (199, 221), False, 'from django.http import HttpResponse\n'), ((244, 262), 'django.urls.path', 'path', (['""""""', 'homeview'], {}), "('', homeview)\n", (248, 262), False, 'from django.urls import path\n'), ((281, 312), 'model_reviews.views.BulkReviewsView.as_view', 'views.BulkReviewsView.as_view', ([], {}), '()\n', (310, 312), False, 'from model_reviews import views\n'), ((343, 369), 'model_reviews.views.ReviewView.as_view', 'views.ReviewView.as_view', ([], {}), '()\n', (367, 369), False, 'from model_reviews import views\n')]
|
import logging
import os
from typing import Iterable, Union
from urllib.parse import urlparse
import redis
from fastjsonschema import JsonSchemaException # type: ignore
from werkzeug.exceptions import BadRequest, HTTPException
from werkzeug.routing import Map
from werkzeug.wrappers import Response
from .topics import Topics
from .utils.types import (
ApplicationConfig,
ServicesConfig,
StartResponse,
WSGIEnvironment,
)
from .utils.wrappers import Request
# All apps whose URL rules get registered on the Application's url_map.
Apps = (Topics,)

logging.basicConfig(level=logging.DEBUG)
class Application:
    """
    Central application container for this service.

    On construction it binds the services configuration and registers the
    URL rules of every app in ``Apps``; instances are WSGI applications.
    """

    def __init__(self, config: ApplicationConfig) -> None:
        self.config = config
        self.services = config["services"]
        self.url_map = Map()
        for app_cls in Apps:
            self.url_map.add(app_cls(self.services))

    def dispatch_request(self, request: Request) -> Union[Response, HTTPException]:
        """
        Routes the request to the matching app view.

        Returns a Response object or an HTTPException (both are WSGI
        applications themselves).
        """
        url_adapter = self.url_map.bind_to_environ(request.environ)
        try:
            matched_rule, view_args = url_adapter.match(return_rule=True)
            return matched_rule.view.dispatch(request, **view_args)
        except JsonSchemaException as exception:
            # Schema validation failures surface as HTTP 400.
            return BadRequest(exception.message)
        except HTTPException as exception:
            return exception

    def wsgi_application(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> Iterable[bytes]:
        """
        Wraps the environ in Werkzeug's Request, dispatches it, and evaluates
        the resulting Response (or HTTPException) as a WSGI application.
        """
        response = self.dispatch_request(Request(environ))
        return response(environ, start_response)

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> Iterable[bytes]:
        """
        Delegates to ``wsgi_application`` so callers may wrap that method
        with custom middlewares.
        """
        return self.wsgi_application(environ, start_response)
def create_application() -> Application:
    """
    Application factory: builds a new instance of the application.

    Service endpoints come from environment variables; the locker URL is
    parsed and turned into a live redis connection.
    """
    # Read environment variables.
    database_url = os.environ.get(
        "OPENSLIDES_WRITE_SERVICE_DATABASE_URL", "http://localhost:8008/get-elements"
    )
    event_store_url = os.environ.get(
        "OPENSLIDES_WRITE_SERVICE_EVENT_STORE_URL",
        "http://localhost:8008/save",  # TODO: Use correct variables here.
    )
    locker_url = os.environ.get(
        "OPENSLIDES_WRITE_SERVICE_LOCKER_URL", "http://localhost:6379/0"
    )
    # Parse OPENSLIDES_WRITE_SERVICE_LOCKER_URL and connect to redis with it.
    parsed = urlparse(locker_url)
    if not (parsed.hostname and parsed.port and parsed.path):
        raise RuntimeError(
            "Bad environment variable OPENSLIDES_WRITE_SERVICE_LOCKER_URL."
        )
    redis_locker_connection = redis.Redis(
        host=parsed.hostname,
        port=parsed.port,
        db=int(parsed.path.strip("/")),
    )
    # Assemble the application instance from the collected services.
    services = ServicesConfig(
        database=database_url,
        event_store=event_store_url,
        locker=redis_locker_connection,
    )
    return Application(ApplicationConfig(services=services))
|
[
"logging.basicConfig",
"werkzeug.exceptions.BadRequest",
"os.environ.get",
"werkzeug.routing.Map",
"urllib.parse.urlparse"
] |
[((496, 536), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (515, 536), False, 'import logging\n'), ((2658, 2755), 'os.environ.get', 'os.environ.get', (['"""OPENSLIDES_WRITE_SERVICE_DATABASE_URL"""', '"""http://localhost:8008/get-elements"""'], {}), "('OPENSLIDES_WRITE_SERVICE_DATABASE_URL',\n 'http://localhost:8008/get-elements')\n", (2672, 2755), False, 'import os\n'), ((2788, 2880), 'os.environ.get', 'os.environ.get', (['"""OPENSLIDES_WRITE_SERVICE_EVENT_STORE_URL"""', '"""http://localhost:8008/save"""'], {}), "('OPENSLIDES_WRITE_SERVICE_EVENT_STORE_URL',\n 'http://localhost:8008/save')\n", (2802, 2880), False, 'import os\n'), ((2954, 3039), 'os.environ.get', 'os.environ.get', (['"""OPENSLIDES_WRITE_SERVICE_LOCKER_URL"""', '"""http://localhost:6379/0"""'], {}), "('OPENSLIDES_WRITE_SERVICE_LOCKER_URL', 'http://localhost:6379/0'\n )\n", (2968, 3039), False, 'import os\n'), ((3165, 3185), 'urllib.parse.urlparse', 'urlparse', (['locker_url'], {}), '(locker_url)\n', (3173, 3185), False, 'from urllib.parse import urlparse\n'), ((889, 894), 'werkzeug.routing.Map', 'Map', ([], {}), '()\n', (892, 894), False, 'from werkzeug.routing import Map\n'), ((1517, 1546), 'werkzeug.exceptions.BadRequest', 'BadRequest', (['exception.message'], {}), '(exception.message)\n', (1527, 1546), False, 'from werkzeug.exceptions import BadRequest, HTTPException\n')]
|
from graphql.language import parse
from graphql.utilities import build_schema, find_deprecated_usages
def describe_find_deprecated_usages():
    # pytest-describe style suite: each nested function below is collected as
    # a test case, so the nested function names are significant.

    # Schema under test: one deprecated enum value and one deprecated field.
    schema = build_schema(
        """
        enum EnumType {
          NORMAL_VALUE
          DEPRECATED_VALUE @deprecated(reason: "Some enum reason.")
        }
        type Query {
          normalField(enumArg: EnumType): String
          deprecatedField: String @deprecated(reason: "Some field reason.")
        }
        """
    )
    # A query that uses nothing deprecated yields no errors.
    def should_report_empty_set_for_no_deprecated_usages():
        errors = find_deprecated_usages(
            schema, parse("{ normalField(enumArg: [NORMAL_VALUE]) }")
        )
        assert errors == []
    # Unknown fields, arguments, and enum values are ignored, not reported.
    def should_ignore_unknown_stuff():
        errors = find_deprecated_usages(
            schema,
            parse(
                """
                {
                  unknownField(unknownArg: UNKNOWN_VALUE)
                  normalField(enumArg: UNKNOWN_VALUE)
                }
                """
            ),
        )
        assert errors == []
    # Selecting a deprecated field produces one error with its reason.
    def should_report_usage_of_deprecated_fields():
        errors = find_deprecated_usages(
            schema, parse("{ normalField, deprecatedField }")
        )
        error_messages = [err.message for err in errors]
        assert error_messages == [
            "The field 'Query.deprecatedField' is deprecated. Some field reason."
        ]
    # Passing a deprecated enum value produces one error with its reason.
    def should_report_usage_of_deprecated_enums():
        errors = find_deprecated_usages(
            schema,
            parse(
                """
                {
                  normalField(enumArg: [NORMAL_VALUE, DEPRECATED_VALUE])
                }
                """
            ),
        )
        error_messages = [err.message for err in errors]
        assert error_messages == [
            "The enum value 'EnumType.DEPRECATED_VALUE' is deprecated."
            " Some enum reason."
        ]
|
[
"graphql.language.parse",
"graphql.utilities.build_schema"
] |
[((157, 478), 'graphql.utilities.build_schema', 'build_schema', (['"""\n enum EnumType {\n NORMAL_VALUE\n DEPRECATED_VALUE @deprecated(reason: "Some enum reason.")\n }\n\n type Query {\n normalField(enumArg: EnumType): String\n deprecatedField: String @deprecated(reason: "Some field reason.")\n }\n """'], {}), '(\n """\n enum EnumType {\n NORMAL_VALUE\n DEPRECATED_VALUE @deprecated(reason: "Some enum reason.")\n }\n\n type Query {\n normalField(enumArg: EnumType): String\n deprecatedField: String @deprecated(reason: "Some field reason.")\n }\n """\n )\n', (169, 478), False, 'from graphql.utilities import build_schema, find_deprecated_usages\n'), ((605, 654), 'graphql.language.parse', 'parse', (['"""{ normalField(enumArg: [NORMAL_VALUE]) }"""'], {}), "('{ normalField(enumArg: [NORMAL_VALUE]) }')\n", (610, 654), False, 'from graphql.language import parse\n'), ((807, 995), 'graphql.language.parse', 'parse', (['"""\n {\n unknownField(unknownArg: UNKNOWN_VALUE)\n normalField(enumArg: UNKNOWN_VALUE)\n }\n """'], {}), '(\n """\n {\n unknownField(unknownArg: UNKNOWN_VALUE)\n normalField(enumArg: UNKNOWN_VALUE)\n }\n """\n )\n', (812, 995), False, 'from graphql.language import parse\n'), ((1170, 1211), 'graphql.language.parse', 'parse', (['"""{ normalField, deprecatedField }"""'], {}), "('{ normalField, deprecatedField }')\n", (1175, 1211), False, 'from graphql.language import parse\n'), ((1533, 1682), 'graphql.language.parse', 'parse', (['"""\n {\n normalField(enumArg: [NORMAL_VALUE, DEPRECATED_VALUE])\n }\n """'], {}), '(\n """\n {\n normalField(enumArg: [NORMAL_VALUE, DEPRECATED_VALUE])\n }\n """\n )\n', (1538, 1682), False, 'from graphql.language import parse\n')]
|
from sqlalchemy import Table, Column, ForeignKey, BigInteger, Sequence
from sqlalchemy.orm import relationship, backref
def defineRelation11(TableA, TableB):
    """Define a 1:1 relation between two tables.

    The foreign-key column is added on TableA only; both models receive a
    scalar (``uselist=False``) relationship attribute named after the other
    model's singular table name.

    Parameters
    ----------
    TableA
        Model of first table
    TableB
        Model of second table
    """
    nameA = TableA.__tablename__
    nameB = TableB.__tablename__
    # Singularize by dropping a trailing 's' from each table name.
    singularB = nameB[:-1] if nameB[-1] == 's' else nameB
    singularA = nameA[:-1] if nameA[-1] == 's' else nameA
    setattr(TableA, f'{singularB}_id', Column(ForeignKey(f'{nameB}.id')))
    setattr(TableA, singularB,
            relationship(TableB, back_populates=singularA, uselist=False))
    setattr(TableB, singularA,
            relationship(TableA, back_populates=singularB, uselist=False))
    return
def defineRelation1N(TableA, TableB, tableAItemName=None, tableBItemName=None):
    """Define a 1:N relation (one TableA row owns many TableB rows).

    Parameters
    ----------
    TableA
        Model of first table (the "one" side)
    TableB
        Model of second table (the "many" side)
    tableAItemName: str
        if specified, the name of the new field in TableB referring back to
        TableA (aka mother => new fields are mother + mother_id)
    tableBItemName: str
        if specified, the name of the new collection field in TableA holding
        TableB items (aka children)
    """
    nameA = tableAItemName if tableAItemName is not None else TableA.__tablename__
    nameB = tableBItemName if tableBItemName is not None else TableB.__tablename__
    singularA = TableA.__tablename__
    if singularA[-1] == 's':
        singularA = singularA[:-1]
    # The foreign-key column lives on the "many" side (TableB).
    setattr(TableB, f'{singularA}_id', Column(ForeignKey(f'{nameA}.id')))
    setattr(TableB, singularA, relationship(TableA, back_populates=nameB))
    setattr(TableA, nameB, relationship(TableB, back_populates=singularA))
    return
# inspired by and based on https://docs.sqlalchemy.org/en/14/orm/basic_relationships.html
def defineRelationNM(TableA, TableB, sequence=Sequence('all_id_seq'), tableAItemName=None, tableBItemName=None):
    """defines relation N:M (TableA : TableB) between two tables;
    the intermediate (association) table is defined automatically

    NOTE(review): the default ``sequence`` is created once at import time and
    shared by every call that uses the default -- presumably intentional (one
    id sequence for all association tables); confirm.

    Parameters
    ----------
    TableA
        Model of first table
    TableB
        Model of second table
    sequence
        Sequence backing the association table's ``id`` column; must not be None
    tableAItemName: str
        if specified, replaces TableA's table name in the association table name
        and relationship attributes
    tableBItemName: str
        if specified, replaces TableB's table name in the association table name
        and relationship attributes
    """
    # NOTE(review): ``assert`` is stripped under ``python -O``; an explicit
    # raise would be safer, but would change the exception type callers see.
    assert not(sequence is None), "sequence must be defined explicitly"
    tableAName = TableA.__tablename__ if tableAItemName is None else tableAItemName
    tableBName = TableB.__tablename__ if tableBItemName is None else tableBItemName
    # Association table '<A>_<B>' with its own id plus FKs to both sides.
    interTable = Table(
        f'{tableAName}_{tableBName}', TableA.metadata,
        Column('id', BigInteger, sequence, primary_key=True),
        Column(f'{tableAName}_id', ForeignKey(f'{tableAName}.id'), primary_key=True),
        Column(f'{tableBName}_id', ForeignKey(f'{tableBName}.id'), primary_key=True)
    )
    # Symmetric many-to-many relationships routed through the association table.
    setattr(TableA, tableBName, relationship(TableB, secondary=interTable, back_populates=tableAName))
    setattr(TableB, tableAName, relationship(TableA, secondary=interTable, back_populates=tableBName))
    return
|
[
"sqlalchemy.orm.relationship",
"sqlalchemy.ForeignKey",
"sqlalchemy.Column",
"sqlalchemy.Sequence"
] |
[((2424, 2446), 'sqlalchemy.Sequence', 'Sequence', (['"""all_id_seq"""'], {}), "('all_id_seq')\n", (2432, 2446), False, 'from sqlalchemy import Table, Column, ForeignKey, BigInteger, Sequence\n'), ((791, 866), 'sqlalchemy.orm.relationship', 'relationship', (['TableB'], {'back_populates': 'f"""{tableANameSingular}"""', 'uselist': '(False)'}), "(TableB, back_populates=f'{tableANameSingular}', uselist=False)\n", (803, 866), False, 'from sqlalchemy.orm import relationship, backref\n'), ((998, 1073), 'sqlalchemy.orm.relationship', 'relationship', (['TableA'], {'back_populates': 'f"""{tableBNameSingular}"""', 'uselist': '(False)'}), "(TableA, back_populates=f'{tableBNameSingular}', uselist=False)\n", (1010, 1073), False, 'from sqlalchemy.orm import relationship, backref\n'), ((2097, 2149), 'sqlalchemy.orm.relationship', 'relationship', (['TableA'], {'back_populates': 'f"""{tableBName}"""'}), "(TableA, back_populates=f'{tableBName}')\n", (2109, 2149), False, 'from sqlalchemy.orm import relationship, backref\n'), ((2184, 2244), 'sqlalchemy.orm.relationship', 'relationship', (['TableB'], {'back_populates': 'f"""{tableANameSingular}"""'}), "(TableB, back_populates=f'{tableANameSingular}')\n", (2196, 2244), False, 'from sqlalchemy.orm import relationship, backref\n'), ((3059, 3111), 'sqlalchemy.Column', 'Column', (['"""id"""', 'BigInteger', 'sequence'], {'primary_key': '(True)'}), "('id', BigInteger, sequence, primary_key=True)\n", (3065, 3111), False, 'from sqlalchemy import Table, Column, ForeignKey, BigInteger, Sequence\n'), ((3323, 3392), 'sqlalchemy.orm.relationship', 'relationship', (['TableB'], {'secondary': 'interTable', 'back_populates': 'tableAName'}), '(TableB, secondary=interTable, back_populates=tableAName)\n', (3335, 3392), False, 'from sqlalchemy.orm import relationship, backref\n'), ((3456, 3525), 'sqlalchemy.orm.relationship', 'relationship', (['TableA'], {'secondary': 'interTable', 'back_populates': 'tableBName'}), '(TableA, secondary=interTable, 
back_populates=tableBName)\n', (3468, 3525), False, 'from sqlalchemy.orm import relationship, backref\n'), ((718, 748), 'sqlalchemy.ForeignKey', 'ForeignKey', (['f"""{tableBName}.id"""'], {}), "(f'{tableBName}.id')\n", (728, 748), False, 'from sqlalchemy import Table, Column, ForeignKey, BigInteger, Sequence\n'), ((2024, 2054), 'sqlalchemy.ForeignKey', 'ForeignKey', (['f"""{tableAName}.id"""'], {}), "(f'{tableAName}.id')\n", (2034, 2054), False, 'from sqlalchemy import Table, Column, ForeignKey, BigInteger, Sequence\n'), ((3148, 3178), 'sqlalchemy.ForeignKey', 'ForeignKey', (['f"""{tableAName}.id"""'], {}), "(f'{tableAName}.id')\n", (3158, 3178), False, 'from sqlalchemy import Table, Column, ForeignKey, BigInteger, Sequence\n'), ((3234, 3264), 'sqlalchemy.ForeignKey', 'ForeignKey', (['f"""{tableBName}.id"""'], {}), "(f'{tableBName}.id')\n", (3244, 3264), False, 'from sqlalchemy import Table, Column, ForeignKey, BigInteger, Sequence\n')]
|
# Generated by Django 2.0.6 on 2018-07-18 22:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.0.6): redefine the three
    ``reviewer`` foreign keys as optional (blank/null) with help text,
    related names, and a shared 'Review author' verbose name."""

    dependencies = [
        ('marketplace', '0034_volunteerapplication_reviewer'),
    ]
    operations = [
        # Reviewer of an organization membership request.
        migrations.AlterField(
            model_name='organizationmembershiprequest',
            name='reviewer',
            field=models.ForeignKey(blank=True, help_text='User that reviewed the membership application', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='reviewed_organization_membership_request', to=settings.AUTH_USER_MODEL, verbose_name='Review author'),
        ),
        # QA reviewer of a project task.
        migrations.AlterField(
            model_name='projecttaskreview',
            name='reviewer',
            field=models.ForeignKey(blank=True, help_text='The user that did the QA review of this task.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='reviewed_project_task', to=settings.AUTH_USER_MODEL, verbose_name='Review author'),
        ),
        # Reviewer of a volunteer application.
        migrations.AlterField(
            model_name='volunteerapplication',
            name='reviewer',
            field=models.ForeignKey(blank=True, help_text='The user that did the review of this volunteer application.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='reviewed_volunteer_application', to=settings.AUTH_USER_MODEL, verbose_name='Review author'),
        ),
    ]
|
[
"django.db.models.ForeignKey"
] |
[((442, 721), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""User that reviewed the membership application"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""reviewed_organization_membership_request"""', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""Review author"""'}), "(blank=True, help_text=\n 'User that reviewed the membership application', null=True, on_delete=\n django.db.models.deletion.CASCADE, related_name=\n 'reviewed_organization_membership_request', to=settings.AUTH_USER_MODEL,\n verbose_name='Review author')\n", (459, 721), False, 'from django.db import migrations, models\n'), ((837, 1092), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""The user that did the QA review of this task."""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""reviewed_project_task"""', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""Review author"""'}), "(blank=True, help_text=\n 'The user that did the QA review of this task.', null=True, on_delete=\n django.db.models.deletion.CASCADE, related_name='reviewed_project_task',\n to=settings.AUTH_USER_MODEL, verbose_name='Review author')\n", (854, 1092), False, 'from django.db import migrations, models\n'), ((1216, 1499), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""The user that did the review of this volunteer application."""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""reviewed_volunteer_application"""', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""Review author"""'}), "(blank=True, help_text=\n 'The user that did the review of this volunteer application.', null=\n True, on_delete=django.db.models.deletion.CASCADE, related_name=\n 'reviewed_volunteer_application', to=settings.AUTH_USER_MODEL,\n verbose_name='Review author')\n", (1233, 1499), 
False, 'from django.db import migrations, models\n')]
|
from CNF_Creator import *
import numpy as np
import time
import timeit
#---Parameters-----------------------------------------------
num_of_literals = 50 # Number of literals
pop_size = 10 # Population size of each generation
time_limit = 45  # Wall-clock budget in seconds for one run of the algorithm
p_mutate = 0.9 # Probability of Mutation
p_mutate_literal = 0.1 # Probability of Mutating each literal
p_tournament_sel = 0.9 # Initial probability that random child is chosen over a fit child
beta = 100.0 # Decay parameter: selection uses p = p_tournament_sel ** (epoch / beta)
max_stagnate_cnt = 3000 #Maximum number of epochs for which max_fitness is allowed to stagnate
#=============================================================
class CNF_Model:
    '''
    A single valuation: an assignment of True/False (1/0) to every literal.
    '''
    def __init__(self, num_of_literals, arr=None):
        '''
        Initializes the valuation.
        Arguments: num_of_literals -> Number of literals in the model
                   arr -> Initial truth values; when None, a random
                          0/1 vector is drawn
        '''
        self.num_of_literals = num_of_literals
        if arr is None:
            self.truth_vals = np.random.randint(0, 2, size=num_of_literals)
        else:
            self.truth_vals = arr
        self.fitness_score = -1

    def fitness_eval(self, cnf_statement):
        '''
        Evaluates this valuation against a CNF statement; the fitness is the
        percentage of clauses satisfied.
        Arguments: cnf_statement -> list of clauses (lists of signed literals)
        Return: fitness_score -> Calculated fitness score (0..100)
        '''
        satisfied = 0
        for clause in cnf_statement:
            for literal in clause:
                value = self.truth_vals[abs(literal) - 1]
                # A positive literal needs a 1; a negative literal needs a 0.
                if (literal > 0 and value == 1) or (literal < 0 and value == 0):
                    satisfied += 1
                    break
        self.fitness_score = float(satisfied) / float(len(cnf_statement)) * 100.0
        return self.fitness_score

    def get_fitness_score(self):
        '''
        Returns the last calculated fitness score of the model.
        '''
        return self.fitness_score

    def get_truth_values(self):
        '''
        Returns the valuation as signed literals: +i means literal i is True,
        -i means literal i is False.
        '''
        return [idx + 1 if self.truth_vals[idx] == 1 else -idx - 1
                for idx in range(len(self.truth_vals))]
class Genetic_Algorithm:
    '''
    Implements a Genetic Algorithm search for a satisfying assignment of a
    3-CNF sentence.
    '''
    def __init__(self, num_of_clauses, population_size=10, num_of_literals=50):
        '''
        Initializes the algorithm parameters.
        Arguments: num_of_clauses -> Number of clauses in the CNF statement
                   population_size -> Population size of each generation of models
                   num_of_literals -> Number of literals used in the CNF Statement
        '''
        self.mutate_p = p_mutate
        self.max_fitness_scores = []
        # Fix: the original assigned num_of_clauses twice; once is enough.
        self.num_of_clauses = num_of_clauses
        self.population_size = population_size
        self.num_of_literals = num_of_literals

    def init_population(self, cnf_statement):
        '''
        Creates the initial random population of CNF models, with fitness
        already evaluated.
        Arguments: cnf_statement -> CNF statement being evaluated
        Returns: population -> list of CNF_Model instances
        '''
        population = [CNF_Model(self.num_of_literals)
                      for _ in range(self.population_size)]
        for model in population:
            model.fitness_eval(cnf_statement)
        return population

    def Weights(self, models):
        '''
        Computes selection weights for reproduction, proportional to each
        model's fitness score and normalized to sum to 1.
        Arguments: models -> population of models
        Returns: weights -> numpy array of selection probabilities
        '''
        weights = np.zeros(self.population_size)
        for i in range(self.population_size):
            weights[i] = models[i].get_fitness_score()
        total = weights.sum()  # renamed from `sum`, which shadowed the builtin
        return weights / total

    def reproduce(self, parent_1, parent_2):
        '''
        Single-point crossover at a random pivot: prefix from parent_1,
        suffix from parent_2.
        Arguments: parent_1, parent_2 -> parent models
        Returns: child -> child model
        '''
        length = self.num_of_literals
        pivot = np.random.randint(length)
        child_arr = np.append(parent_1.truth_vals[:pivot],
                              parent_2.truth_vals[pivot:])
        return CNF_Model(length, child_arr)

    def Mutate(self, child):
        '''
        Mutates `child` in place: each literal flips independently with
        probability p_mutate_literal.
        '''
        for i in range(self.num_of_literals):
            if np.random.random() < p_mutate_literal:
                child.truth_vals[i] = 1 - child.truth_vals[i]
        return

    def Tournament_Selection(self, population, pop_size, epoch):
        '''
        Selects the next generation. With probability (1 - p) the i-th
        fittest model is kept, otherwise a random model from the remainder
        is taken; p decays exponentially with the epoch, so selection
        pressure increases over time.
        Arguments: population -> candidate pool (sorted in place by fitness)
                   pop_size -> size of the selected generation
                   epoch -> current epoch
        Returns: selected -> population generated after selection
        '''
        population.sort(key=lambda m: m.fitness_score, reverse=True)
        p = float(p_tournament_sel) ** (epoch / beta)
        selected = []
        for i in range(pop_size):
            if np.random.random() > p:
                selected.append(population[i])
            else:
                selected.append(
                    population[np.random.randint(pop_size, len(population))])
        return selected

    def Max_fitness(self, population):
        '''
        Finds the fitness of the most fit model in the population.
        Argument: population -> population to scan
        Returns: max_fitness -> maximum fitness value
        '''
        best = 0
        for model in population:
            best = max(best, model.get_fitness_score())
        return best

    def run_algorithm(self, cnf_statement, debug_stmt=False):
        '''
        Runs the Genetic Algorithm on the CNF statement until a model with
        100% fitness is found, the time limit is hit, or fitness stagnates.
        Arguments: cnf_statement -> CNF statement whose solution is sought
                   debug_stmt -> If True, prints verbose info about the run
        Returns: (best_model or None, time_taken in seconds)
        '''
        start_time = time.time()
        max_fitness = 0
        population = self.init_population(cnf_statement)
        epoch = 0
        time_taken = 0.0
        prev_fitness = 0.0
        stagnate_cnt = 0
        while max_fitness < 100.0:
            weights = self.Weights(population)
            population2 = population.copy()
            for _ in range(self.population_size):
                parent1, parent2 = np.random.choice(population, 2, p=weights)
                child = self.reproduce(parent1, parent2)
                child.fitness_eval(cnf_statement)
                if np.random.random() < self.mutate_p:
                    self.Mutate(child)
                population2.append(child)
            # Fix: use the configured population size instead of the
            # module-level `pop_size` constant, so non-default sizes work.
            population = self.Tournament_Selection(
                population2, self.population_size, epoch)
            max_fitness = self.Max_fitness(population)
            self.max_fitness_scores.append(max_fitness)
            epoch += 1
            if epoch % 1000 == 1 and debug_stmt:
                print(f"{epoch} epoch: Fitness score {max_fitness}%\n")
            if abs(prev_fitness - max_fitness) < 0.01:
                stagnate_cnt += 1
            else:
                stagnate_cnt = 0
            prev_fitness = max_fitness
            time_taken = time.time() - start_time
            if time_taken > time_limit - 0.01:
                if debug_stmt:
                    print("\nTime limit exceeded, couldn't find a solution\n")
                break
            if stagnate_cnt == max_stagnate_cnt:
                if debug_stmt:
                    print("\nFitness Score stagnated for too long\n")
                break
        for model in population:
            if model.get_fitness_score() == max_fitness:
                return model, time_taken
        return None, time_taken
def main():
    """Load the 3-CNF sentence from CSV, run the GA, and print a report."""
    creator = CNF_Creator(n=50)  # n is the number of symbols in the sentence
    sentence = creator.ReadCNFfromCSVfile()
    solver = Genetic_Algorithm(len(sentence))
    best_model, time_taken = solver.run_algorithm(sentence)
    print('\n\n')
    print('Roll No : 2019A7PS0033G')
    print('Number of clauses in CSV file : ', len(sentence))
    print('Best model : ', best_model.get_truth_values())
    print(f'Fitness value of best model : {best_model.get_fitness_score()}%')
    print(f'Time taken : {time_taken}')
    print('\n\n')


if __name__ == '__main__':
    main()
|
[
"numpy.zeros",
"time.time",
"numpy.append",
"numpy.random.random",
"numpy.random.randint",
"numpy.random.choice"
] |
[((4268, 4298), 'numpy.zeros', 'np.zeros', (['self.population_size'], {}), '(self.population_size)\n', (4276, 4298), True, 'import numpy as np\n'), ((4827, 4852), 'numpy.random.randint', 'np.random.randint', (['length'], {}), '(length)\n', (4844, 4852), True, 'import numpy as np\n'), ((4921, 4970), 'numpy.append', 'np.append', (['child_arr', 'parent_2.truth_vals[pivot:]'], {}), '(child_arr, parent_2.truth_vals[pivot:])\n', (4930, 4970), True, 'import numpy as np\n'), ((7022, 7033), 'time.time', 'time.time', ([], {}), '()\n', (7031, 7033), False, 'import time\n'), ((1211, 1256), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': 'num_of_literals'}), '(0, 2, size=num_of_literals)\n', (1228, 1256), True, 'import numpy as np\n'), ((5243, 5261), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5259, 5261), True, 'import numpy as np\n'), ((6043, 6061), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (6059, 6061), True, 'import numpy as np\n'), ((7422, 7464), 'numpy.random.choice', 'np.random.choice', (['population', '(2)'], {'p': 'weights'}), '(population, 2, p=weights)\n', (7438, 7464), True, 'import numpy as np\n'), ((8300, 8311), 'time.time', 'time.time', ([], {}), '()\n', (8309, 8311), False, 'import time\n'), ((7591, 7609), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7607, 7609), True, 'import numpy as np\n')]
|
# vim:ts=4:sw=4:ai:et:si:sts=4
import logging
import statistics

import scipy  # fixed: was misspelled "scippy", which broke module import
logger = logging.getLogger(__name__)
class GridMath(object):
    """Element-wise statistical reductions over grids of readings.

    Every method takes ``readings`` -- an iterable whose items are sequences
    of numbers (or empty/None) -- and returns a list with one aggregate per
    item, substituting ``None`` for empty/falsy items.
    """

    @staticmethod
    def arithmetic_mean(readings):
        return [statistics.mean(item) if item else None for item in readings]

    @staticmethod
    def harmonic_mean(readings):
        # Fix: the original called scipy.hmean via a misspelled "scippy"
        # import; statistics.harmonic_mean (3.6+) is the stdlib equivalent.
        return [statistics.harmonic_mean(item) if item else None
                for item in readings]

    @staticmethod
    def geometric_mean(readings):
        # Fix: the original called scipy.gmean; statistics.geometric_mean
        # (3.8+) is the stdlib equivalent.
        return [statistics.geometric_mean(item) if item else None
                for item in readings]

    @staticmethod
    def min(readings):
        return [min(item) if item else None for item in readings]

    @staticmethod
    def max(readings):
        return [max(item) if item else None for item in readings]

    @staticmethod
    def median(readings):
        return [statistics.median(item) if item else None for item in readings]

    @staticmethod
    def mode(readings):
        return [statistics.mode(item) if item else None for item in readings]

    @staticmethod
    def variance(readings):
        # Population variance, matching the pstdev used below.
        return [statistics.pvariance(item) if item else None
                for item in readings]

    @staticmethod
    def stdev(readings):
        return [statistics.pstdev(item) if item else None for item in readings]
|
[
"statistics.median",
"statistics.pstdev",
"statistics.pvariance",
"statistics.mean",
"statistics.mode",
"logging.getLogger"
] |
[((90, 117), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (107, 117), False, 'import logging\n'), ((213, 234), 'statistics.mean', 'statistics.mean', (['item'], {}), '(item)\n', (228, 234), False, 'import statistics\n'), ((805, 828), 'statistics.median', 'statistics.median', (['item'], {}), '(item)\n', (822, 828), False, 'import statistics\n'), ((928, 949), 'statistics.mode', 'statistics.mode', (['item'], {}), '(item)\n', (943, 949), False, 'import statistics\n'), ((1053, 1079), 'statistics.pvariance', 'statistics.pvariance', (['item'], {}), '(item)\n', (1073, 1079), False, 'import statistics\n'), ((1196, 1219), 'statistics.pstdev', 'statistics.pstdev', (['item'], {}), '(item)\n', (1213, 1219), False, 'import statistics\n')]
|
import time
from collections import defaultdict
from typing import List, Dict
import numpy as np
def timed(callback, *args, **kwargs):
    """Call *callback* with the given arguments and measure its duration.

    Returns a ``(result, seconds)`` tuple.
    """
    began = time.time()
    outcome = callback(*args, **kwargs)
    elapsed = time.time() - began
    return outcome, elapsed
class Timer:
    """Context manager measuring the wall-clock duration of a ``with`` block.

    After the block exits, ``result`` holds the elapsed seconds; ``reset``
    hands that value back and clears the timer for reuse.
    """

    def __init__(self):
        self.start = 0.
        self.result = 0.

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        finished_at = time.time()
        self.result = finished_at - self.start

    def reset(self):
        elapsed = self.result
        self.result = 0.
        self.start = 0.
        return elapsed
class MultiTimer:
    """Accumulates named wall-clock measurements across repeated sections.

    Usage: ``with mt.time("phase"): ...`` records one duration under
    ``"phase"``; ``get_results`` returns the (optionally averaged) durations.
    """

    def __init__(self):
        # Maps field name -> list of durations (during a measurement the
        # last entry temporarily holds the start timestamp).
        # Fix: the annotation was ``Dict[List[float]]`` -- typing.Dict takes
        # two parameters, and annotations on attribute targets are evaluated
        # at runtime, so that raised a TypeError.
        self._results: Dict[str, List[float]] = defaultdict(list)
        self._currently_measuring = None

    def time(self, fieldname):
        """
        Starts measuring.
        """
        if self._currently_measuring is not None:
            # Fix: the original message had an unfilled "{}" placeholder.
            raise RuntimeError(
                f"I was already measuring {self._currently_measuring}"
            )
        self._currently_measuring = fieldname
        self._results[fieldname].append(time.time())
        return self

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Replace the stored start timestamp with the elapsed duration.
        self._results[self._currently_measuring][-1] = (
            time.time() - self._results[self._currently_measuring][-1]
        )
        self._currently_measuring = None

    def get_results(self, reset=False, reduce=True):
        """Return recorded durations per field; the mean per field when
        ``reduce`` is true, clearing all data when ``reset`` is true."""
        result = self._results
        if reduce:
            result = {k: np.mean(v) for k, v in result.items()}
        if reset:
            self.reset()
        return result

    def reset(self):
        """Drop all stored measurements and any in-progress one."""
        self._results = defaultdict(list)
        self._currently_measuring = None
|
[
"collections.defaultdict",
"numpy.mean",
"time.time"
] |
[((150, 161), 'time.time', 'time.time', ([], {}), '()\n', (159, 161), False, 'import time\n'), ((376, 387), 'time.time', 'time.time', ([], {}), '()\n', (385, 387), False, 'import time\n'), ((707, 724), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (718, 724), False, 'from collections import defaultdict\n'), ((1610, 1627), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1621, 1627), False, 'from collections import defaultdict\n'), ((220, 231), 'time.time', 'time.time', ([], {}), '()\n', (229, 231), False, 'import time\n'), ((482, 493), 'time.time', 'time.time', ([], {}), '()\n', (491, 493), False, 'import time\n'), ((1045, 1056), 'time.time', 'time.time', ([], {}), '()\n', (1054, 1056), False, 'import time\n'), ((1231, 1242), 'time.time', 'time.time', ([], {}), '()\n', (1240, 1242), False, 'import time\n'), ((1460, 1470), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (1467, 1470), True, 'import numpy as np\n')]
|
"""
Tests for job management
"""
import unittest
import copy
import itertools
from unittest import mock
import re
import os
from IPython.display import HTML
import biokbase.narrative.jobs.jobmanager
from biokbase.narrative.jobs.jobmanager import (
JOB_NOT_REG_ERR,
JOB_NOT_BATCH_ERR,
JOBS_MISSING_FALSY_ERR,
get_error_output_state,
)
from biokbase.narrative.jobs.job import (
Job,
EXCLUDED_JOB_STATE_FIELDS,
JOB_INIT_EXCLUDED_JOB_STATE_FIELDS,
JOB_ATTR_DEFAULTS,
)
from biokbase.narrative.exception_util import (
NarrativeException,
JobIDException,
)
from biokbase.narrative.jobs.jobmanager import JOBS_TYPE_ERR
from .util import ConfigTests
from .test_job import (
JOB_COMPLETED,
JOB_CREATED,
JOB_RUNNING,
JOB_TERMINATED,
JOB_ERROR,
BATCH_PARENT,
BATCH_COMPLETED,
BATCH_TERMINATED,
BATCH_TERMINATED_RETRIED,
BATCH_ERROR_RETRIED,
BATCH_RETRY_COMPLETED,
BATCH_RETRY_RUNNING,
BATCH_RETRY_ERROR,
JOB_NOT_FOUND,
JOBS_TERMINALITY,
ALL_JOBS,
TERMINAL_JOBS,
ACTIVE_JOBS,
BATCH_CHILDREN,
get_test_job,
get_test_job_state,
get_test_spec,
TEST_JOBS,
get_test_job_states,
get_cell_2_jobs,
)
from .narrative_mock.mockclients import (
get_mock_client,
get_failing_mock_client,
assert_obj_method_called,
MockClients,
)
__author__ = "<NAME> <<EMAIL>>"
TERMINAL_IDS = [JOB_COMPLETED, JOB_TERMINATED, JOB_ERROR]
NON_TERMINAL_IDS = [JOB_CREATED, JOB_RUNNING]
ERR_STR = "Some error occurred"
def create_jm_message(r_type, job_id=None, data=None):
    """Build a mock comm message of the shape the JobManager consumes.

    The request type and job ID are written into ``data`` (a fresh dict
    when none is supplied) and wrapped in the comm envelope.
    """
    payload = {} if data is None else data
    payload["request_type"] = r_type
    payload["job_id"] = job_id
    return {"content": {"data": payload}}
def get_retry_job_state(orig_id, status="unmocked"):
    """Mock output state for the retry of job ``orig_id``.

    The fake retry job's ID is the original ID reversed.
    """
    retry_id = orig_id[::-1]
    state = {
        "job_id": retry_id,
        "status": status,
        "batch_id": None,
        "job_output": {},
        "cell_id": None,
        "run_id": None,
        "child_jobs": [],
    }
    return {
        "state": state,
        "cell_id": None,
        "widget_info": None,
        "user": None,
    }
def get_test_job_infos(job_ids):
    """Map each job ID in ``job_ids`` to its expected job-info structure."""
    infos = {}
    for job_id in job_ids:
        infos[job_id] = get_test_job_info(job_id)
    return infos
def get_test_job_info(job_id):
    """Construct the expected job-info dict for ``job_id`` from test data.

    Mirrors what JobManager.lookup_job_info should produce: app ID/name,
    job params, and batch ID, with batch container jobs reported as app
    "batch" and using their own ID as the batch ID.
    """
    test_job = get_test_job(job_id)
    job_input = test_job.get("job_input", {})

    job_id = test_job.get("job_id")
    app_id = job_input.get("app_id", None)
    tag = job_input.get("narrative_cell_info", {}).get("tag", "release")
    params = job_input.get("params", JOB_ATTR_DEFAULTS["params"])
    batch_job = test_job.get("batch_job", JOB_ATTR_DEFAULTS["batch_job"])

    if batch_job:
        app_name = "batch"
        batch_id = job_id
    else:
        app_name = get_test_spec(tag, app_id)["info"]["name"]
        batch_id = test_job.get("batch_id", JOB_ATTR_DEFAULTS["batch_id"])

    return {
        "app_id": app_id,
        "app_name": app_name,
        "job_id": job_id,
        "job_params": params,
        "batch_id": batch_id,
    }
class JobManagerTest(unittest.TestCase):
@classmethod
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def setUpClass(cls):
    """One-time setup: cache the known test job IDs and point the
    narrative at the test workspace (service clients are mocked)."""
    cls.job_ids = list(TEST_JOBS.keys())
    config = ConfigTests()
    # The narrative reads the active workspace from the environment.
    os.environ["KB_WORKSPACE_ID"] = config.get("jobs", "job_test_wsname")
    # Show full diffs on assertion failures.
    cls.maxDiff = None
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def setUp(self) -> None:
    """Create a fresh, initialized JobManager (with mocked clients)
    before every test."""
    self.jm = biokbase.narrative.jobs.jobmanager.JobManager()
    self.jm.initialize_jobs()
    # Expected output states for all test jobs, used throughout.
    self.job_states = get_test_job_states()
def validate_status_message(self, msg):
    """Return True if ``msg`` has the expected job-status message shape.

    Checks that the required top-level keys and the required keys
    inside ``msg["state"]`` are present; prints any missing keys and
    returns False otherwise.
    """
    core_keys = set(["widget_info", "user", "state"])
    state_keys = set(
        ["user", "authstrat", "wsid", "status", "updated", "job_input"]
    )
    missing_core = core_keys.difference(set(msg.keys()))
    if missing_core:
        print("Missing core key(s) - [{}]".format(", ".join(missing_core)))
        return False
    missing_state = state_keys.difference(set(msg["state"].keys()))
    if missing_state:
        print("Missing status key(s) - [{}]".format(", ".join(missing_state)))
        return False
    return True
@mock.patch(
"biokbase.narrative.clients.get", get_failing_mock_client
)
def test_initialize_jobs_ee2_fail(self):
# init jobs should fail. specifically, ee2.check_workspace_jobs should error.
with self.assertRaises(NarrativeException) as e:
self.jm.initialize_jobs()
self.assertIn("Job lookup failed", str(e.exception))
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_initialize_jobs(self):
# all jobs have been removed from the JobManager
self.jm._running_jobs = {}
self.jm = biokbase.narrative.jobs.jobmanager.JobManager()
self.assertEqual(self.jm._running_jobs, {})
# redo the initialise to make sure it worked correctly
self.jm.initialize_jobs()
terminal_ids = [
job_id
for job_id, d in self.jm._running_jobs.items()
if d["job"].was_terminal()
]
self.assertEqual(
set(TERMINAL_JOBS),
set(terminal_ids),
)
self.assertEqual(set(self.job_ids), set(self.jm._running_jobs.keys()))
for job_id in TERMINAL_IDS:
self.assertFalse(self.jm._running_jobs[job_id]["refresh"])
for job_id in NON_TERMINAL_IDS:
self.assertTrue(self.jm._running_jobs[job_id]["refresh"])
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_initialize_jobs__cell_ids(self):
"""
Invoke initialize_jobs with cell_ids
"""
cell_2_jobs = get_cell_2_jobs()
cell_ids = list(cell_2_jobs.keys())
# Iterate through all combinations of cell IDs
for combo_len in range(len(cell_ids) + 1):
for combo in itertools.combinations(cell_ids, combo_len):
combo = list(combo)
# Get jobs expected to be associated with the cell IDs
exp_job_ids = [
job_id
for cell_id, job_ids in cell_2_jobs.items()
for job_id in job_ids
if cell_id in combo
]
self.jm._running_jobs = {}
self.jm.initialize_jobs(cell_ids=combo)
for job_id, d in self.jm._running_jobs.items():
refresh = d["refresh"]
self.assertEqual(
int(job_id in exp_job_ids and not JOBS_TERMINALITY[job_id]),
refresh,
)
def test__check_job(self):
    """_check_job accepts every registered job ID without raising."""
    for job_id in ALL_JOBS:
        self.jm._check_job(job_id)
def test__check_job_fail(self):
    """_check_job raises JobIDException for falsy or unregistered IDs."""
    with self.assertRaisesRegex(JobIDException, f"{JOB_NOT_REG_ERR}: {None}"):
        self.jm._check_job(None)
    with self.assertRaisesRegex(JobIDException, f"{JOB_NOT_REG_ERR}: {JOB_NOT_FOUND}"):
        self.jm._check_job(JOB_NOT_FOUND)
def test__check_job_list_fail(self):
with self.assertRaisesRegex(TypeError, f"{JOBS_TYPE_ERR}: {None}"):
self.jm._check_job_list(None)
with self.assertRaisesRegex(JobIDException, re.escape(f"{JOBS_MISSING_FALSY_ERR}: {[]}")):
self.jm._check_job_list([])
with self.assertRaisesRegex(JobIDException, re.escape(f'{JOBS_MISSING_FALSY_ERR}: {["", "", None]}')):
self.jm._check_job_list(["", "", None])
def test__check_job_list(self):
"""job list checker"""
job_a = JOB_CREATED
job_b = JOB_COMPLETED
job_c = "job_c"
job_d = "job_d"
self.assertEqual(
self.jm._check_job_list([job_c]),
(
[],
[job_c],
),
)
self.assertEqual(
self.jm._check_job_list([job_c, None, "", job_c, job_c, None, job_d]),
(
[],
[job_c, job_d],
),
)
self.assertEqual(
self.jm._check_job_list([job_c, None, "", None, job_a, job_a, job_a]),
(
[job_a],
[job_c],
),
)
self.assertEqual(
self.jm._check_job_list([None, job_a, None, "", None, job_b]),
(
[job_a, job_b],
[],
),
)
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test__construct_job_output_state_set(self):
self.assertEqual(
self.jm._construct_job_output_state_set(ALL_JOBS), get_test_job_states()
)
def test__construct_job_output_state_set__empty_list(self):
self.assertEqual(self.jm._construct_job_output_state_set([]), {})
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test__construct_job_output_state_set__ee2_error(self):
def mock_check_jobs(self, params):
raise Exception("Test exception")
with mock.patch.object(MockClients, "check_jobs", side_effect=mock_check_jobs):
job_states = self.jm._construct_job_output_state_set(ALL_JOBS)
self.assertEqual(
{
**get_test_job_states(TERMINAL_JOBS),
**{
job_id: get_error_output_state(job_id, "ee2_error")
for job_id in ACTIVE_JOBS
}
},
job_states
)
def test__create_jobs__empty_list(self):
self.assertEqual(self.jm._create_jobs([]), {})
def test__create_jobs__jobs_already_exist(self):
job_list = self.jm._running_jobs.keys()
self.assertEqual(self.jm._create_jobs(job_list), {})
def test_get_job_good(self):
job_id = self.job_ids[0]
job = self.jm.get_job(job_id)
self.assertEqual(job_id, job.job_id)
self.assertIsInstance(job, Job)
def test_get_job_bad(self):
with self.assertRaisesRegex(
JobIDException, f"{JOB_NOT_REG_ERR}: not_a_job_id"
):
self.jm.get_job("not_a_job_id")
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_list_jobs_html(self):
jobs_html = self.jm.list_jobs()
self.assertIsInstance(jobs_html, HTML)
html = jobs_html.data
self.assertIn("<td>5d64935ab215ad4128de94d6</td>", html)
self.assertIn("<td>NarrativeTest/test_editor</td>", html)
self.assertIn("<td>2019-08-26 ", html)
self.assertIn(":54:48</td>", html)
self.assertIn("<td>fake_test_user</td>", html)
self.assertIn("<td>completed</td>", html)
self.assertIn("<td>Not started</td>", html)
self.assertIn("<td>Incomplete</td>", html)
def test_list_jobs_twice(self):
# with no jobs
with mock.patch.object(self.jm, "_running_jobs", {}):
expected = "No running jobs!"
self.assertEqual(self.jm.list_jobs(), expected)
self.assertEqual(self.jm.list_jobs(), expected)
# with some jobs
with mock.patch(
"biokbase.narrative.clients.get", get_mock_client
):
jobs_html_0 = self.jm.list_jobs().data
jobs_html_1 = self.jm.list_jobs().data
try:
self.assertEqual(jobs_html_0, jobs_html_1)
except AssertionError:
# Sometimes the time is off by a second
# This will still fail if on the hour
pattern = r"(\d\d:)\d\d:\d\d"
sub = r"\1"
jobs_html_0 = re.sub(pattern, sub, jobs_html_0)
jobs_html_1 = re.sub(pattern, sub, jobs_html_1)
self.assertEqual(jobs_html_0, jobs_html_1)
def test_cancel_jobs__bad_inputs(self):
with self.assertRaisesRegex(JobIDException, re.escape(f"{JOBS_MISSING_FALSY_ERR}: {[]}")):
self.jm.cancel_jobs([])
with self.assertRaisesRegex(JobIDException, re.escape(f'{JOBS_MISSING_FALSY_ERR}: {["", "", None]}')):
self.jm.cancel_jobs(["", "", None])
job_states = self.jm.cancel_jobs([JOB_NOT_FOUND])
self.assertEqual({JOB_NOT_FOUND: get_error_output_state(JOB_NOT_FOUND)}, job_states)
def test_cancel_jobs__job_already_finished(self):
self.assertEqual(get_test_job(JOB_COMPLETED)["status"], "completed")
self.assertEqual(get_test_job(JOB_TERMINATED)["status"], "terminated")
self.assertTrue(self.jm.get_job(JOB_COMPLETED).was_terminal())
self.assertTrue(self.jm.get_job(JOB_TERMINATED).was_terminal())
with mock.patch(
"biokbase.narrative.jobs.jobmanager.JobManager._cancel_job"
) as mock_cancel_job:
canceled_jobs = self.jm.cancel_jobs([JOB_COMPLETED, JOB_TERMINATED])
mock_cancel_job.assert_not_called()
self.assertEqual(
{
JOB_COMPLETED: self.job_states[JOB_COMPLETED],
JOB_TERMINATED: self.job_states[JOB_TERMINATED],
},
canceled_jobs,
)
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_cancel_jobs__run_ee2_cancel_job(self):
"""cancel a set of jobs that run cancel_job on ee2"""
# jobs list:
jobs = [
None,
JOB_CREATED,
JOB_RUNNING,
"",
JOB_TERMINATED,
JOB_COMPLETED,
JOB_TERMINATED,
None,
JOB_NOT_FOUND,
]
expected = {
JOB_CREATED: self.job_states[JOB_CREATED],
JOB_RUNNING: self.job_states[JOB_RUNNING],
JOB_COMPLETED: self.job_states[JOB_COMPLETED],
JOB_TERMINATED: self.job_states[JOB_TERMINATED],
JOB_NOT_FOUND: {
"state": {
"job_id": JOB_NOT_FOUND,
"status": "does_not_exist",
}
},
}
self.jm._running_jobs[JOB_RUNNING]["refresh"] = 1
self.jm._running_jobs[JOB_CREATED]["refresh"] = 1
def check_state(arg):
self.assertEqual(self.jm._running_jobs[arg["job_id"]]["refresh"], 0)
self.assertEqual(self.jm._running_jobs[arg["job_id"]]["canceling"], True)
# patch MockClients.cancel_job so we can test the input
with mock.patch.object(
MockClients,
"cancel_job",
mock.Mock(return_value={}, side_effect=check_state),
) as mock_cancel_job:
results = self.jm.cancel_jobs(jobs)
self.assertNotIn("canceling", self.jm._running_jobs[JOB_RUNNING])
self.assertNotIn("canceling", self.jm._running_jobs[JOB_CREATED])
self.assertEqual(self.jm._running_jobs[JOB_RUNNING]["refresh"], 1)
self.assertEqual(self.jm._running_jobs[JOB_CREATED]["refresh"], 1)
self.assertEqual(results.keys(), expected.keys())
self.assertEqual(results, expected)
mock_cancel_job.assert_has_calls(
[
mock.call({"job_id": JOB_RUNNING}),
mock.call({"job_id": JOB_CREATED}),
],
any_order=True,
)
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_cancel_jobs(self):
with assert_obj_method_called(self.jm, "cancel_jobs", True):
self.jm.cancel_jobs([JOB_COMPLETED])
def _check_retry_jobs(
    self,
    expected,
    retry_results,
):
    """Shared assertions for the retry_jobs tests.

    Verifies that ``retry_results`` equals ``expected``, that every
    successfully retried original job and its new retry job are
    registered with the JobManager and have cached state, and that jobs
    reported as nonexistent were not registered.
    """
    self.assertEqual(expected, retry_results)
    # IDs of original jobs whose retry succeeded (no "error" entry).
    orig_ids = [
        result["job"]["state"]["job_id"]
        for result in retry_results
        if "error" not in result
    ]
    # IDs of the newly created retry jobs.
    retry_ids = [
        result["retry"]["state"]["job_id"]
        for result in retry_results
        if "error" not in result
    ]
    # IDs ee2 reported as nonexistent.
    dne_ids = [
        result["job"]["state"]["job_id"]
        for result in retry_results
        if result["job"]["state"]["status"] == "does_not_exist"
    ]
    for job_id in orig_ids + retry_ids:
        job = self.jm.get_job(job_id)
        self.assertIn(job_id, self.jm._running_jobs)
        self.assertIsNotNone(job._acc_state)
    for job_id in dne_ids:
        self.assertNotIn(job_id, self.jm._running_jobs)
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_retry_jobs__success(self):
job_ids = [JOB_TERMINATED]
expected = [
{
"job": self.job_states[JOB_TERMINATED],
"retry": get_retry_job_state(JOB_TERMINATED),
}
]
retry_results = self.jm.retry_jobs(job_ids)
self._check_retry_jobs(expected, retry_results)
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_retry_jobs__multi_success(self):
job_ids = [JOB_TERMINATED, JOB_ERROR]
expected = [
{
"job": self.job_states[JOB_TERMINATED],
"retry": get_retry_job_state(JOB_TERMINATED),
},
{
"job": self.job_states[JOB_ERROR],
"retry": get_retry_job_state(JOB_ERROR),
},
]
retry_results = self.jm.retry_jobs(job_ids)
self._check_retry_jobs(expected, retry_results)
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_retry_jobs__success_error_dne(self):
job_ids = [JOB_NOT_FOUND, JOB_TERMINATED, JOB_COMPLETED]
expected = [
{
"job": self.job_states[JOB_TERMINATED],
"retry": get_retry_job_state(JOB_TERMINATED),
},
{
"job": self.job_states[JOB_COMPLETED],
"error": ERR_STR,
},
{
"job": get_error_output_state(JOB_NOT_FOUND),
"error": "does_not_exist",
},
]
ee2_ret = [
{"job_id": JOB_TERMINATED, "retry_id": JOB_TERMINATED[::-1]},
{"job_id": JOB_COMPLETED, "error": ERR_STR},
]
with mock.patch.object(
MockClients,
"retry_jobs",
mock.Mock(return_value=ee2_ret),
):
retry_results = self.jm.retry_jobs(job_ids)
self._check_retry_jobs(expected, retry_results)
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_retry_jobs__all_error(self):
job_ids = [JOB_TERMINATED, JOB_CREATED, JOB_RUNNING]
expected = [
{"job": self.job_states[JOB_TERMINATED], "error": ERR_STR},
{"job": self.job_states[JOB_CREATED], "error": ERR_STR},
{"job": self.job_states[JOB_RUNNING], "error": ERR_STR},
]
ee2_ret = [
{"job_id": JOB_TERMINATED, "error": ERR_STR},
{"job_id": JOB_CREATED, "error": ERR_STR},
{"job_id": JOB_RUNNING, "error": ERR_STR},
]
with mock.patch.object(
MockClients,
"retry_jobs",
mock.Mock(return_value=ee2_ret),
):
retry_results = self.jm.retry_jobs(job_ids)
self._check_retry_jobs(expected, retry_results)
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_retry_jobs__retry_already_terminal(self):
job_id = JOB_TERMINATED
retry_id = JOB_TERMINATED[::-1]
retry_status = "error"
expected = [
{
"job": self.job_states[JOB_TERMINATED],
"retry": get_retry_job_state(JOB_TERMINATED, status=retry_status),
}
]
test_jobs_ = copy.deepcopy(TEST_JOBS)
test_jobs_[retry_id] = {"job_id": retry_id, "status": retry_status}
with mock.patch.object(
MockClients,
"ee2_job_info",
test_jobs_,
):
retry_results = self.jm.retry_jobs([job_id])
self._check_retry_jobs(expected, retry_results)
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_retry_jobs__none_exist(self):
dne_id = "nope"
job_ids = ["", "", None, dne_id]
expected = [
{
"job": get_error_output_state(dne_id),
"error": "does_not_exist",
}
]
retry_results = self.jm.retry_jobs(job_ids)
self._check_retry_jobs(expected, retry_results)
def test_retry_jobs__bad_inputs(self):
with self.assertRaisesRegex(JobIDException, re.escape(f"{JOBS_MISSING_FALSY_ERR}: {[]}")):
self.jm.retry_jobs([])
with self.assertRaisesRegex(JobIDException, re.escape(f'{JOBS_MISSING_FALSY_ERR}: {["", "", None]}')):
self.jm.retry_jobs(["", "", None])
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_lookup_all_job_states(self):
states = self.jm.lookup_all_job_states()
self.assertEqual(set(ACTIVE_JOBS), set(states.keys()))
self.assertEqual(
states,
{id: self.job_states[id] for id in ACTIVE_JOBS},
)
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_lookup_all_job_states__ignore_refresh_flag(self):
states = self.jm.lookup_all_job_states(ignore_refresh_flag=True)
self.assertEqual(set(self.job_ids), set(states.keys()))
self.assertEqual(states, self.job_states)
# @mock.patch('biokbase.narrative.clients.get', get_mock_client)
# def test_job_status_fetching(self):
# self.jm._handle_comm_message(create_jm_message("all_status"))
# msg = self.jm._comm.last_message
# job_data = msg.get('data', {}).get('content', {})
# job_ids = list(job_data.keys())
# # assert that each job info that's flagged for lookup gets returned
# jobs_to_lookup = [j for j in self.jm._running_jobs.keys()]
# self.assertCountEqual(job_ids, jobs_to_lookup)
# for job_id in job_ids:
# self.assertTrue(self.validate_status_message(job_data[job_id]))
# self.jm._comm.clear_message_cache()
# @mock.patch('biokbase.narrative.clients.get', get_mock_client)
# def test_single_job_status_fetch(self):
# new_job = phony_job()
# self.jm.register_new_job(new_job)
# self.jm._handle_comm_message(create_jm_message("job_status", new_job.job_id))
# msg = self.jm._comm.last_message
# self.assertEqual(msg['data']['msg_type'], "job_status")
# # self.assertTrue(self.validate_status_message(msg['data']['content']))
# self.jm._comm.clear_message_cache()
# Should "fail" based on sent message.
# def test_job_message_bad_id(self):
# self.jm._handle_comm_message(create_jm_message("foo", job_id="not_a_real_job"))
# msg = self.jm._comm.last_message
# self.assertEqual(msg['data']['msg_type'], 'job_does_not_exist')
def test_cancel_job_lookup(self):
    # TODO: placeholder -- no assertions implemented yet.
    pass
# @mock.patch('biokbase.narrative.clients.get', get_mock_client)
# def test_stop_single_job_lookup(self):
# # Set up and make sure the job gets returned correctly.
# new_job = phony_job()
# phony_id = new_job.job_id
# self.jm.register_new_job(new_job)
# self.jm._handle_comm_message(create_jm_message("start_job_update", job_id=phony_id))
# self.jm._handle_comm_message(create_jm_message("stop_update_loop"))
# self.jm._lookup_all_job_status()
# msg = self.jm._comm.last_message
# self.assertTrue(phony_id in msg['data']['content'])
# self.assertEqual(msg['data']['content'][phony_id].get('listener_count', 0), 1)
# self.jm._comm.clear_message_cache()
# self.jm._handle_comm_message(create_jm_message("stop_job_update", job_id=phony_id))
# self.jm._lookup_all_job_status()
# msg = self.jm._comm.last_message
# self.assertTrue(self.jm._running_jobs[phony_id]['refresh'] == 0)
# self.assertIsNone(msg)
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_get_job_states(self):
job_ids = [
None,
None,
JOB_CREATED,
JOB_NOT_FOUND,
JOB_CREATED,
JOB_RUNNING,
JOB_TERMINATED,
JOB_COMPLETED,
BATCH_PARENT,
"",
JOB_NOT_FOUND,
]
exp = {
**{
job_id: self.job_states[job_id]
for job_id in [
JOB_CREATED,
JOB_RUNNING,
JOB_TERMINATED,
JOB_COMPLETED,
BATCH_PARENT,
]
},
JOB_NOT_FOUND: get_error_output_state(JOB_NOT_FOUND),
}
res = self.jm.get_job_states(job_ids)
self.assertEqual(exp, res)
def test_get_job_states__empty(self):
with self.assertRaisesRegex(JobIDException, re.escape(f"{JOBS_MISSING_FALSY_ERR}: {[]}")):
self.jm.get_job_states([])
def test_update_batch_job__dne(self):
with self.assertRaisesRegex(
JobIDException, f"{JOB_NOT_REG_ERR}: {JOB_NOT_FOUND}"
):
self.jm.update_batch_job(JOB_NOT_FOUND)
def test_update_batch_job__not_batch(self):
with self.assertRaisesRegex(JobIDException, f"{JOB_NOT_BATCH_ERR}: {JOB_CREATED}"):
self.jm.update_batch_job(JOB_CREATED)
with self.assertRaisesRegex(JobIDException, f"{JOB_NOT_BATCH_ERR}: {BATCH_TERMINATED}"):
self.jm.update_batch_job(BATCH_TERMINATED)
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_update_batch_job__no_change(self):
job_ids = self.jm.update_batch_job(BATCH_PARENT)
self.assertEqual(BATCH_PARENT, job_ids[0])
self.assertCountEqual(BATCH_CHILDREN, job_ids[1:])
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_update_batch_job__change(self):
"""test child ids having changed"""
new_child_ids = BATCH_CHILDREN[1:] + [JOB_CREATED, JOB_NOT_FOUND]
def mock_check_job(params):
"""Called from job.state()"""
job_id = params["job_id"]
if job_id == BATCH_PARENT:
return {"child_jobs": new_child_ids}
elif job_id in TEST_JOBS:
return get_test_job(job_id)
elif job_id == JOB_NOT_FOUND:
return {"job_id": job_id, "status": "does_not_exist"}
else:
raise Exception()
with mock.patch.object(
MockClients, "check_job", side_effect=mock_check_job
) as m:
job_ids = self.jm.update_batch_job(BATCH_PARENT)
m.assert_has_calls(
[
mock.call(
{
"job_id": BATCH_PARENT,
"exclude_fields": EXCLUDED_JOB_STATE_FIELDS,
}
),
mock.call(
{
"job_id": JOB_NOT_FOUND,
"exclude_fields": JOB_INIT_EXCLUDED_JOB_STATE_FIELDS,
}
),
]
)
self.assertEqual(BATCH_PARENT, job_ids[0])
self.assertCountEqual(new_child_ids, job_ids[1:])
batch_job = self.jm.get_job(BATCH_PARENT)
reg_child_jobs = [
self.jm.get_job(job_id) for job_id in batch_job._acc_state["child_jobs"]
]
self.assertCountEqual(batch_job.children, reg_child_jobs)
self.assertCountEqual(batch_job._acc_state["child_jobs"], new_child_ids)
with mock.patch.object(
MockClients, "check_job", side_effect=mock_check_job
) as m:
self.assertCountEqual(batch_job.child_jobs, new_child_ids)
def test_modify_job_refresh(self):
    """modify_job_refresh adjusts each job's refresh counter and
    never lets it drop below zero."""
    for job_id, terminality in JOBS_TERMINALITY.items():
        # Initial refresh: 1 for active jobs, 0 for terminal ones.
        self.assertEqual(
            self.jm._running_jobs[job_id]["refresh"], int(not terminality)
        )
        self.jm.modify_job_refresh([job_id], -1)  # stop
        self.assertEqual(self.jm._running_jobs[job_id]["refresh"], 0)
        self.jm.modify_job_refresh([job_id], -1)  # stop
        # A second stop does not go negative.
        self.assertEqual(self.jm._running_jobs[job_id]["refresh"], 0)
        self.jm.modify_job_refresh([job_id], 1)  # start
        self.assertEqual(self.jm._running_jobs[job_id]["refresh"], 1)
        self.jm.modify_job_refresh([job_id], 1)  # start
        self.assertEqual(self.jm._running_jobs[job_id]["refresh"], 2)
        self.jm.modify_job_refresh([job_id], -1)  # stop
        self.assertEqual(self.jm._running_jobs[job_id]["refresh"], 1)
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_lookup_job_info(self):
    """lookup_job_info returns the expected info dict for every job."""
    infos = self.jm.lookup_job_info(ALL_JOBS)
    self.assertCountEqual(ALL_JOBS, infos.keys())
    self.assertEqual(get_test_job_infos(ALL_JOBS), infos)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
[
"unittest.main",
"unittest.mock.patch.object",
"copy.deepcopy",
"biokbase.narrative.jobs.jobmanager.get_error_output_state",
"unittest.mock.Mock",
"re.escape",
"unittest.mock.patch",
"itertools.combinations",
"unittest.mock.call",
"re.sub"
] |
[((3108, 3169), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (3118, 3169), False, 'from unittest import mock\n'), ((3382, 3443), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (3392, 3443), False, 'from unittest import mock\n'), ((4384, 4453), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_failing_mock_client'], {}), "('biokbase.narrative.clients.get', get_failing_mock_client)\n", (4394, 4453), False, 'from unittest import mock\n'), ((4761, 4822), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (4771, 4822), False, 'from unittest import mock\n'), ((5722, 5783), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (5732, 5783), False, 'from unittest import mock\n'), ((8668, 8729), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (8678, 8729), False, 'from unittest import mock\n'), ((9048, 9109), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (9058, 9109), False, 'from unittest import mock\n'), ((10373, 10434), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (10383, 10434), False, 'from unittest import mock\n'), ((13366, 13427), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), 
"('biokbase.narrative.clients.get', get_mock_client)\n", (13376, 13427), False, 'from unittest import mock\n'), ((15523, 15584), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (15533, 15584), False, 'from unittest import mock\n'), ((16659, 16720), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (16669, 16720), False, 'from unittest import mock\n'), ((17088, 17149), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (17098, 17149), False, 'from unittest import mock\n'), ((17672, 17733), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (17682, 17733), False, 'from unittest import mock\n'), ((18699, 18760), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (18709, 18760), False, 'from unittest import mock\n'), ((19562, 19623), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (19572, 19623), False, 'from unittest import mock\n'), ((20344, 20405), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (20354, 20405), False, 'from unittest import mock\n'), ((21123, 21184), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (21133, 21184), False, 'from unittest import mock\n'), ((21462, 21523), 
'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (21472, 21523), False, 'from unittest import mock\n'), ((24369, 24430), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (24379, 24430), False, 'from unittest import mock\n'), ((25973, 26034), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (25983, 26034), False, 'from unittest import mock\n'), ((26256, 26317), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (26266, 26317), False, 'from unittest import mock\n'), ((29123, 29184), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (29133, 29184), False, 'from unittest import mock\n'), ((29421, 29436), 'unittest.main', 'unittest.main', ([], {}), '()\n', (29434, 29436), False, 'import unittest\n'), ((20003, 20027), 'copy.deepcopy', 'copy.deepcopy', (['TEST_JOBS'], {}), '(TEST_JOBS)\n', (20016, 20027), False, 'import copy\n'), ((6114, 6157), 'itertools.combinations', 'itertools.combinations', (['cell_ids', 'combo_len'], {}), '(cell_ids, combo_len)\n', (6136, 6157), False, 'import itertools\n'), ((9276, 9349), 'unittest.mock.patch.object', 'mock.patch.object', (['MockClients', '"""check_jobs"""'], {'side_effect': 'mock_check_jobs'}), "(MockClients, 'check_jobs', side_effect=mock_check_jobs)\n", (9293, 9349), False, 'from unittest import mock\n'), ((11089, 11136), 'unittest.mock.patch.object', 'mock.patch.object', (['self.jm', '"""_running_jobs"""', '{}'], {}), "(self.jm, '_running_jobs', {})\n", (11106, 11136), False, 'from 
unittest import mock\n'), ((11339, 11400), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.clients.get"""', 'get_mock_client'], {}), "('biokbase.narrative.clients.get', get_mock_client)\n", (11349, 11400), False, 'from unittest import mock\n'), ((12869, 12940), 'unittest.mock.patch', 'mock.patch', (['"""biokbase.narrative.jobs.jobmanager.JobManager._cancel_job"""'], {}), "('biokbase.narrative.jobs.jobmanager.JobManager._cancel_job')\n", (12879, 12940), False, 'from unittest import mock\n'), ((20117, 20175), 'unittest.mock.patch.object', 'mock.patch.object', (['MockClients', '"""ee2_job_info"""', 'test_jobs_'], {}), "(MockClients, 'ee2_job_info', test_jobs_)\n", (20134, 20175), False, 'from unittest import mock\n'), ((25102, 25139), 'biokbase.narrative.jobs.jobmanager.get_error_output_state', 'get_error_output_state', (['JOB_NOT_FOUND'], {}), '(JOB_NOT_FOUND)\n', (25124, 25139), False, 'from biokbase.narrative.jobs.jobmanager import JOB_NOT_REG_ERR, JOB_NOT_BATCH_ERR, JOBS_MISSING_FALSY_ERR, get_error_output_state\n'), ((26950, 27021), 'unittest.mock.patch.object', 'mock.patch.object', (['MockClients', '"""check_job"""'], {'side_effect': 'mock_check_job'}), "(MockClients, 'check_job', side_effect=mock_check_job)\n", (26967, 27021), False, 'from unittest import mock\n'), ((28047, 28118), 'unittest.mock.patch.object', 'mock.patch.object', (['MockClients', '"""check_job"""'], {'side_effect': 'mock_check_job'}), "(MockClients, 'check_job', side_effect=mock_check_job)\n", (28064, 28118), False, 'from unittest import mock\n'), ((7487, 7531), 're.escape', 're.escape', (['f"""{JOBS_MISSING_FALSY_ERR}: {[]}"""'], {}), "(f'{JOBS_MISSING_FALSY_ERR}: {[]}')\n", (7496, 7531), False, 'import re\n'), ((7627, 7683), 're.escape', 're.escape', (['f"""{JOBS_MISSING_FALSY_ERR}: {[\'\', \'\', None]}"""'], {}), '(f"{JOBS_MISSING_FALSY_ERR}: {[\'\', \'\', None]}")\n', (7636, 7683), False, 'import re\n'), ((12106, 12150), 're.escape', 're.escape', 
(['f"""{JOBS_MISSING_FALSY_ERR}: {[]}"""'], {}), "(f'{JOBS_MISSING_FALSY_ERR}: {[]}')\n", (12115, 12150), False, 'import re\n'), ((12242, 12298), 're.escape', 're.escape', (['f"""{JOBS_MISSING_FALSY_ERR}: {[\'\', \'\', None]}"""'], {}), '(f"{JOBS_MISSING_FALSY_ERR}: {[\'\', \'\', None]}")\n', (12251, 12298), False, 'import re\n'), ((12449, 12486), 'biokbase.narrative.jobs.jobmanager.get_error_output_state', 'get_error_output_state', (['JOB_NOT_FOUND'], {}), '(JOB_NOT_FOUND)\n', (12471, 12486), False, 'from biokbase.narrative.jobs.jobmanager import JOB_NOT_REG_ERR, JOB_NOT_BATCH_ERR, JOBS_MISSING_FALSY_ERR, get_error_output_state\n'), ((14721, 14772), 'unittest.mock.Mock', 'mock.Mock', ([], {'return_value': '{}', 'side_effect': 'check_state'}), '(return_value={}, side_effect=check_state)\n', (14730, 14772), False, 'from unittest import mock\n'), ((18172, 18209), 'biokbase.narrative.jobs.jobmanager.get_error_output_state', 'get_error_output_state', (['JOB_NOT_FOUND'], {}), '(JOB_NOT_FOUND)\n', (18194, 18209), False, 'from biokbase.narrative.jobs.jobmanager import JOB_NOT_REG_ERR, JOB_NOT_BATCH_ERR, JOBS_MISSING_FALSY_ERR, get_error_output_state\n'), ((18536, 18567), 'unittest.mock.Mock', 'mock.Mock', ([], {'return_value': 'ee2_ret'}), '(return_value=ee2_ret)\n', (18545, 18567), False, 'from unittest import mock\n'), ((19399, 19430), 'unittest.mock.Mock', 'mock.Mock', ([], {'return_value': 'ee2_ret'}), '(return_value=ee2_ret)\n', (19408, 19430), False, 'from unittest import mock\n'), ((20572, 20602), 'biokbase.narrative.jobs.jobmanager.get_error_output_state', 'get_error_output_state', (['dne_id'], {}), '(dne_id)\n', (20594, 20602), False, 'from biokbase.narrative.jobs.jobmanager import JOB_NOT_REG_ERR, JOB_NOT_BATCH_ERR, JOBS_MISSING_FALSY_ERR, get_error_output_state\n'), ((20876, 20920), 're.escape', 're.escape', (['f"""{JOBS_MISSING_FALSY_ERR}: {[]}"""'], {}), "(f'{JOBS_MISSING_FALSY_ERR}: {[]}')\n", (20885, 20920), False, 'import re\n'), ((21011, 21067), 
're.escape', 're.escape', (['f"""{JOBS_MISSING_FALSY_ERR}: {[\'\', \'\', None]}"""'], {}), '(f"{JOBS_MISSING_FALSY_ERR}: {[\'\', \'\', None]}")\n', (21020, 21067), False, 'import re\n'), ((25328, 25372), 're.escape', 're.escape', (['f"""{JOBS_MISSING_FALSY_ERR}: {[]}"""'], {}), "(f'{JOBS_MISSING_FALSY_ERR}: {[]}')\n", (25337, 25372), False, 'import re\n'), ((27170, 27255), 'unittest.mock.call', 'mock.call', (["{'job_id': BATCH_PARENT, 'exclude_fields': EXCLUDED_JOB_STATE_FIELDS}"], {}), "({'job_id': BATCH_PARENT, 'exclude_fields': EXCLUDED_JOB_STATE_FIELDS}\n )\n", (27179, 27255), False, 'from unittest import mock\n'), ((27377, 27471), 'unittest.mock.call', 'mock.call', (["{'job_id': JOB_NOT_FOUND, 'exclude_fields': JOB_INIT_EXCLUDED_JOB_STATE_FIELDS}"], {}), "({'job_id': JOB_NOT_FOUND, 'exclude_fields':\n JOB_INIT_EXCLUDED_JOB_STATE_FIELDS})\n", (27386, 27471), False, 'from unittest import mock\n'), ((9569, 9612), 'biokbase.narrative.jobs.jobmanager.get_error_output_state', 'get_error_output_state', (['job_id', '"""ee2_error"""'], {}), "(job_id, 'ee2_error')\n", (9591, 9612), False, 'from biokbase.narrative.jobs.jobmanager import JOB_NOT_REG_ERR, JOB_NOT_BATCH_ERR, JOBS_MISSING_FALSY_ERR, get_error_output_state\n'), ((11852, 11885), 're.sub', 're.sub', (['pattern', 'sub', 'jobs_html_0'], {}), '(pattern, sub, jobs_html_0)\n', (11858, 11885), False, 'import re\n'), ((11916, 11949), 're.sub', 're.sub', (['pattern', 'sub', 'jobs_html_1'], {}), '(pattern, sub, jobs_html_1)\n', (11922, 11949), False, 'import re\n'), ((15360, 15394), 'unittest.mock.call', 'mock.call', (["{'job_id': JOB_RUNNING}"], {}), "({'job_id': JOB_RUNNING})\n", (15369, 15394), False, 'from unittest import mock\n'), ((15416, 15450), 'unittest.mock.call', 'mock.call', (["{'job_id': JOB_CREATED}"], {}), "({'job_id': JOB_CREATED})\n", (15425, 15450), False, 'from unittest import mock\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright 2013-2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import errno
import itertools
import os
import sys
import time
from .confparser import ConfParser
from .confparser import ConfParserException
from .palette import Palette
from .transformer import Transformer
from .transformer import RegexStyle
from .txtsconf import *
from .version import VERSION
# Banner printed for --version; the %s placeholder is filled with VERSION below.
VERSION_INFO="""TxtStyle version %s.
Copyright (C) 2013 <NAME>.
Apache License v2.0 or later: <http://www.apache.org/licenses/LICENSE-2.0>
""" % VERSION

# Per-user configuration file; created from defaults on first run (see get_conf_path).
_USER_HOME_CONF_FILE = os.path.join(os.getenv('HOME'), '.txts.conf')
class Txts(object):
    """Apply the configured styles to a file or to piped stdin, writing to stdout."""
    def __init__(self, styles, filepath=None, color_always=False):
        """
        :param styles: style objects handed to Transformer.
        :param filepath: optional input file; when None, piped stdin is used.
        :param color_always: emit colour codes even when stdout is not a TTY.
        """
        self.transformer = Transformer(styles)
        self.filepath = filepath
        self.color_always = color_always
    def transform(self):
        # Prefer the explicit file; otherwise read stdin only when it is a pipe.
        # With an interactive (TTY) stdin and no file, nothing is done.
        if self.filepath:
            self._transform_file()
        elif not sys.stdin.isatty():
            self._transform_pipe()
    def _transform_file(self):
        try:
            with open(self.filepath, 'r', encoding='utf-8', errors='ignore') as infile:
                for line in infile:
                    self._style(line)
        except KeyboardInterrupt:
            pass
        except IOError as e:
            # Report only the interesting errno cases, then exit with e.errno
            # regardless of which branch was taken.
            if e.errno == errno.ENOENT:
                sys.stderr.write("File not found: %s\n" % self.filepath)
            elif e.errno == errno.EPIPE:
                # broken pipe
                pass
            else:
                sys.stderr.write("%s\n" % e)
            sys.exit(e.errno)
    def _transform_pipe(self):
        # Re-open stdin as a raw binary stream so undecodable bytes cannot
        # crash the read; each line is decoded with errors ignored instead.
        sys.stdin = sys.stdin.detach()
        try:
            while True:
                line = sys.stdin.readline()
                if not line:
                    break
                self._style(line.decode('utf-8', errors='ignore'))
        except KeyboardInterrupt:
            pass
        finally:
            sys.stdin.close()
    def _style(self, line):
        # Colourise only when writing to a terminal (or when forced).
        if sys.stdout.isatty() or self.color_always:
            styled_line = self.transformer.style(line.strip('\n'))
            sys.stdout.write(styled_line + '\n')
        else:
            sys.stdout.write(line)
def parse_args():
    """Define and parse the TxtStyle command line options."""
    parser = argparse.ArgumentParser(
        prog='TxtStyle',
        description='Prettifies output of console programs.')
    parser.add_argument(
        'filepath', nargs='?', help='Path to a file.')
    # -p / -n / -r select mutually exclusive operating modes.
    mode_group = parser.add_mutually_exclusive_group()
    mode_group.add_argument(
        '-p', '--palette', action='store_true',
        help='Print a palette of available styles.')
    mode_group.add_argument(
        '-n', '--name', nargs=1,
        help='Name of the style to apply.')
    mode_group.add_argument(
        '-r', '--regex', nargs=1, action='append',
        help='Highlight text based on the given regular expression.')
    parser.add_argument(
        '-c', '--conf', nargs=1,
        help='Path to a conf file. Default is: ~/.txt.conf')
    parser.add_argument(
        '--color-always', action='store_true',
        help='Always use color. Similar to grep --color=always.')
    info_group = parser.add_mutually_exclusive_group()
    info_group.add_argument(
        '--version', action='store_true',
        help='Print version information')
    return parser.parse_args()
def get_styles(conf_parser, style_def_name):
    """Look up a named style definition; exit(1) with a message on a bad conf."""
    try:
        return conf_parser.get_styles(style_def_name)
    except ConfParserException as err:
        sys.stderr.write("%s\n" % err)
        sys.exit(1)
def get_conf_lines(args):
    """Read the resolved configuration file and return its raw lines."""
    with open(get_conf_path(args), 'r') as conf_file:
        return conf_file.readlines()
def get_conf_path(args):
    """Resolve the configuration file path.

    An explicit -c/--conf file wins and must already exist (fatal otherwise);
    without -c the per-user file is used, seeded from the bundled defaults
    the first time.
    """
    if args.conf:
        # User-specified conf file.
        chosen = args.conf[0]
        if os.path.isfile(chosen):
            return chosen
        sys.stderr.write("File not found: %s\n" % chosen)
        sys.exit(errno.ENOENT)
    # Fall back to the user-home conf file (~/.txts.conf), creating it on demand.
    if not os.path.isfile(_USER_HOME_CONF_FILE):
        with open(_USER_HOME_CONF_FILE, 'w+') as f:
            f.write(DEFAULT_CONF)
    return _USER_HOME_CONF_FILE
def loop_default_colors():
    """Endlessly cycle (color, style) pairs used to highlight -r regexes."""
    styles = ('bold', 'underline')
    colors = ('red', 'green', 'blue', 'magenta', 'cyan', 'white')
    while True:
        for style, color in itertools.product(styles, colors):
            yield (color, style)
def main():
    """Entry point: dispatch on the CLI options and run the transformation."""
    options = parse_args()
    if options.version:
        sys.stdout.write(VERSION_INFO)
        sys.exit(0)
    if options.palette:
        Palette().print_palette()
        sys.exit(0)
    styles = []
    if options.name:
        # Load the named style definition from the conf file.
        parser = ConfParser(get_conf_lines(options))
        styles = get_styles(parser, options.name[0])
    elif options.regex:
        # Ad-hoc regex styles, each paired with the next default colour.
        patterns = list(itertools.chain.from_iterable(options.regex))
        styles = [RegexStyle(pattern, colour)
                  for pattern, colour in zip(patterns, loop_default_colors())]
    Txts(styles, options.filepath, options.color_always).transform()
if __name__ == "__main__":
    main()
|
[
"sys.stdin.detach",
"sys.stdout.write",
"sys.stdin.isatty",
"argparse.ArgumentParser",
"sys.stdin.readline",
"sys.stdin.close",
"os.path.isfile",
"sys.stdout.isatty",
"sys.stderr.write",
"itertools.chain.from_iterable",
"os.getenv",
"sys.exit"
] |
[((1103, 1120), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (1112, 1120), False, 'import os\n'), ((2744, 2843), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""TxtStyle"""', 'description': '"""Prettifies output of console programs."""'}), "(prog='TxtStyle', description=\n 'Prettifies output of console programs.')\n", (2767, 2843), False, 'import argparse\n'), ((2142, 2160), 'sys.stdin.detach', 'sys.stdin.detach', ([], {}), '()\n', (2158, 2160), False, 'import sys\n'), ((4836, 4866), 'sys.stdout.write', 'sys.stdout.write', (['VERSION_INFO'], {}), '(VERSION_INFO)\n', (4852, 4866), False, 'import sys\n'), ((4875, 4886), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4883, 4886), False, 'import sys\n'), ((2445, 2462), 'sys.stdin.close', 'sys.stdin.close', ([], {}), '()\n', (2460, 2462), False, 'import sys\n'), ((2503, 2522), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (2520, 2522), False, 'import sys\n'), ((2624, 2660), 'sys.stdout.write', 'sys.stdout.write', (["(styled_line + '\\n')"], {}), "(styled_line + '\\n')\n", (2640, 2660), False, 'import sys\n'), ((2687, 2709), 'sys.stdout.write', 'sys.stdout.write', (['line'], {}), '(line)\n', (2703, 2709), False, 'import sys\n'), ((3865, 3893), 'sys.stderr.write', 'sys.stderr.write', (["('%s\\n' % e)"], {}), "('%s\\n' % e)\n", (3881, 3893), False, 'import sys\n'), ((3902, 3913), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3910, 3913), False, 'import sys\n'), ((4166, 4190), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (4180, 4190), False, 'import os\n'), ((4204, 4255), 'sys.stderr.write', 'sys.stderr.write', (["('File not found: %s\\n' % filepath)"], {}), "('File not found: %s\\n' % filepath)\n", (4220, 4255), False, 'import sys\n'), ((4268, 4290), 'sys.exit', 'sys.exit', (['errno.ENOENT'], {}), '(errno.ENOENT)\n', (4276, 4290), False, 'import sys\n'), ((4384, 4420), 'os.path.isfile', 'os.path.isfile', (['_USER_HOME_CONF_FILE'], {}), 
'(_USER_HOME_CONF_FILE)\n', (4398, 4420), False, 'import os\n'), ((4952, 4963), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4960, 4963), False, 'import sys\n'), ((1450, 1468), 'sys.stdin.isatty', 'sys.stdin.isatty', ([], {}), '()\n', (1466, 1468), False, 'import sys\n'), ((2072, 2089), 'sys.exit', 'sys.exit', (['e.errno'], {}), '(e.errno)\n', (2080, 2089), False, 'import sys\n'), ((2222, 2242), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (2240, 2242), False, 'import sys\n'), ((1848, 1904), 'sys.stderr.write', 'sys.stderr.write', (["('File not found: %s\\n' % self.filepath)"], {}), "('File not found: %s\\n' % self.filepath)\n", (1864, 1904), False, 'import sys\n'), ((2031, 2059), 'sys.stderr.write', 'sys.stderr.write', (["('%s\\n' % e)"], {}), "('%s\\n' % e)\n", (2047, 2059), False, 'import sys\n'), ((5208, 5249), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['args.regex'], {}), '(args.regex)\n', (5237, 5249), False, 'import itertools\n')]
|
import praw, datetime, pytz
from . import utils, exceptions
def r2md(post, **kwargs):
    '''
    Convert given post to markdown and save it as a file in current path as given file name.

    Parameters:
        post (:obj:`praw.models.Submission`): A Praw's submission object
        file_name (str): File name for the converted markdown file. If left empty, post id will be the file name automatically. (file format(.md) should also be specified.)
        timezone (str): Timezone for a posted time/date. Default is UTC.

    Raises:
        exceptions.reddit2mdValueError: for unknown kwargs, non-selftext posts,
            or a timezone unknown to pytz.
        exceptions.reddit2mdOSError: if the output file cannot be created.
    '''
    # Reject unsupported keyword arguments early.
    for key in kwargs:
        if key not in ('file_name', 'timezone'):
            raise exceptions.reddit2mdValueError("Unknown parameter(s) is/are given.")
    if not post.is_self:
        raise exceptions.reddit2mdValueError("Only selftext posts are supported.")
    if not kwargs.get('file_name'):
        file_name = "{}.md".format(post.id)
    else:
        file_name = kwargs.get('file_name')
    md_splitted = post.selftext.splitlines()
    result_md = []
    for line in md_splitted:
        if utils.is_url_image(line):
            # BUG FIX: the template used to be the empty string
            # (''.format(url=line)), which silently dropped every image link;
            # embed the URL as a markdown image instead.
            line = ''.format(url=line)
        result_md.append(line)
    result = {
        "title": post.title,
        "author": post.author.name,
        "selftext": result_md,
        "url": utils.make_reddit_url(post.permalink),
        "created_date": utils.get_posted_date(post),
        "subreddit": post.subreddit.display_name
    }
    author_as_link = "[{}]({})".format(result['author'], utils.make_reddit_url("u/{}".format(result['author'])))
    if kwargs.get('timezone'):
        timezone = kwargs.get('timezone')
        if timezone not in pytz.all_timezones:
            # BUG FIX: this used to raise the unqualified, undefined name
            # reddit2mdValueError (a NameError at runtime) and never
            # interpolated the offending timezone into the message.
            raise exceptions.reddit2mdValueError(
                "Timezone {} is not listed in pytz.".format(timezone))
        tz_converted = pytz.timezone(timezone)
        result['created_date'] = tz_converted.fromutc(result['created_date'])
    else:
        timezone = 'UTC'
    try:
        # "x" mode fails instead of overwriting an existing file; the context
        # manager closes the handle (previously it was leaked).
        with open(file_name, "x") as f:
            f.write("# {}".format(result["title"]) + '\n\n')
            for l in result["selftext"]:
                f.write(l + '\n')
            f.write('\n' + "---" + '\n')
            f.write("Author: {}".format(author_as_link) + '\n\n')
            f.write("URL: {}".format(result["url"]) + '\n\n')
            f.write("Created: {} ({})".format(result["created_date"], timezone) + '\n\n')
            f.write("Subreddit: r/{}".format(result["subreddit"]))
    except OSError as e:
        raise exceptions.reddit2mdOSError("OSError during file creation: {}".format(e))
|
[
"pytz.timezone"
] |
[((1815, 1838), 'pytz.timezone', 'pytz.timezone', (['timezone'], {}), '(timezone)\n', (1828, 1838), False, 'import praw, datetime, pytz\n')]
|
""" This module contains classes that preprocess the data. """
import calendar
from datetime import datetime
from util.util_functions import *
class CSVProcessor:
    """ Class that contains methods to process csv data. """

    # Month abbreviation ('Jan', 'Feb', ...) -> 1-based month index.
    # Built once at class creation instead of on every fix_date() call.
    _MONTH_INDEX = {abbr: idx for idx, abbr in enumerate(calendar.month_abbr)}

    def fix_date(self, input_date):
        """Normalise a messy date value into an ISO 'YYYY-MM-DD' string.

        Recognised inputs (anything else is printed and yields None):
          * NaN / 'nan'                     -> '1970-01-01' (epoch placeholder)
          * 'YYYY-MM' (possibly "['YYYY'-"-wrapped) or 'YYYY Mon-...' -> first
            day of that month
          * 'YYYY'                          -> 'YYYY-01-01'
          * 'YYYY Mon' / 'YYYY Season'      -> first day of the (start) month,
            seasons mapped to Mar/Jul/Oct/Dec
        """
        months = self._MONTH_INDEX
        input_date = str(input_date)
        if input_date == "nan":
            return datetime(1970, 1, 1).strftime('%Y-%m-%d')
        if "-" in input_date:
            elements = input_date.split("-")
            if " " in elements[0].strip():
                # e.g. "1998 Jun-Jul": year plus a month abbreviation.
                new_elements = elements[0].strip().split(" ")
                year = int(new_elements[0].strip())
                month_index = int(months[new_elements[1].strip()])
            else:
                # e.g. "1998-06", tolerating stray "['" wrapping around the year.
                year = int(elements[0].replace("[", "").replace("'", "").strip())
                month_index = int(elements[1].strip())
            return datetime(year, month_index, 1).strftime('%Y-%m-%d')
        if len(input_date) == 4:
            return datetime(int(input_date), 1, 1).strftime('%Y-%m-%d')
        if " " in input_date and "[" not in input_date:
            elements = input_date.split(" ")
            year = int(elements[0].strip())
            # Map seasons to a representative month abbreviation.
            if elements[1].strip() == "Spring":
                elements[1] = "Mar"
            elif elements[1].strip() == "Summer":
                elements[1] = "Jul"
            elif elements[1].strip() == "Autumn" or elements[1].strip() == "Fall":
                elements[1] = "Oct"
            elif elements[1].strip() == "Winter":
                elements[1] = "Dec"
            if "-" in elements[1].strip():
                # A month range such as "Mar-Apr": keep the first month.
                month_index = int(months[elements[1].strip().split("-")[0]])
            else:
                month_index = int(months[elements[1].strip()])
            return datetime(year, month_index, 1).strftime('%Y-%m-%d')
        # Unrecognised format: keep the original debug print and signal failure.
        print(input_date)
        return None
|
[
"datetime.datetime"
] |
[((445, 465), 'datetime.datetime', 'datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (453, 465), False, 'from datetime import datetime\n'), ((961, 991), 'datetime.datetime', 'datetime', (['year', 'month_index', '(1)'], {}), '(year, month_index, 1)\n', (969, 991), False, 'from datetime import datetime\n'), ((1863, 1893), 'datetime.datetime', 'datetime', (['year', 'month_index', '(1)'], {}), '(year, month_index, 1)\n', (1871, 1893), False, 'from datetime import datetime\n')]
|
import random
import sentry_sdk
sentry_sdk.init("https://8e577dc11e2743b4bb2581da3bbcc5a9@sentry.io/1307203")
def answer(question, printable=False):
"""Simulates a magic 8 ball answer to a user's question.
Keyword arguments:
question -- a question made by the user
printable -- prints answer when True, returns it otherwise (default: False)
"""
answers = [
"It is certain.",
"It is decidedly so.",
"Without a doubt.",
"Yes - definitely.",
"You may rely on it.",
"As I see it, yes.",
"Most likely.",
"Outlook good.",
"Yes.",
"Signs point to yes.",
"Reply hazy, try again.",
"Ask again later.",
"Better not tell you now.",
"Cannot predict now.",
"Concentrate and ask again.",
"Don't count on it.",
"My reply is no.",
"My sources say no.",
"Outlook not so good.",
"Very doubtful."
]
random.seed(question)
answer = random.choice(answers)
if not printable:
return answer
print(answer)
if __name__ == "__main__":
    # Interactive entry point: prompt for a question and print the reply.
    answer(input("QUESTION: "), printable=True)
|
[
"sentry_sdk.init",
"random.seed",
"random.choice"
] |
[((33, 110), 'sentry_sdk.init', 'sentry_sdk.init', (['"""https://8e577dc11e2743b4bb2581da3bbcc5a9@sentry.io/1307203"""'], {}), "('https://8e577dc11e2743b4bb2581da3bbcc5a9@sentry.io/1307203')\n", (48, 110), False, 'import sentry_sdk\n'), ((977, 998), 'random.seed', 'random.seed', (['question'], {}), '(question)\n', (988, 998), False, 'import random\n'), ((1012, 1034), 'random.choice', 'random.choice', (['answers'], {}), '(answers)\n', (1025, 1034), False, 'import random\n')]
|
#!/usr/bin/evn python
from __future__ import print_function
import sys
import os
import glob
import fileinput
import socket
import commands
os.system("""echo "TFile.Recover 0" >> .rootrc""")
import ROOT
from argparse import ArgumentParser
from pprint import pprint
#--------------------------------------------------
# user information
email = "<EMAIL>"
# set general parameters
memory = 2000  # condor memory request (presumably MB) -- confirm units
# flavor = "tomorrow"
MaxRuntime = 5*60*60  # maximum job runtime in seconds (5 hours)
metadata = "inputSampleList.txt"  # sample list copied into each job directory
outDir = "/eos/atlas/user/a/ahasib/public/Simul-FastCalo/ParametrizationProductionVer07/"
plotDir = "/eos/user/a/ahasib/www/Simul-FastCalo/ParametrizationProductionVer07/"
version = "ver07"
# Template files whose @PLACEHOLDER@ tokens are filled per DSID in main().
runEpara_temp = "runEpara_temp.C"
runMeanRZ_temp = "runMeanRZ_temp.C"
runShape_temp = "runShape_temp.C"
sub_temp = "template_submit.sub"
exe_temp = "template_run.sh"
# energy parametrization related parameters
npca_primary = 5
npca_secondary = 1
do_validation = 1
init_epara = "initTFCSAnalyzer.C"
run_epara = "run_epara.cxx"
# shape parametrization related parameters
energy_cutoff = 0.9995
dsid_zv0 = -999
do_phiSymm = 1
do_ZvertexStudies = 0
init_shape = "initTFCSAnalyzer.C"
run_shape = "runTFCS2DParametrizationHistogram.cxx"
#----------------------------------------------------------
def getArguments():
    """Parse the command line: DSID list files plus check/report options."""
    parser = ArgumentParser()
    # (flags, parameters) for every supported option.
    arg_specs = [
        (("-i", "--input"),
         dict(nargs='+', help="Text files containing a list of DSIDs you want to submit")),
        (("-c", "--checkOnly"),
         dict(action='store_true', help="Flag for only checking if the condor jobs ran successfuly and produced the parametrizations. You would need to provide a list of DSIDs.")),
        (("-w", "--writeFile"),
         dict(default=None, help="Write a txt file with DSIDs of jobs that are broken.")),
    ]
    for flags, params in arg_specs:
        parser.add_argument(*flags, **params)
    return parser.parse_args()
def getListOfDSIDs(inFile):
    """Return the DSIDs listed one per line in *inFile* ([] when no file given)."""
    if not inFile:
        return []
    with open(inFile) as listing:
        # Only the trailing newline is stripped; other whitespace is preserved.
        return [entry.strip("\n") for entry in listing]
def checkParametrizations(destination, inFile):
    """Verify that each DSID in *inFile* produced its three parametrization
    ROOT files (shape, secondPCA/energy, extrapol) under *destination*.

    Returns a dict mapping DSID -> list of failure descriptions; DSIDs whose
    files all exist and open cleanly are omitted.
    """
    failed_jobs = {}
    if inFile:
        with open(inFile) as DSIDList:
            for DSID in DSIDList:
                status = []
                DSID = DSID.strip("\n")
                # Expected output file patterns for this DSID.
                shapefile = "mc16_13TeV."+DSID+".*.shapepara.*.root"
                energyfile = "mc16_13TeV."+DSID+".*.secondPCA.*.root"
                extrapolfile = "mc16_13TeV."+DSID+".*.extrapol.*.root"
                # NOTE: 'commands' is Python 2 only. The existence test below
                # relies on 'ls' emitting a "cannot access" message into the
                # captured output when a pattern matches nothing.
                shapefileNames = commands.getoutput("ls {destination}/{dataset}".format(destination=destination,dataset=shapefile))
                energyfileNames = commands.getoutput("ls {destination}/{dataset}".format(destination=destination,dataset=energyfile))
                extrapolfileNames = commands.getoutput("ls {destination}/{dataset}".format(destination=destination,dataset=extrapolfile))
                print("Checking: ", shapefileNames, energyfileNames, extrapolfileNames, sep="\n")
                print("=====================================================================")
                if "cannot" in shapefileNames or "cannot" in energyfileNames or "cannot" in extrapolfileNames:
                    if "cannot" in shapefileNames:
                        status.append("shape file doesn't exist")
                    if "cannot" in energyfileNames:
                        status.append("energy file doesn't exist")
                    if "cannot" in extrapolfileNames:
                        status.append("extrapol file doesn't exist")
                else:
                    # All three files exist: also verify each opens as a
                    # valid (non-zombie) ROOT file.
                    fshape = ROOT.TFile(shapefileNames)
                    if fshape.IsZombie():
                        status.append("shape file corrupted")
                    fshape.Close()
                    fenergy = ROOT.TFile(energyfileNames)
                    if fenergy.IsZombie():
                        status.append("energy file corrupted")
                    fenergy.Close()
                    fextrapol = ROOT.TFile(extrapolfileNames)
                    if fextrapol.IsZombie():
                        status.append("extrapol file corrupted")
                    fextrapol.Close()
                if status:
                    failed_jobs[DSID] = status
    return failed_jobs
def main():
    """Submit one condor job per DSID (default) or verify existing outputs (-c)."""
    options = getArguments()
    # Submission only works from an LXPLUS node; check-only mode may run anywhere.
    try:
        host = socket.gethostname()
        if 'lxplus' not in host and not options.checkOnly:
            raise Exception
    except Exception:
        print("Condor jobs have to be submitted from a LXPLUS node")
        sys.exit(1)
    if not options.input:
        print("You need to provide a list of DSIDs you want to run on")
        sys.exit(1)
    if not options.checkOnly:
        print("Running Condor Submission ...")
        for dsid_list in options.input:
            for dsid in getListOfDSIDs(dsid_list):
                _prepare_and_submit(dsid)
    if options.checkOnly:
        print("Checking condor jobs...")
        failedJobs = []
        for dsid_list in options.input:
            failedJobs = checkParametrizations(outDir, dsid_list)
        pprint(failedJobs)
        if options.writeFile:
            failed_dsid = failedJobs.keys()
            print("writing the dsids in a txt file...")
            with open(options.writeFile, 'w') as outFile:
                outFile.write('\n'.join(failed_dsid))


def _fill_template(path, substitutions):
    """Replace each (placeholder, value) pair, in order, in the file at *path*."""
    with open(path, 'r') as f:
        text = f.read()
    for placeholder, value in substitutions:
        text = text.replace(placeholder, value)
    with open(path, 'w') as f:
        f.write(text)


def _quoted(value):
    """Wrap a string in escaped double quotes, as the ROOT macro templates expect."""
    return '\\"' + value + '\\"'


def _prepare_and_submit(dsid):
    """Create run/dsid_<dsid>, instantiate every template there and condor_submit."""
    workdir = "run/dsid_" + str(dsid)
    run = "runDSID" + "_" + str(dsid)
    script_exe = run + ".sh"
    script_sub = "run.sub"
    script_runEpara = "runEpara.C"
    script_runShape = "runShape.C"
    script_runMeanRZ = "runMeanRZ.C"
    if not os.path.exists(workdir):
        os.makedirs(workdir)
    os.chdir(workdir)
    os.system("echo ")
    os.system("echo \"Current directory :\" $PWD ")
    # Executable wrapper: record the submit directory and make it runnable.
    os.system("cp ..\/..\/" + exe_temp + " " + script_exe)
    _fill_template(script_exe, [('@SUBMIT_DIR@', os.getcwd())])
    os.system("chmod +x " + script_exe)
    # Condor submit description.
    os.system("cp ..\/..\/" + sub_temp + " " + script_sub)
    _fill_template(script_sub, [
        ('@EXE@', script_exe),
        ('@RUN@', run),
        ('@MEM@', str(memory)),
        ('@MAXRUNTIME@', str(MaxRuntime)),
        ('@EMAIL@', email),
    ])
    # Energy parametrization macro.
    os.system("cp ..\/..\/" + runEpara_temp + " " + script_runEpara)
    _fill_template(script_runEpara, [
        ('@DSID@', str(dsid)),
        ('@METADATA@', _quoted(metadata)),
        ('@DIR@', _quoted(outDir)),
        ('@NPCA1@', str(npca_primary)),
        ('@NPCA2@', str(npca_secondary)),
        ('@VALIDATION@', str(do_validation)),
        ('@VER@', _quoted(version)),
        ('@PLOTDIR@', _quoted(plotDir)),
    ])
    # Mean R/Z macro (DOMEANRZ=1, no 2D parametrization).
    os.system("cp ..\/..\/" + runMeanRZ_temp + " " + script_runMeanRZ)
    _fill_template(script_runMeanRZ, [
        ('@DSID@', str(dsid)),
        ('@DSIDZV0@', str(dsid_zv0)),
        ('@METADATA@', _quoted(metadata)),
        ('@DIR@', _quoted(outDir)),
        ('@VER@', _quoted(version)),
        ('@CUTOFF@', str(energy_cutoff)),
        ('@PLOTDIR@', _quoted(plotDir)),
        ('@DO2DPARAM@', str(0)),
        ('@PHISYMM@', str(do_phiSymm)),
        ('@DOMEANRZ@', str(1)),
        ('@USEMEANRZ@', str(0)),
        ('@DOZVERTEXSTUDIES@', str(do_ZvertexStudies)),
    ])
    # Shape parametrization macro (DO2DPARAM=1, USEMEANRZ=1).
    os.system("cp ..\/..\/" + runShape_temp + " " + script_runShape)
    _fill_template(script_runShape, [
        ('@DSID@', str(dsid)),
        ('@DSIDZV0@', str(dsid_zv0)),
        ('@METADATA@', _quoted(metadata)),
        ('@DIR@', _quoted(outDir)),
        ('@VER@', _quoted(version)),
        ('@CUTOFF@', str(energy_cutoff)),
        ('@PLOTDIR@', _quoted(plotDir)),
        ('@DO2DPARAM@', str(1)),
        ('@PHISYMM@', str(do_phiSymm)),
        ('@DOMEANRZ@', str(0)),
        ('@USEMEANRZ@', str(1)),
        ('@DOZVERTEXSTUDIES@', str(do_ZvertexStudies)),
    ])
    # Helper sources and the metadata list the job needs locally.
    os.system("cp ..\/..\/" + init_epara + " " + init_epara)
    os.system("cp ..\/..\/" + run_epara + " " + run_epara)
    os.system("cp ..\/..\/" + init_shape + " " + init_shape)
    os.system("cp ..\/..\/" + run_shape + " " + run_shape)
    os.system("cp ..\/..\/" + metadata + " " + metadata)
    # Hand the job to condor and return to the top-level directory.
    CondorSubmitCommand = "condor_submit " + script_sub
    print(CondorSubmitCommand)
    os.system(CondorSubmitCommand)
    os.chdir("../../")
    os.system("echo \"Current directory :\" $PWD ")


if __name__ == '__main__':
    main()
|
[
"argparse.ArgumentParser",
"os.makedirs",
"os.getcwd",
"os.path.exists",
"os.system",
"socket.gethostname",
"ROOT.TFile",
"pprint.pprint",
"os.chdir",
"sys.exit"
] |
[((140, 186), 'os.system', 'os.system', (['"""echo "TFile.Recover 0" >> .rootrc"""'], {}), '(\'echo "TFile.Recover 0" >> .rootrc\')\n', (149, 186), False, 'import os\n'), ((1339, 1355), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1353, 1355), False, 'from argparse import ArgumentParser\n'), ((4219, 4239), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (4237, 4239), False, 'import socket\n'), ((4589, 4600), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4597, 4600), False, 'import sys\n'), ((4427, 4438), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4435, 4438), False, 'import sys\n'), ((10393, 10411), 'pprint.pprint', 'pprint', (['failedJobs'], {}), '(failedJobs)\n', (10399, 10411), False, 'from pprint import pprint\n'), ((5336, 5349), 'os.chdir', 'os.chdir', (['dir'], {}), '(dir)\n', (5344, 5349), False, 'import os\n'), ((5366, 5384), 'os.system', 'os.system', (['"""echo """'], {}), "('echo ')\n", (5375, 5384), False, 'import os\n'), ((5401, 5446), 'os.system', 'os.system', (['"""echo "Current directory :" $PWD """'], {}), '(\'echo "Current directory :" $PWD \')\n', (5410, 5446), False, 'import os\n'), ((5506, 5562), 'os.system', 'os.system', (["('cp ..\\\\/..\\\\/' + exe_temp + ' ' + script_exe)"], {}), "('cp ..\\\\/..\\\\/' + exe_temp + ' ' + script_exe)\n", (5515, 5562), False, 'import os\n'), ((5577, 5588), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5586, 5588), False, 'import os\n'), ((5830, 5865), 'os.system', 'os.system', (["('chmod +x ' + script_exe)"], {}), "('chmod +x ' + script_exe)\n", (5839, 5865), False, 'import os\n'), ((5921, 5977), 'os.system', 'os.system', (["('cp ..\\\\/..\\\\/' + sub_temp + ' ' + script_sub)"], {}), "('cp ..\\\\/..\\\\/' + sub_temp + ' ' + script_sub)\n", (5930, 5977), False, 'import os\n'), ((6488, 6554), 'os.system', 'os.system', (["('cp ..\\\\/..\\\\/' + runEpara_temp + ' ' + script_runEpara)"], {}), "('cp ..\\\\/..\\\\/' + runEpara_temp + ' ' + script_runEpara)\n", (6497, 6554), 
False, 'import os\n'), ((7325, 7393), 'os.system', 'os.system', (["('cp ..\\\\/..\\\\/' + runMeanRZ_temp + ' ' + script_runMeanRZ)"], {}), "('cp ..\\\\/..\\\\/' + runMeanRZ_temp + ' ' + script_runMeanRZ)\n", (7334, 7393), False, 'import os\n'), ((8416, 8482), 'os.system', 'os.system', (["('cp ..\\\\/..\\\\/' + runShape_temp + ' ' + script_runShape)"], {}), "('cp ..\\\\/..\\\\/' + runShape_temp + ' ' + script_runShape)\n", (8425, 8482), False, 'import os\n'), ((9504, 9562), 'os.system', 'os.system', (["('cp ..\\\\/..\\\\/' + init_epara + ' ' + init_epara)"], {}), "('cp ..\\\\/..\\\\/' + init_epara + ' ' + init_epara)\n", (9513, 9562), False, 'import os\n'), ((9571, 9627), 'os.system', 'os.system', (["('cp ..\\\\/..\\\\/' + run_epara + ' ' + run_epara)"], {}), "('cp ..\\\\/..\\\\/' + run_epara + ' ' + run_epara)\n", (9580, 9627), False, 'import os\n'), ((9682, 9740), 'os.system', 'os.system', (["('cp ..\\\\/..\\\\/' + init_shape + ' ' + init_shape)"], {}), "('cp ..\\\\/..\\\\/' + init_shape + ' ' + init_shape)\n", (9691, 9740), False, 'import os\n'), ((9749, 9805), 'os.system', 'os.system', (["('cp ..\\\\/..\\\\/' + run_shape + ' ' + run_shape)"], {}), "('cp ..\\\\/..\\\\/' + run_shape + ' ' + run_shape)\n", (9758, 9805), False, 'import os\n'), ((9849, 9903), 'os.system', 'os.system', (["('cp ..\\\\/..\\\\/' + metadata + ' ' + metadata)"], {}), "('cp ..\\\\/..\\\\/' + metadata + ' ' + metadata)\n", (9858, 9903), False, 'import os\n'), ((10059, 10089), 'os.system', 'os.system', (['CondorSubmitCommand'], {}), '(CondorSubmitCommand)\n', (10068, 10089), False, 'import os\n'), ((10107, 10125), 'os.chdir', 'os.chdir', (['"""../../"""'], {}), "('../../')\n", (10115, 10125), False, 'import os\n'), ((10142, 10187), 'os.system', 'os.system', (['"""echo "Current directory :" $PWD """'], {}), '(\'echo "Current directory :" $PWD \')\n', (10151, 10187), False, 'import os\n'), ((3507, 3533), 'ROOT.TFile', 'ROOT.TFile', (['shapefileNames'], {}), '(shapefileNames)\n', (3517, 3533), 
False, 'import ROOT\n'), ((3675, 3702), 'ROOT.TFile', 'ROOT.TFile', (['energyfileNames'], {}), '(energyfileNames)\n', (3685, 3702), False, 'import ROOT\n'), ((3849, 3878), 'ROOT.TFile', 'ROOT.TFile', (['extrapolfileNames'], {}), '(extrapolfileNames)\n', (3859, 3878), False, 'import ROOT\n'), ((5262, 5281), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (5276, 5281), False, 'import os\n'), ((5303, 5319), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (5314, 5319), False, 'import os\n')]
|
# from io import open_code
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import time
def account_info():
    """Read the login e-mail and password (whitespace separated) from data.txt."""
    with open('data.txt', 'r') as handle:
        tokens = handle.read().split()
    return tokens[0], tokens[1]
# Load credentials (first token: e-mail, second token: password) from data.txt.
email , password = account_info()
# NOTE(review): this prints the account password to the console -- looks like
# debug leftover; consider removing.
print(password)
# The status text that will be posted.
tweet = "hlw world this is JARVIS here , who tweet too..."
# Start a maximised Chrome session and open the Twitter login page.
options = Options()
options.add_argument("start-maximized")
driver = webdriver.Chrome(options=options)
driver.get(r'https://twitter.com/login')
# XPaths of the login form elements (brittle: tied to Twitter's DOM layout).
email_xpath = '//*[@id="react-root"]/div/div/div[2]/main/div/div/div[2]/form/div/div[1]/label/div/div[2]/div/input'
passward_xpath = '//*[@id="react-root"]/div/div/div[2]/main/div/div/div[2]/form/div/div[2]/label/div/div[2]/div/input'
login_xpath = '//*[@id="react-root"]/div/div/div[2]/main/div/div/div[2]/form/div/div[3]/div/div'
# Fill in the credentials and log in (sleeps give the page time to render).
time.sleep(2)
driver.find_element_by_xpath(email_xpath).send_keys(email)
time.sleep(0.5)
driver.find_element_by_xpath(passward_xpath).send_keys(password)
time.sleep(0.5)
driver.find_element_by_xpath(login_xpath).click()
time.sleep(0.5)
# XPaths of the tweet composer elements.
tweet_xpath = '//*[@id="react-root"]/div/div/div[2]/header/div/div/div/div[1]/div[3]/a/div'
type_xpath = '//*[@id="layers"]/div[2]/div/div/div/div/div/div[2]/div[2]/div/div[3]/div/div/div/div[1]/div/div/div/div/div[2]/div[1]/div/div/div/div/div/div/div/div/label/div[1]/div/div/div/div/div[2]/div/div/div/div'
post_xpath = '//*[@id="layers"]/div[2]/div/div/div/div/div/div[2]/div[2]/div/div[3]/div/div/div/div[1]/div/div/div/div/div[2]/div[3]/div/div/div[2]/div[4]/div/span/span'
# Open the composer, type the tweet and post it.
time.sleep(2)
driver.find_element_by_xpath(tweet_xpath).click()
time.sleep(0.5)
driver.find_element_by_xpath(type_xpath).send_keys(tweet)
time.sleep(0.5)
driver.find_element_by_xpath(post_xpath).click()
time.sleep(0.5)
|
[
"selenium.webdriver.chrome.options.Options",
"selenium.webdriver.Chrome",
"time.sleep"
] |
[((466, 475), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (473, 475), False, 'from selenium.webdriver.chrome.options import Options\n'), ((525, 558), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'options'}), '(options=options)\n', (541, 558), False, 'from selenium import webdriver\n'), ((933, 946), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (943, 946), False, 'import time\n'), ((1007, 1022), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1017, 1022), False, 'import time\n'), ((1088, 1103), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1098, 1103), False, 'import time\n'), ((1154, 1169), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1164, 1169), False, 'import time\n'), ((1652, 1665), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1662, 1665), False, 'import time\n'), ((1717, 1732), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1727, 1732), False, 'import time\n'), ((1792, 1807), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1802, 1807), False, 'import time\n'), ((1858, 1873), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1868, 1873), False, 'import time\n')]
|
import matplotlib.pyplot as plt
from matplotlib import animation
import matplotlib.gridspec as gridspec
from IPython.core.display import HTML
import numpy as np
import math
def animate(sequences, interval=100, blit=True, fig_size=(14, 10), get_fig=False):
    """Build a matplotlib ArtistAnimation from one or several image sequences.

    sequences may be a list/ndarray of frames (single panel) or a ``zip`` of
    several frame tuples laid out on a two-column grid.  Returns the
    animation, or ``(animation, figure)`` when *get_fig* is true.
    """
    if isinstance(sequences, list) or isinstance(sequences, np.ndarray):
        fig, ax = plt.subplots(1, 1)
        animate = [[ax.imshow(np.squeeze(_), cmap='gray')] for _ in sequences]
    elif isinstance(sequences, zip):
        animate = []
        for i, el in enumerate(sequences):
            seq = []
            if i == 0:
                # The first tuple fixes the grid: two columns, as many rows
                # as needed (assumes all tuples have the same length -- TODO confirm).
                nb_el = len(el)
                nb_col = 2
                nb_row = math.ceil(nb_el / nb_col)
                fig, ax = plt.subplots(nb_row, nb_col)
            for j in range(len(el)):
                # With a single row, plt.subplots returns a 1-D axes array.
                col = int(j % 2 != 0)
                row = j // nb_col
                if nb_row == 1:
                    seq.append(ax[col].imshow(np.squeeze(el[j]), cmap='gray'))
                else:
                    seq.append(ax[row, col].imshow(np.squeeze(el[j]), cmap='gray'))
            animate.append(seq)
    else:
        raise ValueError("Expected type is zip, list or numpy.ndarray, got ", type(sequences))
    fig.set_size_inches(*fig_size)
    anim = animation.ArtistAnimation(fig, animate, interval=interval, blit=blit)
    if not get_fig:
        return anim
    else:
        return anim, fig
def html_animation(sequences, interval=100, blit=True, fig_size=(14, 10)):
    """Render the animated sequences as an embeddable HTML5 video."""
    return HTML(animate(sequences, interval, blit, fig_size).to_html5_video())
def plot_results(batch, fig_size=(14, 10)):
    """Show a batch of images in grayscale: one panel, or a two-column grid."""
    single_image = batch.shape[0] == 1 or batch.ndim in (2, 3)
    if single_image:
        fig, axes = plt.subplots(1, 1)
        axes.imshow(np.squeeze(batch), cmap='gray')
    else:
        count = batch.shape[0]
        rows = math.ceil(count / 2)
        fig, axes = plt.subplots(rows, 2)
        for idx in range(count):
            row, col = divmod(idx, 2)
            # With a single row, plt.subplots returns a 1-D axes array.
            target = axes[col] if rows == 1 else axes[row, col]
            target.imshow(np.squeeze(batch[idx]), cmap='gray')
    fig.set_size_inches(*fig_size)
    fig.show()
import matplotlib
import matplotlib.cm
class VisuResultsClassification:
    """Animated dashboard combining image sequences, curves, filled curves
    and per-timestep classification bar charts, all synced on a shared x axis.
    """
    def __init__(self,
                 x,
                 sequences=None,
                 bar=None,
                 graph=None,
                 fill=None,
                 interval=50,
                 figsize=(14, 12),
                 sequences_titles=None,
                 graph_titles=None,
                 fill_titles=None):
        """
        Sequence
        :param x:
        :param sequences: List of arrays to be plotted as animated sequences
        :param bar: List of tuple of arrays to be plotted as bar
        :param graph: List of arrays to be plotted as curves
        :param fill: List of arrays to be plotted as filled curves
        For graph and fill arguments, instead of a list of lists, you can provide a list of dict, each dict being plot on
        the same graph, with the key being the label legend
        For bar argument, each tuple should be organized as: (yProba, yGroundtruth, nb_class, *labels[optional])
        """
        # BUG FIX: the original asserted a non-empty (condition, message)
        # tuple, which is always truthy, so the check never fired.
        assert not (bar is None and graph is None and fill is None), \
            "You have to provide at least one of the following argument: bar, graph, fill"
        bar = self.init_arg(bar)
        graph = self.init_arg(graph)
        sequences = self.init_arg(sequences)
        sequences_titles = self.init_arg(sequences_titles)
        fill = self.init_arg(fill)
        graph_titles = self.init_arg(graph_titles)
        fill_titles = self.init_arg(fill_titles)
        self.x = x
        self.length = len(x)
        # Rescale each sequence to [0, 1] so imshow contrast is comparable.
        self.sequences = [self.normalize_array(np.squeeze(array)) for array in sequences]
        self.fig = plt.figure(figsize=figsize)
        # Cycle through the 20 entries of the tab20 palette for curve colors.
        norm = matplotlib.colors.Normalize(vmin=0, vmax=20)
        cmap = matplotlib.cm.get_cmap('tab20')
        self.colors = [cmap(norm(_)) for _ in np.arange(0, 20, 1)]
        nb_plots = len(sequences) + len(graph) + len(bar) + len(fill)
        # Two-column outer grid, one cell per plot.
        self.outer = gridspec.GridSpec(math.ceil(nb_plots / 2), 2, wspace=0.1, hspace=0.25)
        iter_subplot = 0
        # Images
        self.array_axs = []
        self.image_plts = []
        for seq in self.sequences:
            self.array_axs.append(self.fig.add_subplot(self.outer[iter_subplot]))
            self.image_plts.append(self.array_axs[-1].imshow(seq[0], cmap='gray'))
            iter_subplot += 1
            plt.axis('off')
        for j, title in enumerate(sequences_titles):
            self.array_axs[j].title.set_text(title)
        # Curves
        self.graph_axs = []
        self.graph_vertical_lines = []
        for arrays in graph:
            graph_ax = self.fig.add_subplot(self.outer[iter_subplot])
            if isinstance(arrays, dict):
                # Dict: several labelled curves on the same axis.
                for key in arrays:
                    graph_ax.plot(self.x, self.pad_missing_value(arrays[key]), label=key, color=self._get_new_color())
            else:
                graph_ax.plot(self.x, self.pad_missing_value(arrays), color=self._get_new_color())
            iter_subplot += 1
            plt.xticks(rotation=25)
            graph_ax.legend()
            # Vertical cursor moved along x during the animation.
            self.graph_vertical_lines.append(graph_ax.axvline(x=self.x[0], color='k', linestyle=':'))
            self.graph_axs.append(graph_ax)
        for j, title in enumerate(graph_titles):
            self.graph_axs[j].title.set_text(title)
        # Filled curves
        self.fill_axs = []
        self.fill_vertical_lines = []
        for arrays in fill:
            fill_ax = self.fig.add_subplot(self.outer[iter_subplot])
            if isinstance(arrays, dict):
                for key in arrays:
                    color = self._get_new_color()
                    fill_ax.plot(self.x, self.pad_missing_value(arrays[key]), label=key, color=color)
                    # Half-transparent fill under the curve; pin both ends
                    # to zero so the filled polygon closes on the x axis.
                    color[-1] = 0.5
                    filledArray = arrays[key]
                    filledArray[0] = 0
                    filledArray[-1] = 0
                    fill_ax.fill(self.x, self.pad_missing_value(filledArray), color=color)
            else:
                color = self._get_new_color()
                fill_ax.plot(self.x, self.pad_missing_value(arrays), color=color)
                color[-1] = 0.5
                arrays[0] = 0
                arrays[-1] = 0
                fill_ax.fill(self.x, self.pad_missing_value(arrays), color=color)
            plt.xticks(rotation=25)
            fill_ax.legend()
            self.fill_vertical_lines.append(fill_ax.axvline(x=self.x[0], color='k', linestyle=':'))
            self.fill_axs.append(fill_ax)
            iter_subplot += 1
        for j, title in enumerate(fill_titles):
            self.fill_axs[j].title.set_text(title)
        # bar
        self.bars = bar
        self.bar_axs = []
        for arrays in self.bars:
            bar_ax = self.fig.add_subplot(self.outer[iter_subplot])
            self.fill_bar(bar_ax, arrays, 0)
            self.bar_axs.append(bar_ax)
            iter_subplot += 1
        self.anim = animation.FuncAnimation(self.fig, self._animate,
                                            frames=np.arange(self.length),
                                            interval=interval)
    def init_arg(self, arg):
        """Normalize an optional argument: None -> [], scalar -> [scalar]."""
        if arg is None:
            arg = []
        elif not isinstance(arg, list):
            arg = [arg]
        return arg
    def pad_missing_value(self, array):
        """Right-pad *array* with zeros up to the length of the x axis."""
        missing_value = max(0, self.length-len(array))
        return np.pad(array, (0, missing_value), 'constant')
    def fill_bar(self, bar_ax, tuple_array, timestamp):
        """Draw the class-probability bars for one timestep.

        tuple_array is (yProba, yGroundtruth, nb_class[, labels]).
        """
        labels = None
        nb_class = tuple_array[2]
        if len(tuple_array) == 4:
            labels = tuple_array[-1]
        tuple_array = tuple_array[:2]
        proba = tuple_array[0]
        gt = tuple_array[1]
        if timestamp < len(gt):
            pred = np.argmax(proba, axis=1)[timestamp]
            gt = gt[timestamp]
            color = self._get_bar_colors(pred, gt, nb_class)
            bar_ax.bar(np.arange(nb_class), proba[timestamp], color=color, width=0.5)
            bar_ax.set_xticks(np.arange(nb_class))
            if labels is not None:
                bar_ax.set_xticklabels(labels)
        else:
            # Past the end of the ground truth: draw an empty grey chart.
            color = [.7, .7, .7]
            bar_ax.bar(np.arange(nb_class), np.zeros(nb_class), color=color, width=0.5)
            bar_ax.set_xticks(np.arange(nb_class))
            if labels is not None:
                bar_ax.set_xticklabels(labels)
    def normalize_array(self, x):
        """In-place min/max normalization of *x* to the [0, 1] range."""
        x -= x.min()
        x /= x.max()
        return x
    def _get_new_color(self):
        """Rotate the tab20 palette and return the next color (RGBA row)."""
        self.colors = np.roll(self.colors, -1)
        return self.colors[0]
    def _get_bar_colors(self, prediction, ground_truth, nb_class):
        """Grey bars everywhere; green on a correct prediction, red otherwise."""
        neutral = [.7, .7, .7]
        incorrect = [.7, 0, 0]
        correct = [0, .8, 0]
        colors = [neutral] * nb_class
        prediction = int(round(prediction))
        if prediction == ground_truth:
            colors[prediction] = correct
        else:
            colors[prediction] = incorrect
        return colors
    def _animate(self, i):
        """FuncAnimation callback: advance every artist to frame *i*."""
        for j, plot in enumerate(self.image_plts):
            plot.set_data(self.sequences[j][i])
        for verticalLine in self.graph_vertical_lines:
            verticalLine.set_data([self.x[i], self.x[i]], [0, 1])
        for verticalLine in self.fill_vertical_lines:
            verticalLine.set_data([self.x[i], self.x[i]], [0, 1])
        plt.axis('off')
        # Bars cannot be updated in place: clear and redraw each frame.
        for bar_ax, bar_array in zip(self.bar_axs, self.bars):
            bar_ax.clear()
            self.fill_bar(bar_ax, bar_array, i)
    def html_anim(self):
        """Return the animation as an HTML5 video for notebook display."""
        return HTML(self.anim.to_html5_video())
|
[
"numpy.pad",
"matplotlib.colors.Normalize",
"math.ceil",
"matplotlib.cm.get_cmap",
"numpy.roll",
"numpy.argmax",
"numpy.zeros",
"matplotlib.pyplot.axis",
"matplotlib.animation.ArtistAnimation",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.squeeze",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots"
] |
[((1273, 1342), 'matplotlib.animation.ArtistAnimation', 'animation.ArtistAnimation', (['fig', 'animate'], {'interval': 'interval', 'blit': 'blit'}), '(fig, animate, interval=interval, blit=blit)\n', (1298, 1342), False, 'from matplotlib import animation\n'), ((350, 368), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (362, 368), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1725), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1719, 1725), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1879), 'math.ceil', 'math.ceil', (['(nb_el / nb_col)'], {}), '(nb_el / nb_col)\n', (1863, 1879), False, 'import math\n'), ((1898, 1926), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nb_row', 'nb_col'], {}), '(nb_row, nb_col)\n', (1910, 1926), True, 'import matplotlib.pyplot as plt\n'), ((3924, 3951), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3934, 3951), True, 'import matplotlib.pyplot as plt\n'), ((3967, 4011), 'matplotlib.colors.Normalize', 'matplotlib.colors.Normalize', ([], {'vmin': '(0)', 'vmax': '(20)'}), '(vmin=0, vmax=20)\n', (3994, 4011), False, 'import matplotlib\n'), ((4027, 4058), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['"""tab20"""'], {}), "('tab20')\n", (4049, 4058), False, 'import matplotlib\n'), ((7654, 7699), 'numpy.pad', 'np.pad', (['array', '(0, missing_value)', '"""constant"""'], {}), "(array, (0, missing_value), 'constant')\n", (7660, 7699), True, 'import numpy as np\n'), ((8799, 8823), 'numpy.roll', 'np.roll', (['self.colors', '(-1)'], {}), '(self.colors, -1)\n', (8806, 8823), True, 'import numpy as np\n'), ((9635, 9650), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9643, 9650), True, 'import matplotlib.pyplot as plt\n'), ((1744, 1761), 'numpy.squeeze', 'np.squeeze', (['batch'], {}), '(batch)\n', (1754, 1761), True, 'import numpy as np\n'), ((4236, 4259), 'math.ceil', 'math.ceil', 
(['(nb_plots / 2)'], {}), '(nb_plots / 2)\n', (4245, 4259), False, 'import math\n'), ((4632, 4647), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4640, 4647), True, 'import matplotlib.pyplot as plt\n'), ((5293, 5316), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(25)'}), '(rotation=25)\n', (5303, 5316), True, 'import matplotlib.pyplot as plt\n'), ((6571, 6594), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(25)'}), '(rotation=25)\n', (6581, 6594), True, 'import matplotlib.pyplot as plt\n'), ((3862, 3879), 'numpy.squeeze', 'np.squeeze', (['array'], {}), '(array)\n', (3872, 3879), True, 'import numpy as np\n'), ((4105, 4124), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(1)'], {}), '(0, 20, 1)\n', (4114, 4124), True, 'import numpy as np\n'), ((7298, 7320), 'numpy.arange', 'np.arange', (['self.length'], {}), '(self.length)\n', (7307, 7320), True, 'import numpy as np\n'), ((8037, 8061), 'numpy.argmax', 'np.argmax', (['proba'], {'axis': '(1)'}), '(proba, axis=1)\n', (8046, 8061), True, 'import numpy as np\n'), ((8188, 8207), 'numpy.arange', 'np.arange', (['nb_class'], {}), '(nb_class)\n', (8197, 8207), True, 'import numpy as np\n'), ((8281, 8300), 'numpy.arange', 'np.arange', (['nb_class'], {}), '(nb_class)\n', (8290, 8300), True, 'import numpy as np\n'), ((8454, 8473), 'numpy.arange', 'np.arange', (['nb_class'], {}), '(nb_class)\n', (8463, 8473), True, 'import numpy as np\n'), ((8475, 8493), 'numpy.zeros', 'np.zeros', (['nb_class'], {}), '(nb_class)\n', (8483, 8493), True, 'import numpy as np\n'), ((8549, 8568), 'numpy.arange', 'np.arange', (['nb_class'], {}), '(nb_class)\n', (8558, 8568), True, 'import numpy as np\n'), ((399, 412), 'numpy.squeeze', 'np.squeeze', (['_'], {}), '(_)\n', (409, 412), True, 'import numpy as np\n'), ((678, 703), 'math.ceil', 'math.ceil', (['(nb_el / nb_col)'], {}), '(nb_el / nb_col)\n', (687, 703), False, 'import math\n'), ((730, 758), 'matplotlib.pyplot.subplots', 'plt.subplots', 
(['nb_row', 'nb_col'], {}), '(nb_row, nb_col)\n', (742, 758), True, 'import matplotlib.pyplot as plt\n'), ((2082, 2102), 'numpy.squeeze', 'np.squeeze', (['batch[j]'], {}), '(batch[j])\n', (2092, 2102), True, 'import numpy as np\n'), ((2171, 2191), 'numpy.squeeze', 'np.squeeze', (['batch[j]'], {}), '(batch[j])\n', (2181, 2191), True, 'import numpy as np\n'), ((948, 965), 'numpy.squeeze', 'np.squeeze', (['el[j]'], {}), '(el[j])\n', (958, 965), True, 'import numpy as np\n'), ((1054, 1071), 'numpy.squeeze', 'np.squeeze', (['el[j]'], {}), '(el[j])\n', (1064, 1071), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import re
import uuid
import pathlib
import eventlet
import subprocess
from urllib.parse import urlparse
from eventlet.green.subprocess import Popen
from flask import Flask, send_from_directory, jsonify, request
from flask_socketio import SocketIO, emit
from flask_cors import CORS
# requests monkey-patched so HTTP calls cooperate with eventlet green threads.
requests = eventlet.import_patched('requests')
app = Flask(__name__)
app.config.from_json('config.json')
socketio = SocketIO(app, cors_allowed_origins='*', logger=True,
                    engineio_logger=True, manage_session=True)
# Cross-origin access is allowed on the file-retrieval endpoint only.
cors = CORS(app, resources={r'/retrieve/*': {'origins': '*'}})
def is_valid_url(url):
    """Return the regex match object when *url* looks like an http(s) URL,
    or None when it does not."""
    url_re = re.compile(
        r'^https?://'
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|'
        r'localhost|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
        r'(?::\d+)?(?:/?|[/?]\S+)$', re.IGNORECASE)
    return url_re.search(url)
def parse_git_url(url):
    """Rewrite a github.com file URL into the raw ``download_url`` reported
    by the GitHub contents API; any other URL is returned unchanged.

    :param url: source document URL supplied by the client
    :return: a direct raw-content URL, or the original ``url`` on any miss
    """
    parse = urlparse(url)
    if parse.netloc != 'github.com':
        return url
    p = parse.path.split('/')
    c = len(p)
    # Index arithmetic: j = c - 5 and k = c - j - c = 5 - c (negative for
    # paths longer than five segments).
    j = c - 5
    k = c - j - c
    # c >= k reduces to c >= 5 - c, i.e. c >= 3: require at least a
    # /user/repo prefix before indexing p[1] and p[2] below.
    if not c >= k:
        return url
    user = p[1]
    repo = p[2]
    # NOTE(review): for c > 5, p[k:] keeps the last c - 5 segments — the
    # in-repo file path after /user/repo/blob/branch. For shorter paths the
    # slice behaves differently; verify against the expected URL shapes.
    path = '/'.join(p[k:])
    base = 'https://api.github.com/repos/'
    api_url = '{}{}/{}/contents/{}'.format(base, user, repo, path)
    # Authenticated session: raises the GitHub API rate limit for this app.
    session = requests.Session()
    session.auth = (app.config['GITHUB_USER'],
                    app.config['GITHUB_TOKEN'])
    res = session.get(api_url)
    if res.status_code == requests.codes.ok:
        response = res.json()
        return response['download_url']
    return url
@app.route('/')
def index():
    """Root endpoint: greet visitors but report 404 (no content here)."""
    payload = {'message': 'Thanks for visiting api.reedo.me'}
    return jsonify(payload), 404
@app.route('/retrieve', methods=['POST'])
def retrieve_file():
    """Serve a generated output file from the current working directory.

    Expects a JSON body with a ``file`` key naming the file to download.
    SECURITY NOTE(review): ``req['file']`` is untrusted client input;
    ``send_from_directory`` blocks traversal outside ``cwd``, but every
    file in the working directory is reachable — confirm this is intended.
    """
    cwd = pathlib.Path.cwd()
    req = request.get_json()
    return send_from_directory(directory=cwd, filename=req['file'])
def convert_and_stream(type, url):
    """Run the markdown-converter docker image and stream its output lines
    to socket.io clients.

    :param type: output format forwarded to the container's ``--format`` flag
    :param url: source document URL forwarded to ``--input``

    Emits ``my_response`` events while the container runs, then a final
    ``done`` event carrying the generated PDF file name.
    """
    cwd = pathlib.Path.cwd()
    # The server's cwd is bind-mounted into the container, so the container's
    # output lands in the directory `retrieve_file` later serves from.
    target = '/home/latex/data/output'
    container = 'opendatacoder/markdown:latest'
    executable = '/usr/bin/docker'
    # Random short file stem: the last group of a UUID4.
    output = str(uuid.uuid4()).split('-')[-1]
    command = ['{}'.format(executable), 'run', '--rm', '-v',
               '{}:{}'.format(cwd, target), '{}'.format(container),
               '--format', '{}'.format(type), '--output', '{}'.format(output),
               '--input', '{}'.format(url)]
    # eventlet's green Popen keeps the server responsive while streaming.
    instance = Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    lines_std_out = iter(instance.stdout.readline, b'')
    for line in lines_std_out:
        socketio.sleep(0)  # yield to the event loop between lines
        data = line.decode('utf8').rstrip('\n')
        socketio.emit('my_response', {'data': str(data)})
    socketio.emit('done', {'file': '{}.pdf'.format(output)})
@socketio.on('instance')
def tasks_threaded(format, url):
    """Socket.io entry point: validate the source URL and start the
    conversion as a background task so the handler returns immediately.

    :param format: requested output format (forwarded to the converter)
    :param url: source document URL (GitHub file URLs are rewritten to raw)
    """
    url = parse_git_url(url)
    if not url or not is_valid_url(url):
        emit('my_response', {'data': 'yolo specify a valid source url'})
        return None
    socketio.start_background_task(convert_and_stream, format, url)
    emit('my_response', {'data': 'initiating process'})
    emit('my_response', {'data': 'parsing {}'.format(url)})
@socketio.on('loaded')
def test_message():
    """Acknowledge a freshly loaded client terminal."""
    payload = {'data': 'connected to terminal'}
    emit('my_response', payload)
if __name__ == '__main__':
    # Development entry point; Flask-SocketIO selects the eventlet server.
    socketio.run(app, debug=True)
|
[
"uuid.uuid4",
"pathlib.Path.cwd",
"flask_cors.CORS",
"flask.Flask",
"eventlet.green.subprocess.Popen",
"flask.jsonify",
"flask_socketio.emit",
"flask_socketio.SocketIO",
"flask.send_from_directory",
"eventlet.import_patched",
"re.search",
"flask.request.get_json",
"urllib.parse.urlparse",
"re.compile"
] |
[((319, 354), 'eventlet.import_patched', 'eventlet.import_patched', (['"""requests"""'], {}), "('requests')\n", (342, 354), False, 'import eventlet\n'), ((362, 377), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (367, 377), False, 'from flask import Flask, send_from_directory, jsonify, request\n'), ((425, 524), 'flask_socketio.SocketIO', 'SocketIO', (['app'], {'cors_allowed_origins': '"""*"""', 'logger': '(True)', 'engineio_logger': '(True)', 'manage_session': '(True)'}), "(app, cors_allowed_origins='*', logger=True, engineio_logger=True,\n manage_session=True)\n", (433, 524), False, 'from flask_socketio import SocketIO, emit\n'), ((548, 602), 'flask_cors.CORS', 'CORS', (['app'], {'resources': "{'/retrieve/*': {'origins': '*'}}"}), "(app, resources={'/retrieve/*': {'origins': '*'}})\n", (552, 602), False, 'from flask_cors import CORS\n'), ((643, 832), 're.compile', 're.compile', (['"""^https?://(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\\\.)+[A-Z]{2,6}\\\\.?|localhost|\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3})(?::\\\\d+)?(?:/?|[/?]\\\\S+)$"""', 're.IGNORECASE'], {}), "(\n '^https?://(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\\\.)+[A-Z]{2,6}\\\\.?|localhost|\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3})(?::\\\\d+)?(?:/?|[/?]\\\\S+)$'\n , re.IGNORECASE)\n", (653, 832), False, 'import re\n'), ((871, 894), 're.search', 're.search', (['pattern', 'url'], {}), '(pattern, url)\n', (880, 894), False, 'import re\n'), ((951, 964), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (959, 964), False, 'from urllib.parse import urlparse\n'), ((1781, 1799), 'pathlib.Path.cwd', 'pathlib.Path.cwd', ([], {}), '()\n', (1797, 1799), False, 'import pathlib\n'), ((1810, 1828), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1826, 1828), False, 'from flask import Flask, send_from_directory, jsonify, request\n'), ((1841, 1897), 'flask.send_from_directory', 'send_from_directory', ([], {'directory': 'cwd', 'filename': 
"req['file']"}), "(directory=cwd, filename=req['file'])\n", (1860, 1897), False, 'from flask import Flask, send_from_directory, jsonify, request\n'), ((1945, 1963), 'pathlib.Path.cwd', 'pathlib.Path.cwd', ([], {}), '()\n', (1961, 1963), False, 'import pathlib\n'), ((2401, 2465), 'eventlet.green.subprocess.Popen', 'Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n', (2406, 2465), False, 'from eventlet.green.subprocess import Popen\n'), ((3048, 3099), 'flask_socketio.emit', 'emit', (['"""my_response"""', "{'data': 'initiating process'}"], {}), "('my_response', {'data': 'initiating process'})\n", (3052, 3099), False, 'from flask_socketio import SocketIO, emit\n'), ((3209, 3263), 'flask_socketio.emit', 'emit', (['"""my_response"""', "{'data': 'connected to terminal'}"], {}), "('my_response', {'data': 'connected to terminal'})\n", (3213, 3263), False, 'from flask_socketio import SocketIO, emit\n'), ((1644, 1700), 'flask.jsonify', 'jsonify', (["{'message': 'Thanks for visiting api.reedo.me'}"], {}), "({'message': 'Thanks for visiting api.reedo.me'})\n", (1651, 1700), False, 'from flask import Flask, send_from_directory, jsonify, request\n'), ((2888, 2952), 'flask_socketio.emit', 'emit', (['"""my_response"""', "{'data': 'yolo specify a valid source url'}"], {}), "('my_response', {'data': 'yolo specify a valid source url'})\n", (2892, 2952), False, 'from flask_socketio import SocketIO, emit\n'), ((2103, 2115), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2113, 2115), False, 'import uuid\n')]
|
# mdbook-publisher
# Author: <NAME>
# Date: 1/4/2021
import json
import os
import sys
# Global config
config = {}
def generate(name, path):
    """Split a single markdown source file into per-chapter pages of an
    mdbook project, write SUMMARY.md, then build the book.

    :param name: project name; pages go under projects/<name>/src
    :param path: path of the source markdown file

    Chapters are delimited by top-level ``# `` headings; each becomes
    ``<Title_With_Underscores>.md`` and one SUMMARY.md entry.
    NOTE(review): *name* is interpolated into an ``os.system`` command —
    confirm it never comes from untrusted input.
    """
    with open(path, 'r', encoding='utf-8') as f:
        summary_text = '# Summary\n\n'
        # Count top-level list items for the progress message.
        item_array = f.read().split('\n- ')
        print(str(len(item_array) - 1) + ' item(s) found.')
        content = None
        for item in item_array:
            # FIX: `content == None` replaced with the idiomatic `is None`.
            if content is None:
                content = item
            else:
                content = content + '\n\n- ' + item
        # Generate single content file
        arr = content.split('\n# ')
        # The first chunk keeps its leading '# '; strip it so every chunk
        # starts directly with the title text.
        if arr[0].find('# ') == 0:
            arr[0] = arr[0][2:]
        for elem in arr:
            # The title is everything up to the first newline.
            title_pos_start = title_pos_end = 0
            while elem[title_pos_end] != '\n' and title_pos_end < len(elem) - 1:
                title_pos_end = title_pos_end + 1
            title = elem[title_pos_start : title_pos_end]
            with open('projects/' + name + '/src/' + title.replace(' ', '_') + '.md', 'w', encoding='utf-8') as fout:
                # FIX: dropped redundant fout.close() — `with` closes the file.
                fout.write('# ' + title + '\n' + elem[title_pos_end:len(elem)])
            summary_text = summary_text + '- [' + title + '](./' + title.replace(' ', '_') + '.md)\n'
        # Generate SUMMARY.md
        with open('projects/' + name + '/src/SUMMARY.md', 'w', encoding='utf-8') as fout:
            fout.write(summary_text)
        print('Generate success')
    os.system('mdbook build projects/' + name)
def upload(name, target, dest):
    """Copy the built book of project *name* to a configured host via scp.

    :param name: project name (book read from ./projects/<name>/book)
    :param target: host name to look up in config['hosts']
    :param dest: destination path on the remote host
    """
    target_host_info = {}
    for host in config['hosts']:
        if host['name'] == target:
            target_host_info = host
            break
    if target_host_info == {}:
        print('Error: Target host not found')
        exit()
    # SECURITY NOTE(review): the password is interpolated into the shell
    # command line (and printed below), so it is visible to other local
    # users via `ps` and in logs — consider sshpass -f or key-based auth.
    cmd = 'sshpass -p \'' + target_host_info['password'] + '\' scp -v -r ./projects/' + name + '/book/* ' + target_host_info['username'] + '@' + target_host_info['address'] + ':' + dest
    print(cmd)
    os.system(cmd)
def main():
    """CLI dispatcher: load mdpub.json, then interpret sys.argv.

    Two-argument form handles 'help' and 'list'; three-argument form
    handles 'gen <project>' and 'update <project>'.
    """
    # Load config
    global config
    with open('mdpub.json', 'r', encoding='utf-8') as f:
        config = json.load(f)
    # Check and retrieve args
    if len(sys.argv) == 2:
        if sys.argv[1] == 'help':
            print('mdpub (mdbook-publisher)')
            print('Copyright 2021 <NAME>. All Rights Reserved.')
            print()
            print('config add host -- Add host info to config')
            print('       add project -- Add project to config')
            print('       rm host -- Remove host info from config')
            print('       rm project -- Remove project info from config')
            print('gen -- Generate mdbook project from source markdown file')
            print('list -- List existing projects')
            print('update -- Generate from source and upload to target host')
        elif sys.argv[1] == 'list':
            for project in config['projects']:
                print(project['name'], project['src'])
        else:
            print('Missing argument')
    elif len(sys.argv) == 3:
        if sys.argv[1] == 'gen' or sys.argv[1] == 'update':
            project_name = sys.argv[2]
            # Create if project directory not exists
            if not os.path.exists('./projects'):
                print('Creating project directory...')
                os.mkdir('./projects')
            # Initialize the mdbook project on first use
            if (not os.path.exists('projects/' + project_name)):
                os.mkdir('projects/' + project_name)
                os.system('mdbook init projects/' + project_name)
            # Fetch source markdown file path (and upload target) from config
            src_path = ''
            target_host = ''
            dest_path = ''
            for project in config['projects']:
                if (project['name'] == project_name):
                    src_path = project['src']
                    target_host = project['host']
                    dest_path = project['dest']
                    break
            # Unknown projects (src_path still '') are silently ignored.
            if src_path != '':
                generate(project_name, src_path)
                if sys.argv[1] == 'update':
                    upload(project_name, target_host, dest_path)
if __name__ == '__main__':
    # Script entry point.
    main()
|
[
"os.mkdir",
"json.load",
"os.path.exists",
"os.system"
] |
[((1524, 1566), 'os.system', 'os.system', (["('mdbook build projects/' + name)"], {}), "('mdbook build projects/' + name)\n", (1533, 1566), False, 'import os\n'), ((2054, 2068), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (2063, 2068), False, 'import os\n'), ((2193, 2205), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2202, 2205), False, 'import json\n'), ((3377, 3405), 'os.path.exists', 'os.path.exists', (['"""./projects"""'], {}), "('./projects')\n", (3391, 3405), False, 'import os\n'), ((3478, 3500), 'os.mkdir', 'os.mkdir', (['"""./projects"""'], {}), "('./projects')\n", (3486, 3500), False, 'import os\n'), ((3554, 3596), 'os.path.exists', 'os.path.exists', (["('projects/' + project_name)"], {}), "('projects/' + project_name)\n", (3568, 3596), False, 'import os\n'), ((3615, 3651), 'os.mkdir', 'os.mkdir', (["('projects/' + project_name)"], {}), "('projects/' + project_name)\n", (3623, 3651), False, 'import os\n'), ((3668, 3717), 'os.system', 'os.system', (["('mdbook init projects/' + project_name)"], {}), "('mdbook init projects/' + project_name)\n", (3677, 3717), False, 'import os\n')]
|
from typing import Mapping, Optional
from textwrap import dedent
import discord
from discord import Color, Embed
from discord.ext.commands import Cog, Command, Group, HelpCommand
from helpers.paginator import Paginator
from .views import HelpCommandMenu
class CustomHelpCommand(HelpCommand):
    """Custom ``help`` implementation: a dropdown menu for the bot overview,
    paginated embeds for cogs/groups, and a single embed per command."""
    def __init__(self):
        super().__init__(
            command_attrs={
                "hidden": True,
                "help": "Shows help about a category, group, or a command.",
            }
        )
    async def send(self, **kwargs):
        """Send to whatever destination the help command was invoked from."""
        return await self.get_destination().send(**kwargs)
    async def send_bot_help(
        self, mapping: Mapping[Optional[Cog], list[Command]]
    ) -> None:
        """Top-level help: one embed plus a module-navigation dropdown."""
        description = f"""
        Hello! I am bonbons, I was made by sift#0410 around <t:1631859987:R>.
        Use the dropdown below to navigate through my modules. If you need help with a specific command, use `{self.context.clean_prefix}help [command]`.
        """
        embed = Embed(
            title="Help Menu",
            description=dedent(description),
            color=Color.og_blurple(),
        )
        view = HelpCommandMenu(self.context, embed)
        view.msg = await self.send(
            embed=embed,
            view=view,
        )
    async def paginate(
        self, title: str, description: str, commands, *, per_page: int
    ) -> None:
        """Split (name, help) pairs into embeds of *per_page* fields each and
        send them behind a Paginator view."""
        embeds = []
        for number in range(0, len(commands), per_page):
            embed = discord.Embed(
                title=title,
                description=description,
                colour=discord.Color.og_blurple(),
            )
            for command in commands[number : number + per_page]:
                embed.add_field(
                    name=command[0],
                    value=command[1],
                    inline=False,
                )
            embeds.append(embed)
        for index, embed in enumerate(embeds):
            # FIX: a space now separates the title from the page counter
            # (was "Group HelpPage 1/2").
            embed.title += f" Page {index+1}/{len(embeds)}"
            # FIX: build the footer from the invocation prefix instead of a
            # hard-coded "b!" (resolves the old TODO about dynamic prefixes).
            embed.set_footer(
                text=f"Use {self.context.clean_prefix}help [command] for more info on a command."
            )
        view = Paginator(self.context, embeds, embed=True)
        view.msg = await self.send(embed=embeds[0], view=view)
    async def send_help_embed(
        self,
        title: str,
        description: str,
        commands,
    ) -> None:
        """Flatten *commands* (expanding visible subcommands of groups) into
        (signature, help) pairs and hand them to :meth:`paginate`."""
        initial_commands = []
        for command in commands:
            if isinstance(command, Group):
                for subcommand in command.commands:
                    if subcommand.hidden or not subcommand.enabled:
                        continue
                    signature = subcommand.signature
                    name = subcommand.qualified_name
                    help = subcommand.description or subcommand.help or "No help found."
                    initial_commands.append(
                        (f"{name} {signature}", help)
                    )
            # Skip hidden/disabled commands and subcommands already listed.
            if command.parent or command.hidden or not command.enabled:
                continue
            signature = command.signature
            name = command.qualified_name
            help = command.description or command.help or "No help found."
            initial_commands.append((f"{name} {signature}", help))
        await self.paginate(
            title, description, initial_commands, per_page=7
        )
    async def send_group_help(self, group: Group) -> None:
        """Paginated help for a command group."""
        await self.send_help_embed("Group Help", group.description, group.commands)
    async def send_cog_help(self, cog: Group) -> None:
        """Paginated help for a cog (category)."""
        await self.send_help_embed(
            "Category Help",
            cog.description,
            cog.walk_commands(),
        )
    async def send_command_help(self, command: Command) -> None:
        """Single embed describing one command's signature and aliases."""
        embed = Embed(title="Command Help", color=Color.og_blurple())
        description = command.description or command.help or "..."
        embed.description = (
            f"```\n{command.qualified_name} {command.signature}\n```\n{description}"
        )
        if command.aliases:
            embed.add_field(name="Aliases", value=", ".join(command.aliases))
        await self.send(embed=embed)
|
[
"textwrap.dedent",
"helpers.paginator.Paginator",
"discord.Color.og_blurple"
] |
[((2183, 2226), 'helpers.paginator.Paginator', 'Paginator', (['self.context', 'embeds'], {'embed': '(True)'}), '(self.context, embeds, embed=True)\n', (2192, 2226), False, 'from helpers.paginator import Paginator\n'), ((1060, 1079), 'textwrap.dedent', 'dedent', (['description'], {}), '(description)\n', (1066, 1079), False, 'from textwrap import dedent\n'), ((1099, 1117), 'discord.Color.og_blurple', 'Color.og_blurple', ([], {}), '()\n', (1115, 1117), False, 'from discord import Color, Embed\n'), ((3875, 3893), 'discord.Color.og_blurple', 'Color.og_blurple', ([], {}), '()\n', (3891, 3893), False, 'from discord import Color, Embed\n'), ((1593, 1619), 'discord.Color.og_blurple', 'discord.Color.og_blurple', ([], {}), '()\n', (1617, 1619), False, 'import discord\n')]
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.contact import Contact # noqa: F401,E501
import re # noqa: F401,E501
from swagger_server import util
class Housing(Model):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, service_involvement: str=None, housing_association: str=None, contact: Contact=None, tenancy_start: date=None, anti_social_behaviour: str=None, rent_arrears: str=None, notice_seeking_possession: str=None, eviction: str=None):  # noqa: E501
        """Housing - a model defined in Swagger
        :param service_involvement: The service_involvement of this Housing.  # noqa: E501
        :type service_involvement: str
        :param housing_association: The housing_association of this Housing.  # noqa: E501
        :type housing_association: str
        :param contact: The contact of this Housing.  # noqa: E501
        :type contact: Contact
        :param tenancy_start: The tenancy_start of this Housing.  # noqa: E501
        :type tenancy_start: date
        :param anti_social_behaviour: The anti_social_behaviour of this Housing.  # noqa: E501
        :type anti_social_behaviour: str
        :param rent_arrears: The rent_arrears of this Housing.  # noqa: E501
        :type rent_arrears: str
        :param notice_seeking_possession: The notice_seeking_possession of this Housing.  # noqa: E501
        :type notice_seeking_possession: str
        :param eviction: The eviction of this Housing.  # noqa: E501
        :type eviction: str
        """
        # Maps attribute name -> declared type (consumed by the base Model
        # for (de)serialization).
        self.swagger_types = {
            'service_involvement': str,
            'housing_association': str,
            'contact': Contact,
            'tenancy_start': date,
            'anti_social_behaviour': str,
            'rent_arrears': str,
            'notice_seeking_possession': str,
            'eviction': str
        }
        # Maps python attribute name -> JSON field name.
        self.attribute_map = {
            'service_involvement': 'serviceInvolvement',
            'housing_association': 'housingAssociation',
            'contact': 'contact',
            'tenancy_start': 'tenancyStart',
            'anti_social_behaviour': 'antiSocialBehaviour',
            'rent_arrears': 'rentArrears',
            'notice_seeking_possession': 'noticeSeekingPossession',
            'eviction': 'eviction'
        }
        # Backing fields for the generated properties below.
        self._service_involvement = service_involvement
        self._housing_association = housing_association
        self._contact = contact
        self._tenancy_start = tenancy_start
        self._anti_social_behaviour = anti_social_behaviour
        self._rent_arrears = rent_arrears
        self._notice_seeking_possession = notice_seeking_possession
        self._eviction = eviction
    @classmethod
    def from_dict(cls, dikt) -> 'Housing':
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The Housing of this Housing.  # noqa: E501
        :rtype: Housing
        """
        return util.deserialize_model(dikt, cls)
    @property
    def service_involvement(self) -> str:
        """Gets the service_involvement of this Housing.
        :return: The service_involvement of this Housing.
        :rtype: str
        """
        return self._service_involvement
    @service_involvement.setter
    def service_involvement(self, service_involvement: str):
        """Sets the service_involvement of this Housing.
        :param service_involvement: The service_involvement of this Housing.
        :type service_involvement: str
        """
        self._service_involvement = service_involvement
    @property
    def housing_association(self) -> str:
        """Gets the housing_association of this Housing.
        :return: The housing_association of this Housing.
        :rtype: str
        """
        return self._housing_association
    @housing_association.setter
    def housing_association(self, housing_association: str):
        """Sets the housing_association of this Housing.
        :param housing_association: The housing_association of this Housing.
        :type housing_association: str
        """
        self._housing_association = housing_association
    @property
    def contact(self) -> Contact:
        """Gets the contact of this Housing.
        :return: The contact of this Housing.
        :rtype: Contact
        """
        return self._contact
    @contact.setter
    def contact(self, contact: Contact):
        """Sets the contact of this Housing.
        :param contact: The contact of this Housing.
        :type contact: Contact
        """
        self._contact = contact
    @property
    def tenancy_start(self) -> date:
        """Gets the tenancy_start of this Housing.
        :return: The tenancy_start of this Housing.
        :rtype: date
        """
        return self._tenancy_start
    @tenancy_start.setter
    def tenancy_start(self, tenancy_start: date):
        """Sets the tenancy_start of this Housing.
        :param tenancy_start: The tenancy_start of this Housing.
        :type tenancy_start: date
        """
        self._tenancy_start = tenancy_start
    @property
    def anti_social_behaviour(self) -> str:
        """Gets the anti_social_behaviour of this Housing.
        :return: The anti_social_behaviour of this Housing.
        :rtype: str
        """
        return self._anti_social_behaviour
    @anti_social_behaviour.setter
    def anti_social_behaviour(self, anti_social_behaviour: str):
        """Sets the anti_social_behaviour of this Housing.
        :param anti_social_behaviour: The anti_social_behaviour of this Housing.
        :type anti_social_behaviour: str
        """
        self._anti_social_behaviour = anti_social_behaviour
    @property
    def rent_arrears(self) -> str:
        """Gets the rent_arrears of this Housing.
        :return: The rent_arrears of this Housing.
        :rtype: str
        """
        return self._rent_arrears
    @rent_arrears.setter
    def rent_arrears(self, rent_arrears: str):
        """Sets the rent_arrears of this Housing.
        :param rent_arrears: The rent_arrears of this Housing.
        :type rent_arrears: str
        """
        self._rent_arrears = rent_arrears
    @property
    def notice_seeking_possession(self) -> str:
        """Gets the notice_seeking_possession of this Housing.
        :return: The notice_seeking_possession of this Housing.
        :rtype: str
        """
        return self._notice_seeking_possession
    @notice_seeking_possession.setter
    def notice_seeking_possession(self, notice_seeking_possession: str):
        """Sets the notice_seeking_possession of this Housing.
        :param notice_seeking_possession: The notice_seeking_possession of this Housing.
        :type notice_seeking_possession: str
        """
        self._notice_seeking_possession = notice_seeking_possession
    @property
    def eviction(self) -> str:
        """Gets the eviction of this Housing.
        :return: The eviction of this Housing.
        :rtype: str
        """
        return self._eviction
    @eviction.setter
    def eviction(self, eviction: str):
        """Sets the eviction of this Housing.
        :param eviction: The eviction of this Housing.
        :type eviction: str
        """
        self._eviction = eviction
|
[
"swagger_server.util.deserialize_model"
] |
[((3200, 3233), 'swagger_server.util.deserialize_model', 'util.deserialize_model', (['dikt', 'cls'], {}), '(dikt, cls)\n', (3222, 3233), False, 'from swagger_server import util\n')]
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=too-many-arguments,wrong-import-position
"""The `verdi` command line interface."""
import click_completion
# Activate the completion of parameter types provided by the click_completion package
click_completion.init()
# Import to populate the `verdi` sub commands.
# These names are not used directly in this module: importing them has the
# side effect of registering each sub command with the `verdi` command group.
from aiida.cmdline.commands import (
    cmd_archive,
    cmd_calcjob,
    cmd_code,
    cmd_completioncommand,
    cmd_computer,
    cmd_config,
    cmd_daemon,
    cmd_data,
    cmd_database,
    cmd_devel,
    cmd_group,
    cmd_help,
    cmd_node,
    cmd_plugin,
    cmd_process,
    cmd_profile,
    cmd_restapi,
    cmd_run,
    cmd_setup,
    cmd_shell,
    cmd_status,
    cmd_user,
)
|
[
"click_completion.init"
] |
[((844, 867), 'click_completion.init', 'click_completion.init', ([], {}), '()\n', (865, 867), False, 'import click_completion\n')]
|
'''
Clean 'Component', 'Fleet', 'System', 'Mitigation' fields and output csv.
TODO: Refine.
# Author
@ <NAME>
# License
MIT License 2020
'''
# Import libraries and define functions
import re
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
# Function to make all strings lowercase and replace spaces with underscores
def format_df(df):
    """Normalise a DataFrame for downstream string matching.

    Lowercases every string cell, replaces spaces with underscores
    (regex replace, so it applies inside cell values too), and fills
    missing values with the empty string.

    :param df: input DataFrame (mutated by the space replacement and
        also returned).
    :return: the normalised DataFrame.
    """
    # isinstance is the idiomatic type test (was: type(s) == str) and
    # also accepts str subclasses.
    df = df.applymap(lambda s: s.lower() if isinstance(s, str) else s)
    df.replace(' ', '_', regex=True, inplace=True)
    df = df.fillna('')
    return df
# Function to remove underscores and replace with spaces
def fix_text(df):
    """Undo the underscore encoding: turn every '_' back into a space.

    Mutates ``df`` in place and returns it for convenience.
    """
    df.replace('_', ' ', inplace=True, regex=True)
    return df
# Plot parameters
plt.rcParams["figure.figsize"]=10,10
plt.rcParams['font.size'] =14
plt.rcParams['axes.labelsize']=14
plt.rcParams['figure.facecolor']='#ffffff'
plt.rcParams['grid.color'] = '#d3d3d3'
plt.rcParams['grid.linestyle'] = ':'
# Import the uncleansed CSV file
# dtype=str keeps every column textual so the regex cleanup below applies uniformly.
filename = './in/equipment_data_for_cleansing.csv'
df = pd.read_csv(filename, encoding = 'ISO-8859-1', dtype= str)
# Remove '_x00D_' endline characters and extra blank lines from free form fields
# NOTE(review): Series.replace(..., inplace=True) on a column selection relies on
# the selection being a view; on newer pandas (copy-on-write) these edits may not
# write back to df -- verify they stick.
df['Basis'].replace('(?<=[a-z]|\))_x000D_',':',inplace=True, regex=True)
df['Basis'].replace('_x000D_|\?|!|_x000',' ',inplace=True, regex=True)
df['Basis'].replace('\n\s','',inplace=True, regex=True)
df['Mitigation'].replace('(?<=[a-z]|\))_x000D_',':',inplace=True, regex=True)
df['Mitigation'].replace('_x000D_|\?|!|_x000',' ',inplace=True, regex=True)
df['Mitigation'].replace('\n\s','',inplace=True, regex=True)
df['Elimination'].replace('(?<=[a-z]|\))_x000D_',':',inplace=True, regex=True)
df['Elimination'].replace('_x000D_|\?|!|_x000',' ',inplace=True, regex=True)
df['Elimination'].replace('\n\s','',inplace=True, regex=True)
# New (initially blank) columns populated by the cleansing steps below.
df['NewComponent']=''
df['Method']=''
df['NewSystem']=''
'''
Fix 'Component' Name
'''
# NOTE: df2 is an alias of df (same object), not a copy.
df2 = df
# NOTE(review): the result of this apply() is discarded -- it does not change df2.
df2['ComponentDesc'].apply(str)
df2 = df2.fillna('')
#Create a smaller dataframe containing only the unspecified equipment entries and a new blank column
unspecified = df2[df2['Component']=='unspecified_equipment']
''' This block assigns component names to a column called New Component. If the component description column
contains the string it assigns the dictionary key. '''
# Maps the canonical component name to the regex fragments that identify it
# in the free-form description column.
comp_dict = {'fuse': ['fuse','fusible'],
             'fan':['fan'],
             'dampers_&_ducting':['damper'],
             'battery':['battery'],
             'condenser':['condenser'],
             'pipe':['pipe', 'flex_hose$','^flex_hose'],
             'rod_drive_mechanism':['crdm','control_rod_drive','control_element_drive'],
             'circuit_breaker':['breaker'],
             'transformers': ['transformer','xfmr'],
             'pump':['rcp'],
             'turbine_(steam)':['turbine', 'turb', 'lp', 'hp'],
             'pump':['pump', 'pmp'],
             'heat_exchanger':['cooler[s]*','_clr'],
             'motor':['motor','mtr_winding'],
             'tank':['tank'],
             'circuit_breaker':['cubicle','bkr','circuit_breaker'],
             'control_switch':['cntl_s','control_switch','ctrl_sw','sel_sw','isol_switch'],
             'contactor':['contactor','aux_cont_'],
             'valve':['valve','vlv'],
             'flow_switch':['flow_switch'],
             'relay':['relay','_rly','grnd_overcurrent','gnd_overcurrent'],
             'junction_box':['junction_box'],
             'expansion_joint':['expansion_joint','exp_joint','flex_joint'],
             'positioner':['^positioner','positioner$', 'posnr$'],
             'pressure_switch':['pressure_switch'],
             'vibration_monitor':['vibration_monitor'],
             'steam_jet_air_ejector':['steam_jet_air','steam_air_ejector','steam_jet_air_ejector']
            }
# NOTE(review): 'pump' and 'circuit_breaker' keys are repeated above -- only the
# last occurrence of each survives in the dict; confirm that is intended.
comp_dict3 = {'transformers':['transformer'],
              'expansion_joint':['expansion_joint'],}
for i in comp_dict:
    # NOTE(review): 'list' shadows the builtin of the same name.
    list = comp_dict[i]
    pattern = '|'.join(list)
    # NOTE(review): chained .loc assignment on a filtered frame may trigger
    # SettingWithCopyWarning -- verify the writes propagate as intended.
    unspecified.NewComponent.loc[unspecified.ComponentDesc.str.contains(pattern,case=False,na=False, regex=True)] = i
for i in comp_dict3:
    list3 = comp_dict3[i]
    pattern = '|'.join(list3)
    unspecified.NewComponent.loc[unspecified.Subcomponent.str.contains(pattern,case=False,na=False, regex = True)] = i
# Assign 'NewComponent' categories to unspecified equipment
unspecified.NewComponent = unspecified.NewComponent.apply(lambda x: 'unspecified_equipment' if x =='' else x )
unspecified.Component = unspecified.NewComponent
df2[df2['Component'] == 'unspecified_equipment'] = unspecified
# Assign Grouped Component Names to match IRIS
# Each statement collapses one or more raw component labels into a single
# grouped IRIS category, writing back into df2.Component in place.
df2.Component.loc[(df2.Component == 'accumulator')|(df2.Component == 'tank')] = 'accumulators,_tanks,_air_receivers'
df2.Component.loc[(df2.Component == 'air_dryer')] = 'air_dryers,_dehumidifiers'
df2.Component.loc[(df2.Component == 'annunciator_module')|(df2.Component == 'alarm')] = 'annunciator_modules,_alarms'
df2.Component.loc[(df2.Component == 'battery')|(df2.Component == 'battery_-_charger')] = 'batteries,_battery_chargers'
df2.Component.loc[(df2.Component == 'bistable')|(df2.Component == 'switch')|(df2.Component == 'i&c_-_temperature_switch')|(df2.Component == 'control_switch')|(df2.Component == 'pressure_switch')|(df2.Component == 'i&c_-_pressure_switch')|(df2.Component == 'flow_switch')] = 'bistable,_switch_(mechanical,_electronic)'
df2.Component.loc[(df2.Component == 'compressor')|(df2.Component == 'cooling_tower')|(df2.Component == 'air_handling_equipment')|(df2.Component == 'fan')|(df2.Component == 'cooling_unit')|(df2.Component == 'vacuum_pump')] = 'blowers,_compressors,_fans,_vacuum_pumps,_cooling_units'
df2.Component.loc[(df2.Component == 'heating_vessel')] = 'boilers,_heating_vessels,_excluding_reactor_vessels'
df2.Component.loc[(df2.Component == 'circuit_breaker')|(df2.Component == 'switchgear')|(df2.Component == 'switchgear_-_motor_control_centers')|(df2.Component == 'circuit_breaker_-_substation')|(df2.Component == 'contactor')|(df2.Component=='motor_controller')|(df2.Component=='manual_switch')|(df2.Component == 'fuse')|(df2.Component == 'circuit_card')] = 'circuit_breakers,_contactors,_motor_controllers,_manual_switches'
df2.Component.loc[(df2.Component == 'control_panel')] = 'control_board/panel'
df2.Component.loc[(df2.Component == 'control_rod')|(df2.Component == 'control_element_assembly')] = 'control_rods,_control_element_assemblies'
df2.Component.loc[(df2.Component == 'crane')] = 'crane,_hoist,_or_lifting_device'
df2.Component.loc[(df2.Component == 'demineralizer')|(df2.Component == 'ion_exchanger')] = 'demineralizers,_ion_exchangers'
df2.Component.loc[(df2.Component == 'feedwater_heater')] = 'electric_heaters'
df2.Component.loc[(df2.Component == 'bus')|(df2.Component == 'cable')|(df2.Component == 'junction_box')|(df2.Component == 'electrical_conductor')] = 'electrical_conductors,_bus,_cable,_wire'
df2.Component.loc[(df2.Component == 'i&c_-_capacitor_-_electrolytic')|(df2.Component == 'i&c_-_dc_power_supply')] = 'electronic_power_supply'
df2.Component.loc[(df2.Component == 'diesel_engine')] = 'engines_(gas,_diesel)'
df2.Component.loc[(df2.Component == 'filter')|(df2.Component == 'strainer')|(df2.Component == 'water_intake_screen')] = 'filters,_strainers,_screens'
# floor (none)
# roof (none)
df2.Component.loc[(df2.Component == 'generator')|(df2.Component == 'inverter')|(df2.Component == 'motor_generator')|(df2.Component == 'main_generator_-_exciter')]='generators,_inverters,_motor_generators'
df2.Component.loc[(df2.Component == 'governor')|(df2.Component == 'fluid_drive')|(df2.Component == 'coupling')|(df2.Component == 'gearbox')|(df2.Component=='gearbox_with_cooler')]='governors,_couplings,_gear_boxes'
df2.Component.loc[(df2.Component == 'heat_exchanger')|(df2.Component == 'condenser')|(df2.Component == 'steam_jet_air_ejector')|(df2.Component == 'steam_generator')] = 'heat_exchanger,_condenser,_steam_generator'
#Illumination Source (None)
df2.Component.loc[(df2.Component == 'indicator')|(df2.Component == 'recorder')|(df2.Component == 'gauge')] = 'indicators,_recorders,_gauges'
# NOTE(review): the last two conditions below read df.Component instead of
# df2.Component -- harmless only because df2 aliases df; make them consistent.
df2.Component.loc[(df2.Component == 'positioner')|(df2.Component == 'i&c_-_analog_electronic_controller')|(df2.Component == 'i&c_-_positioner')|(df2.Component == 'i&c_-_booster')|(df2.Component == 'i&c_-_pneumatic_controller')|(df.Component == 'i&c_-_pressure_regulator')|(df.Component == 'i&c_-_signal_conditioner')] = 'instrument_controllers'
# integrator/computation module OK
# isolation devices ok
# landscaping (none)
# Manual tools (none)
df2.Component.loc[(df2.Component == 'motor')|(df2.Component == 'motor_(electric)')|(df2.Component == 'motor_(hydraulic)')|(df2.Component == 'motors_(electric)')|(df2.Component == 'motors_(hydraulic)')] = 'motors_(electric,_hydraulic,_pneumatic)'
df2.Component.loc[(df2.Component == 'penetration')] = 'penetrations,_air_locks,_hatches'
df2.Component.loc[(df2.Component == 'pipe')|(df2.Component == 'fitting')|(df2.Component == 'expansion_joint')|(df2.Component == 'rupture_disc')] = 'pipes,_fittings,_rupture_discs'
# Power Tools (None)
df2.Component.loc[(df2.Component == 'pressure_vessel')|(df2.Component == 'pressurizer')|(df2.Component == 'reactor_vessel')] = 'pressure_vessel,_reactor_vessel,_pressurizer'
# Process Fluid (None)
df2.Component.loc[(df2.Component == 'pump')|(df2.Component == 'pump_-_vertical')|(df2.Component == 'lube_oil_pump')] = 'pumps,_eductors'
# Recombiners (None)
df2.Component.loc[(df2.Component.str.contains('relay', case=False))] = 'relays'
df2.Component.loc[(df2.Component == 'rod_drive_mechanism')] = 'rod_drive_mechanism,_hydraulic_control_unit'
# Room (None)
# Software (None)
df2.Component.loc[(df2.Component == 'support')] = 'supports,_hangers,_snubbers'
df2.Component.loc[(df2.Component == 'transformers')|(df2.Component == 'voltage_regulator')|(df2.Component == 'shunt_reactor')] = 'transformers,_shunt_reactors'
#Transient Combustible (None)
df2.Component.loc[(df2.Component == 'transmitter')|(df2.Component == 'i&c_-_i/p_and_e/p_transducer')|(df2.Component == 'i&c_-_pressure_sensor_and_transmitter')|(df2.Component == 'radiation_monitor')|(df2.Component == 'vibration_monitor')|(df2.Component == 'detector')] = 'transmitters,_detectors,_elements'
df2.Component.loc[(df2.Component == 'turbine_(steam)')|(df2.Component == 'main_turbine')|(df2.Component == 'main_turbine_-_ehc_hydraulics')|(df2.Component == 'main_turbine_-_mhc_controls')|(df2.Component == 'main_turbine_-_trip_system')]= 'turbines_(steam,_gas)'
df2.Component.loc[(df2.Component == 'valve_actuator')] = 'valve_operators'
df2.Component.loc[(df2.Component == 'valve')|(df2.Component=='valve_-_air_operated')|(df2.Component == 'valve_-_air_operated_-_aov_-_piston')|(df2.Component=='valve_-_ball')|(df2.Component == 'valve_-_check')|(df2.Component=='valve_-_gate')|(df2.Component == 'valve_-_globe')|(df2.Component=='valve_-_motor_operated')|(df2.Component == 'valve_-_power_operated_relief')|(df2.Component=='valve_-_pressure_relief')|(df2.Component == 'valve_-_solenoid_operated')|(df2.Component=='valve_-_steam_turbine')|(df2.Component == 'dampers_&_ducting')]= 'valves,_dampers'
# Vehicle (None)
# Wall (None)
# Waste Bin (None)
# Working Fluid (None)
# Assign new component names to original dataframe
df.NewComponent = df2['Component']
# # Output Final Cleansed csv File
# df3 aliases df, then is rebound to a new (non-aliasing) frame by replace().
df3 = df
df3 = df3.replace('_',' ',regex= True)
df3.NewComponent = df.NewComponent.str.title()
df3.NewSystem=df3.NewSystem.str.title()
df3.to_csv('cleansed_equipment_data.csv')
# # Search for a system using this:
# #df.loc[df.NewSystem.str.contains('main_generator_system',regex=True)].groupby(['NewSystem','NewComponent','Method']).size()
# #Outputs only results used in Table. Percentages were calculated in excel. see Table 3_2 and Table 3_3.xlsx
# dictionary = {'feedwater_system':['valves,_dampers','valve_operators','pumps,_eductors','instrument_controllers'],
# 'mainreheat_steam_system':['valves,_dampers','valve_operators','pipes,_fittings,_rupture_discs','instrument_controllers'],
# 'main_generator_output_power_system':['transformers,_shunt_reactors','relays','electrical_conductors,_bus,_cable,_wire','generators,_inverters,_motor_generators','circuit_breakers,_contactors,_motor_controllers,_manual_switches'],
# 'medium_voltage_power_sys':['generators,_inverters,_motor_generators','circuit_breakers,_contactors,_motor_controllers,_manual_switches','relays','transformers,_shunt_reactors'],
# 'main_generator_system':['relays','generators,_inverters,_motor_generators','instrument_controllers','transformers,_shunt_reactors'],
# 'main_turbine_system':['turbines_(steam,_gas)','instrument_controllers','valve_operators','valves,_dampers'],
# 'reactor_coolant_system':['motors_(electric,_hydraulic,_pneumatic)','circuit_breakers,_contactors,_motor_controllers,_manual_switches','valves,_dampers','pumps,_eductors'],
# 'condensate_system':['heat_exchanger,_condenser,_steam_generator','motors_(electric,_hydraulic,_pneumatic)','pumps,_eductors'] }
# for i in dictionary:
# print('\n',i,'\n')
# for j in dictionary[i]:
# print(df.loc[(df.NewSystem.str.contains(i) & df.NewComponent.str.contains(j))].groupby(['NewSystem','NewComponent','Method']).size())
# # Print the remaining systems where component will not display
# print('\n','main_turbine_system','\n',df.loc[df.NewSystem.str.contains('main_turbine_system',regex=True)].groupby(['NewSystem','NewComponent','Method']).size())
# print('\n','reactor_coolant_system','\n',df.loc[df.NewSystem.str.contains('reactor_coolant_system',regex=True)].groupby(['NewSystem','NewComponent','Method']).size())
# print('\n','condensate_system','\n',df.loc[df.NewSystem.str.contains('condensate_system',regex=True)].groupby(['NewSystem','NewComponent','Method']).size())
'''
SPV Mitigation strategy heatmap
'''
# # # Table 3-2: SPV Mitigation Strategy Category Heatmap
#
# # Search for a system using this:
# #df.loc[df.NewSystem.str.contains('main_generator_system',regex=True)].groupby(['NewSystem','NewComponent','Method']).size()
# #Outputs only results used in Table. Percentages were calculated in excel. see Table 3_2 and Table 3_3.xlsx
# dictionary = {'feedwater_system':['valves,_dampers','valve_operators','pumps,_eductors','instrument_controllers'],
# 'mainreheat_steam_system':['valves,_dampers','valve_operators','pipes,_fittings,_rupture_discs','instrument_controllers'],
# 'main_generator_output_power_system':['transformers,_shunt_reactors','relays','electrical_conductors,_bus,_cable,_wire','generators,_inverters,_motor_generators','circuit_breakers,_contactors,_motor_controllers,_manual_switches'],
# 'medium_voltage_power_sys':['generators,_inverters,_motor_generators','circuit_breakers,_contactors,_motor_controllers,_manual_switches','relays','transformers,_shunt_reactors'],
# 'main_generator_system':['relays','generators,_inverters,_motor_generators','instrument_controllers','transformers,_shunt_reactors'],
# 'main_turbine_system':['turbines_(steam,_gas)','instrument_controllers','valve_operators','valves,_dampers'],
# 'reactor_coolant_system':['motors_(electric,_hydraulic,_pneumatic)','circuit_breakers,_contactors,_motor_controllers,_manual_switches','valves,_dampers','pumps,_eductors'],
# 'condensate_system':['heat_exchanger,_condenser,_steam_generator','motors_(electric,_hydraulic,_pneumatic)','pumps,_eductors'] }
# for i in dictionary:
# print('\n',i,'\n')
# for j in dictionary[i]:
# print(df.loc[(df.NewSystem.str.contains(i) & df.NewComponent.str.contains(j))].groupby(['NewSystem','NewComponent','Method']).size())
# # Print the remaining systems where component will not display
# print('\n','main_turbine_system','\n',df.loc[df.NewSystem.str.contains('main_turbine_system',regex=True)].groupby(['NewSystem','NewComponent','Method']).size())
# print('\n','reactor_coolant_system','\n',df.loc[df.NewSystem.str.contains('reactor_coolant_system',regex=True)].groupby(['NewSystem','NewComponent','Method']).size())
# print('\n','condensate_system','\n',df.loc[df.NewSystem.str.contains('condensate_system',regex=True)].groupby(['NewSystem','NewComponent','Method']).size())
'''
Mitigation Strategy by Fleet
'''
# # # Table 3-3 Mitigation Strategy Categories by Fleet
# fleetlist=df.Fleet.unique()
# for i in fleetlist:
# number = df.loc[df.Fleet==i].Method.value_counts()/sum(df.loc[df.Fleet==i].Method.value_counts()) * 100
# print(i,'\n',number)
# fleet_plot.ScramScaled
|
[
"pandas.read_csv"
] |
[((1009, 1064), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'encoding': '"""ISO-8859-1"""', 'dtype': 'str'}), "(filename, encoding='ISO-8859-1', dtype=str)\n", (1020, 1064), True, 'import pandas as pd\n')]
|
from automationcommon.models import clear_local_user, set_local_user
class RequestUserMiddleware(object):
    """
    Middleware that records ``request.user`` as the local user so the
    audit trail can attribute changes to the requesting user.
    """

    def __init__(self, get_response=None):
        self.get_response = get_response

    def __call__(self, request):
        # New-style middleware: __init__ must have received get_response.
        assert self.get_response is not None
        self.process_request(request)
        response = self.get_response(request)
        return self.process_response(request, response)

    @classmethod
    def process_request(cls, request):
        # Record the requesting user for the duration of this request.
        set_local_user(request.user)

    @classmethod
    def process_response(cls, request, response):
        """Clear the user to minimise the chance of a user being wrongly assigned."""
        clear_local_user()
        return response

    @classmethod
    def process_exception(cls, request, exception):
        """Clear the user to minimise the chance of a user being wrongly assigned."""
        clear_local_user()
|
[
"automationcommon.models.set_local_user",
"automationcommon.models.clear_local_user"
] |
[((703, 731), 'automationcommon.models.set_local_user', 'set_local_user', (['request.user'], {}), '(request.user)\n', (717, 731), False, 'from automationcommon.models import clear_local_user, set_local_user\n'), ((912, 930), 'automationcommon.models.clear_local_user', 'clear_local_user', ([], {}), '()\n', (928, 930), False, 'from automationcommon.models import clear_local_user, set_local_user\n'), ((1137, 1155), 'automationcommon.models.clear_local_user', 'clear_local_user', ([], {}), '()\n', (1153, 1155), False, 'from automationcommon.models import clear_local_user, set_local_user\n')]
|
# -*- coding: utf-8 -*-
'''
@Author: <NAME>
@Description: file content
@Date: 2019-04-20 16:53:54
@LastEditTime: 2019-04-21 20:42:41
'''
import os
import sys
import re
import cv2
PATH_RESULT = 'xxx'
PATH_VIDEO = "xxx"
PATH_CROP_IMG = "xx"
def cropXY(img, point1, point2):
    """Crop the axis-aligned rectangle spanned by two (x, y) corner points.

    Coordinates may be strings; they are converted to int before slicing.
    Returns the sub-image img[top:top+h, left:left+w].
    """
    x1, y1 = int(point1[0]), int(point1[1])
    x2, y2 = int(point2[0]), int(point2[1])
    left, top = min(x1, x2), min(y1, y2)
    w, h = abs(x1 - x2), abs(y1 - y2)
    return img[top:top + h, left:left + w]
if __name__ == "__main__":
    print("=====================")
    print("video_num:{}".format(len(os.listdir(PATH_VIDEO))))
    print("txt_num:{}".format(len(os.listdir(PATH_RESULT))))
    print("=====================")
    # temp_video = os.path.join(PATH_VIDEO, "ch04_20190324074316.mp4")
    # temp_txt = os.path.join(PATH_RESULT, "ch04_20190324074316.txt")
    _id = 0
    # Each result txt is paired with an .mp4 of the same base name.
    for lists in os.listdir(PATH_RESULT):
        _id += 1
        name = lists.split('.')[0]
        temp_video = os.path.join(PATH_VIDEO, name + ".mp4")
        temp_txt = os.path.join(PATH_RESULT, name + ".txt")
        pathCropImgDetail = os.path.join(PATH_CROP_IMG, name)
        # An existing output directory means this video was already processed.
        if os.path.isdir(pathCropImgDetail):
            print("[{}/{}]Video:{} COMPELETE! Skip ...".format(
                _id, len(os.listdir(PATH_RESULT)), temp_video))
            continue
        else:
            os.makedirs(pathCropImgDetail)
        lines = open(temp_txt).readlines()
        # A single line means no detections (header only) -- skip the video.
        if len(lines) == 1:
            print("[{}/{}]Video:{} NO People! Skip ...".format(
                _id, len(os.listdir(PATH_RESULT)), temp_video))
            continue
        print("[{}/{}] {}<-->{}".format(_id, len(os.listdir(PATH_RESULT)),
                                        temp_txt, temp_video))
        cropFrameId = []
        point1 = []
        point2 = []
        # Parse "(frame) [x1,y1] [x2,y2]"-style records, skipping the header line.
        for _index, line in enumerate(lines):
            if _index == 0:
                continue
            s = re.findall(r'[(|\[](.*?)[)|\]]', line)
            cropFrameId.append(s[0])
            point1.append(s[1].split(','))
            point2.append(s[2].split(','))
        # print(_index, len(cropFrameId), len(point1), len(point2))
        # exit(0)
        _framesId = 0
        _cropFrameId = 0
        cap = cv2.VideoCapture(temp_video)
        # NOTE(review): _index is reused below as "last record index" left over
        # from the enumerate loop above -- fragile; confirm it equals len(lines)-1.
        while cap.isOpened():
            ret, frame_img = cap.read()
            if ret and (_cropFrameId != _index):
                sys.stdout.write(
                    "\r Scanning Frames[{}<->{}], Crop Frames[{}/{}]".format(
                        _framesId, int(cropFrameId[_cropFrameId]),
                        _cropFrameId + 1, _index))
                sys.stdout.flush()
                if _framesId == int(cropFrameId[_cropFrameId]):
                    # First detection for this frame id: reset the duplicate counter.
                    sameId = 0
                    cut_img = cropXY(frame_img, point1[_cropFrameId],
                                     point2[_cropFrameId])
                    cv2.imwrite(
                        os.path.join(pathCropImgDetail,
                                     str(_framesId) + ".jpg"), cut_img)
                    _cropFrameId += 1
                elif int(cropFrameId[_cropFrameId]) == _framesId - 1:
                    # Additional detection on the same frame: suffix with sameId and
                    # step _framesId back so the frame counter stays in sync.
                    cut_img = cropXY(frame_img, point1[_cropFrameId],
                                     point2[_cropFrameId])
                    cv2.imwrite(
                        os.path.join(
                            pathCropImgDetail,
                            str(_framesId) + "_" + str(sameId) + ".jpg"),
                        cut_img)
                    sameId += 1
                    _cropFrameId += 1
                    _framesId -= 1
                _framesId += 1
            else:
                break
        print()
|
[
"os.makedirs",
"os.path.isdir",
"cv2.VideoCapture",
"re.findall",
"sys.stdout.flush",
"os.path.join",
"os.listdir"
] |
[((942, 965), 'os.listdir', 'os.listdir', (['PATH_RESULT'], {}), '(PATH_RESULT)\n', (952, 965), False, 'import os\n'), ((1040, 1079), 'os.path.join', 'os.path.join', (['PATH_VIDEO', "(name + '.mp4')"], {}), "(PATH_VIDEO, name + '.mp4')\n", (1052, 1079), False, 'import os\n'), ((1099, 1139), 'os.path.join', 'os.path.join', (['PATH_RESULT', "(name + '.txt')"], {}), "(PATH_RESULT, name + '.txt')\n", (1111, 1139), False, 'import os\n'), ((1169, 1202), 'os.path.join', 'os.path.join', (['PATH_CROP_IMG', 'name'], {}), '(PATH_CROP_IMG, name)\n', (1181, 1202), False, 'import os\n'), ((1214, 1246), 'os.path.isdir', 'os.path.isdir', (['pathCropImgDetail'], {}), '(pathCropImgDetail)\n', (1227, 1246), False, 'import os\n'), ((2302, 2330), 'cv2.VideoCapture', 'cv2.VideoCapture', (['temp_video'], {}), '(temp_video)\n', (2318, 2330), False, 'import cv2\n'), ((1423, 1453), 'os.makedirs', 'os.makedirs', (['pathCropImgDetail'], {}), '(pathCropImgDetail)\n', (1434, 1453), False, 'import os\n'), ((1993, 2032), 're.findall', 're.findall', (['"""[(|\\\\[](.*?)[)|\\\\]]"""', 'line'], {}), "('[(|\\\\[](.*?)[)|\\\\]]', line)\n", (2003, 2032), False, 'import re\n'), ((649, 671), 'os.listdir', 'os.listdir', (['PATH_VIDEO'], {}), '(PATH_VIDEO)\n', (659, 671), False, 'import os\n'), ((709, 732), 'os.listdir', 'os.listdir', (['PATH_RESULT'], {}), '(PATH_RESULT)\n', (719, 732), False, 'import os\n'), ((2696, 2714), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2712, 2714), False, 'import sys\n'), ((1724, 1747), 'os.listdir', 'os.listdir', (['PATH_RESULT'], {}), '(PATH_RESULT)\n', (1734, 1747), False, 'import os\n'), ((1337, 1360), 'os.listdir', 'os.listdir', (['PATH_RESULT'], {}), '(PATH_RESULT)\n', (1347, 1360), False, 'import os\n'), ((1615, 1638), 'os.listdir', 'os.listdir', (['PATH_RESULT'], {}), '(PATH_RESULT)\n', (1625, 1638), False, 'import os\n')]
|
from distutils.core import setup
# NOTE(review): distutils is deprecated (PEP 632) and removed in Python 3.12;
# consider migrating this to setuptools.
# NOTE(review): version/description/author/url are left empty -- fill in before release.
setup(name='pyLucidIo',
    version='',
    description='',
    author='',
    url='',
    #py_modules=['lucidIo'],
    packages=['PbSignal'])
|
[
"distutils.core.setup"
] |
[((33, 130), 'distutils.core.setup', 'setup', ([], {'name': '"""pyLucidIo"""', 'version': '""""""', 'description': '""""""', 'author': '""""""', 'url': '""""""', 'packages': "['PbSignal']"}), "(name='pyLucidIo', version='', description='', author='', url='',\n packages=['PbSignal'])\n", (38, 130), False, 'from distutils.core import setup\n')]
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
This file contains the custom methods which are executed whenever any command is called.
The custom methods are linked to the commands at the time of command registeration (commands.py).
"""
from knack.util import CLIError
def list_private_cloud(client, location):
    """
    Returns a list of private clouds in a region.
    """
    clouds = client.list(location)
    return clouds
def show_private_cloud(client, private_cloud, location):
    """
    Get the details of a private cloud.
    """
    details = client.get(private_cloud, location)
    return details
def list_resource_pool(client, private_cloud, location):
    """
    Returns the list of resource pool in the specified private cloud.
    """
    pools = client.list(location, private_cloud)
    return pools
def show_resource_pool(client, private_cloud, resource_pool, location):
    """
    Returns the details of a resource pool.
    """
    pool = client.get(location, private_cloud, resource_pool)
    return pool
def list_virtual_networks(client, private_cloud, resource_pool, location):
    """
    Returns the list of available virtual networks in a resource pool, in a private cloud.
    """
    networks = client.list(location, private_cloud, resource_pool)
    return networks
def show_virtual_network(client, private_cloud, virtual_network, location):
    """
    Returns the details of a virtual network in a private cloud.
    """
    network = client.get(location, private_cloud, virtual_network)
    return network
def list_vm_template(client, private_cloud, resource_pool, location):
    """
    Returns the list of VMware virtual machines templates in a resource pool, in a private cloud.
    """
    # NOTE: argument order (private_cloud first) differs from the other
    # list_* helpers in this file.
    templates = client.list(private_cloud, location, resource_pool)
    return templates
def show_vm_template(client, private_cloud, template, location):
    """
    Returns details of a VMware virtual machines template in a private cloud.
    """
    vm_template = client.get(location, private_cloud, template)
    return vm_template
# --------------------------------------------------------------------------------------------
# Virtual Machine APIs
# --------------------------------------------------------------------------------------------
def _modify_template_disks_according_to_input(template_disks, input_disks):
    """
    Change template disks according to the input given by the user.

    Disks whose name matches a template disk have the specified fields
    overridden in place; unmatched names create a new VirtualDisk, for
    which controller, mode and size are all mandatory.

    :param template_disks: list of VirtualDisk objects from the vm-template
        (mutated in place and also returned).
    :param input_disks: list of dicts with keys 'name' and optionally
        'controller', 'mode', 'size'.
    :raises CLIError: if a new disk is missing a required parameter.
    :return: the updated template_disks list.
    """
    # Map each template disk name to its index for O(1) lookup.
    disk_index_by_name = {disk.virtual_disk_name: i
                          for i, disk in enumerate(template_disks)}
    from .vendored_sdks.models import VirtualDisk
    for disk in input_disks:
        disk_name = disk['name']
        if disk_name in disk_index_by_name:
            # Existing disk: override only the fields the user supplied.
            existing = template_disks[disk_index_by_name[disk_name]]
            if 'controller' in disk:
                existing.controller_id = disk['controller']
            if 'mode' in disk:
                existing.independence_mode = disk['mode']
            if 'size' in disk:
                existing.total_size = disk['size']
        else:
            # New disk: every property is required. One loop replaces the
            # three duplicated if/else-raise stanzas; messages are unchanged.
            for required in ('controller', 'mode', 'size'):
                if required not in disk:
                    raise CLIError(required + ' parameter not specified for disk ' +
                                   disk_name + ".")
            template_disks.append(VirtualDisk(controller_id=disk['controller'],
                                              independence_mode=disk['mode'],
                                              total_size=disk['size']))
    return template_disks
def _modify_template_nics_according_to_input(template_nics, input_nics, cmd, client,
                                             resource_group_name, vm_name,
                                             location, private_cloud):
    """
    Change template nics according to the input given by the user.

    Nics whose name matches a template nic have the specified fields
    overridden in place (and their virtual_nic_id reset); unmatched names
    create a new VirtualNic, for which virtual-network, adapter and
    power-on-boot are all mandatory.

    :raises CLIError: if a new nic is missing a required parameter.
    :return: the updated template_nics list.
    """
    # Map each template nic name to its index for O(1) lookup.
    nic_index_by_name = {nic.virtual_nic_name: i
                         for i, nic in enumerate(template_nics)}
    from .vendored_sdks.models import VirtualNic
    from .vendored_sdks.models import VirtualNetwork
    from ._validators import virtual_network_name_or_id_validator
    for nic in input_nics:
        nic_name = nic['name']
        if nic_name in nic_index_by_name:
            # Existing nic: override only the fields the user supplied.
            existing = template_nics[nic_index_by_name[nic_name]]
            if 'virtual-network' in nic:
                existing.network.id = nic['virtual-network']
            if 'adapter' in nic:
                existing.nic_type = nic['adapter']
            if 'power-on-boot' in nic:
                existing.power_on_boot = nic['power-on-boot']
            # Reset the id so the service re-resolves the modified nic.
            existing.virtual_nic_id = None
        else:
            # New nic: every property is required. One loop replaces the
            # three duplicated if/else-raise stanzas; messages are unchanged.
            for required in ('virtual-network', 'adapter', 'power-on-boot'):
                if required not in nic:
                    raise CLIError(required + ' parameter not specified for nic ' +
                                   nic_name + ".")
            vnet = virtual_network_name_or_id_validator(cmd, client, nic['virtual-network'],
                                                        resource_group_name, vm_name,
                                                        location, private_cloud)
            template_nics.append(VirtualNic(network=VirtualNetwork(id=vnet),
                                            nic_type=nic['adapter'],
                                            power_on_boot=nic['power-on-boot']))
    return template_nics
def create_vm(cmd, client, resource_group_name, vm_name,
              private_cloud, template, resource_pool,
              amount_of_ram=None, number_of_cores=None,
              location=None, expose_to_guest_vm=None,
              nics=None, disks=None):
    """
    Create or update a VMware virtual machine.

    The vm-template specified is used as a template for creation: it
    supplies defaults for RAM, core count, guest exposure, disks and
    nics; any value the caller passes explicitly takes precedence.
    """
    from .vendored_sdks.models import VirtualMachine
    from .vendored_sdks.models import ResourcePool
    from ._config import PATH_CHAR
    pool = ResourcePool(id=resource_pool)
    # Resource ids end in the resource name; peel off the last path segment.
    template_name = template.rsplit(PATH_CHAR, 1)[-1]
    private_cloud_name = private_cloud.rsplit(PATH_CHAR, 1)[-1]
    vm_template = client.virtual_machine_templates.get(location, private_cloud_name, template_name)
    cores = number_of_cores if number_of_cores else vm_template.number_of_cores
    ram = amount_of_ram if amount_of_ram else vm_template.amount_of_ram
    expose = (vm_template.expose_to_guest_vm
              if expose_to_guest_vm is None else expose_to_guest_vm)
    final_disks = vm_template.disks
    if disks is not None:
        final_disks = _modify_template_disks_according_to_input(final_disks, disks)
    final_nics = vm_template.nics
    if nics is not None:
        final_nics = _modify_template_nics_according_to_input(final_nics, nics, cmd, client,
                                                              resource_group_name, vm_name,
                                                              location, private_cloud)
    virtual_machine = VirtualMachine(location=location,
                                     amount_of_ram=ram,
                                     disks=final_disks,
                                     expose_to_guest_vm=expose,
                                     nics=final_nics,
                                     number_of_cores=cores,
                                     private_cloud_id=private_cloud,
                                     resource_pool=pool,
                                     template_id=template)
    return client.virtual_machines.create_or_update(resource_group_name, vm_name, virtual_machine)
def list_vm(client, resource_group_name=None):
    """
    Returns a list of VMware virtual machines in the current subscription.
    If resource group is specified, only the virtual machines
    in that resource group would be listed.
    """
    if resource_group_name is not None:
        return client.list_by_resource_group(resource_group_name)
    return client.list_by_subscription()
def delete_vm(client, resource_group_name, vm_name):
    """
    Delete a VMware virtual machine.
    Thin wrapper that delegates to the service client's delete operation.
    """
    outcome = client.delete(resource_group_name, vm_name)
    return outcome
def get_vm(client, resource_group_name, vm_name):
    """
    Returns a VMware virtual machine.
    Thin wrapper that delegates to the service client's get operation.
    """
    machine = client.get(resource_group_name, vm_name)
    return machine
def start_vm(client, resource_group_name, vm_name):
    """
    Start a VMware virtual machine.
    Thin wrapper that delegates to the service client's start operation.
    """
    result = client.start(resource_group_name, vm_name)
    return result
def stop_vm(client, resource_group_name, vm_name, stop_mode):
    """
    Stop a VMware virtual machine.
    The stop_mode argument selects how the machine is brought down and
    is forwarded verbatim to the service.
    """
    result = client.stop(resource_group_name, vm_name, stop_mode)
    return result
def update_vm(client, resource_group_name, vm_name, **kwargs):
    """
    Update VMware virtual machine tags.
    The new tags are read from the 'parameters' keyword argument.
    """
    new_tags = kwargs['parameters'].tags
    return client.update(resource_group_name, vm_name, new_tags)
# --------------------------------------------------------------------------------------------
# VM nics APIs
# --------------------------------------------------------------------------------------------
def add_vnic(cmd, client, resource_group_name, vm_name,
             virtual_network, adapter="VMXNET3", power_on_boot=False):
    """
    Add virtual network interface to a VMware virtual machine.
    The virtual_network argument may be either a plain name or a full
    resource id; the validator normalizes it to an id.
    """
    from .vendored_sdks.models import VirtualNic
    from .vendored_sdks.models import VirtualNetwork
    from ._validators import virtual_network_name_or_id_validator
    machine = client.get(resource_group_name, vm_name)
    network_id = virtual_network_name_or_id_validator(cmd, client, virtual_network,
                                                      resource_group_name, vm_name)
    new_nic = VirtualNic(network=VirtualNetwork(id=network_id),
                          nic_type=adapter,
                          power_on_boot=power_on_boot)
    machine.nics.append(new_nic)
    return client.create_or_update(resource_group_name, vm_name, machine)
def list_vnics(client, resource_group_name, vm_name):
    """
    List details of a VMware virtual machine's nics.
    """
    return client.get(resource_group_name, vm_name).nics
def show_vnic(client, resource_group_name, vm_name, nic_name):
    """
    Get the details of a VMware virtual machine's NIC.
    Returns None when no NIC with the given name exists.
    """
    virtual_machine = client.get(resource_group_name, vm_name)
    return next((nic for nic in virtual_machine.nics
                 if nic.virtual_nic_name == nic_name), None)
def delete_vnics(client, resource_group_name, vm_name, nic_names):
    """
    Delete NICs from a VM.

    Every NIC whose name appears in nic_names is removed from the
    virtual machine and the change is pushed with a single
    create-or-update call.  Names that match no NIC on the VM are
    reported afterwards through a CLIError.
    """
    virtual_machine = client.get(resource_group_name, vm_name)
    requested = set(nic_names)
    present = {nic.virtual_nic_name for nic in virtual_machine.nics}
    # Rebuild the NIC list with a comprehension instead of deepcopying it
    # and calling remove() per hit: that was O(n^2) and relied on the
    # model's equality semantics to match deep copies.
    virtual_machine.nics = [nic for nic in virtual_machine.nics
                            if nic.virtual_nic_name not in requested]
    client.create_or_update(resource_group_name, vm_name, virtual_machine)
    # Report names that matched nothing, deduplicated in first-seen order.
    missing = [name for name in dict.fromkeys(nic_names) if name not in present]
    if missing:
        raise CLIError(', '.join(missing) +
                       ' not present in the given virtual machine. Other nics (if mentioned) were deleted.')
# --------------------------------------------------------------------------------------------
# VM disks APIs
# --------------------------------------------------------------------------------------------
def add_vdisk(client, resource_group_name, vm_name, controller="1000",
              independence_mode="persistent", size=16777216):
    """
    Add disk to a VMware virtual machine
    Appends a freshly-built VirtualDisk to the machine and persists the
    change with a create-or-update call.
    """
    from .vendored_sdks.models import VirtualDisk
    machine = client.get(resource_group_name, vm_name)
    new_disk = VirtualDisk(controller_id=controller,
                           independence_mode=independence_mode,
                           total_size=size)
    machine.disks.append(new_disk)
    return client.create_or_update(resource_group_name, vm_name, machine)
def list_vdisks(client, resource_group_name, vm_name):
    """
    List details of disks available on a VMware virtual machine.
    """
    return client.get(resource_group_name, vm_name).disks
def show_vdisk(client, resource_group_name, vm_name, disk_name):
    """
    Get the details of a VMware virtual machine's disk.
    Returns None when no disk with the given name exists.
    """
    virtual_machine = client.get(resource_group_name, vm_name)
    return next((disk for disk in virtual_machine.disks
                 if disk.virtual_disk_name == disk_name), None)
def delete_vdisks(client, resource_group_name, vm_name, disk_names):
    """
    Delete disks from a VM.

    Every disk whose name appears in disk_names is removed from the
    virtual machine and the change is pushed with a single
    create-or-update call.  Names that match no disk on the VM are
    reported afterwards through a CLIError.
    """
    virtual_machine = client.get(resource_group_name, vm_name)
    requested = set(disk_names)
    present = {disk.virtual_disk_name for disk in virtual_machine.disks}
    # Rebuild the disk list with a comprehension instead of deepcopying it
    # and calling remove() per hit: that was O(n^2) and relied on the
    # model's equality semantics to match deep copies.
    virtual_machine.disks = [disk for disk in virtual_machine.disks
                             if disk.virtual_disk_name not in requested]
    client.create_or_update(resource_group_name, vm_name, virtual_machine)
    # Report names that matched nothing, deduplicated in first-seen order.
    missing = [name for name in dict.fromkeys(disk_names) if name not in present]
    if missing:
        raise CLIError(', '.join(missing) +
                       ' not present in the given virtual machine. Other disks (if mentioned) were deleted.')
|
[
"copy.deepcopy",
"knack.util.CLIError"
] |
[((12637, 12672), 'copy.deepcopy', 'copy.deepcopy', (['virtual_machine.nics'], {}), '(virtual_machine.nics)\n', (12650, 12672), False, 'import copy\n'), ((15179, 15215), 'copy.deepcopy', 'copy.deepcopy', (['virtual_machine.disks'], {}), '(virtual_machine.disks)\n', (15192, 15215), False, 'import copy\n'), ((13230, 13352), 'knack.util.CLIError', 'CLIError', (["(not_deleted_nics +\n ' not present in the given virtual machine. Other nics (if mentioned) were deleted.'\n )"], {}), "(not_deleted_nics +\n ' not present in the given virtual machine. Other nics (if mentioned) were deleted.'\n )\n", (13238, 13352), False, 'from knack.util import CLIError\n'), ((15796, 15920), 'knack.util.CLIError', 'CLIError', (["(not_deleted_disks +\n ' not present in the given virtual machine. Other disks (if mentioned) were deleted.'\n )"], {}), "(not_deleted_disks +\n ' not present in the given virtual machine. Other disks (if mentioned) were deleted.'\n )\n", (15804, 15920), False, 'from knack.util import CLIError\n'), ((3687, 3761), 'knack.util.CLIError', 'CLIError', (["('controller parameter not specified for disk ' + disk_name + '.')"], {}), "('controller parameter not specified for disk ' + disk_name + '.')\n", (3695, 3761), False, 'from knack.util import CLIError\n'), ((3876, 3944), 'knack.util.CLIError', 'CLIError', (["('mode parameter not specified for disk ' + disk_name + '.')"], {}), "('mode parameter not specified for disk ' + disk_name + '.')\n", (3884, 3944), False, 'from knack.util import CLIError\n'), ((4059, 4127), 'knack.util.CLIError', 'CLIError', (["('size parameter not specified for disk ' + disk_name + '.')"], {}), "('size parameter not specified for disk ' + disk_name + '.')\n", (4067, 4127), False, 'from knack.util import CLIError\n'), ((5975, 6052), 'knack.util.CLIError', 'CLIError', (["('virtual-network parameter not specified for nic ' + nic_name + '.')"], {}), "('virtual-network parameter not specified for nic ' + nic_name + '.')\n", (5983, 6052), False, 'from 
knack.util import CLIError\n'), ((6205, 6274), 'knack.util.CLIError', 'CLIError', (["('adapter parameter not specified for nic ' + nic_name + '.')"], {}), "('adapter parameter not specified for nic ' + nic_name + '.')\n", (6213, 6274), False, 'from knack.util import CLIError\n'), ((6445, 6520), 'knack.util.CLIError', 'CLIError', (["('power-on-boot parameter not specified for nic ' + nic_name + '.')"], {}), "('power-on-boot parameter not specified for nic ' + nic_name + '.')\n", (6453, 6520), False, 'from knack.util import CLIError\n')]
|
#! python
# -*- coding: utf-8 -*-
"""
WavyTool is a simple program that allows you to acquire data from input devices,
i.e microphones, and save them as file (csv, png). Also, you can perform
some simple processing as spectral analysis.
:authors: <NAME>, <NAME>
:contact: <EMAIL>, <EMAIL>
:since: 2015/02/27
"""
import collections
import json
import logging
import os
import sys
import time
import urllib.request
import numpy as np
# QtPy must be imported before pyqtgraph
from qtpy.QtCore import QTimer
from qtpy.QtGui import QPixmap
from qtpy.QtWidgets import (QApplication, QFileDialog, QMainWindow,
QMessageBox, QSplashScreen)
# Must be set to the same binding here
# Map QtPy's QT_API value onto the name PyQtGraph expects, so both
# libraries end up on the same Qt binding.
api_names = {'pyqt5': 'PyQt5', 'pyside2': 'PySide2', 'pyqt4': 'PyQt4', 'pyside': 'PySide'}
os.environ['PYQTGRAPH_QT_LIB'] = api_names[os.environ['QT_API']]
# PyQtGraph must be imported after QtPy
import pyqtgraph as pg
# Then import the own interface
from wavytool import __version__ as version
from wavytool import app_name
from wavytool.core_wavy import AudioRecord
from wavytool.gui_wav2dat import ConvertWave2Data
from wavytool.mw_wavy import Ui_MainWindow
logging.basicConfig(level=logging.DEBUG)
# Informing about used binding
logging.info('Using Qt binding (QtPy/PyQtGraph): %s', (os.environ['QT_API'],
                                                       os.environ['PYQTGRAPH_QT_LIB']))
# HTML body shown in the About dialog and on the About tab.
about = ("<h3>{} v.{}</h3>"
         "<p>© <NAME>, <NAME><br/>"
         "Sao Carlos Institute of Physics<br/>"
         "University of Sao Paulo<br/>"
         "<a href='https://github.com/dpizetta/wavy'>WavyTool on GitHub</a><br/>"
         "<a href='https://pypi.org/project/wavytool'>WavyTool on PyPI</a><br/>"
         "<a href='http://choosealicense.com/licenses/mit'>MIT License</a><br/></p>").format(app_name, version)
def main():
    """The main function.

    Creates the QApplication, applies the optional dark theme, shows a
    splash screen for ~1 second, builds the main window and enters the
    Qt event loop.  Returns the event loop's exit code.
    """
    args = sys.argv[1:]
    wavy = QApplication(args)
    wavy.setApplicationVersion(version)
    wavy.setApplicationName(app_name)
    wavy.setOrganizationName("Sao Carlos Institute of Physics - University of Sao Paulo")
    wavy.setOrganizationDomain("www.ifsc.usp.br")
    # Dark theme is optional: missing package or a broken stylesheet only
    # logs a warning, it never aborts startup.
    try:
        import qdarkstyle
    except ImportError:
        logging.warning("No dark theme installed, use 'pip install qdarkstyle' to install.")
    else:
        try:
            wavy.setStyleSheet(qdarkstyle.load_stylesheet_from_environment())
        except Exception as err:
            logging.warning("Problems using qdarkstyle.\nError: %s", str(err))
    pixmap = QPixmap("wavytool/images/symbol.png")
    splash = QSplashScreen(pixmap)
    start = time.time()
    splash.show()
    splash.repaint()
    splash.showMessage("Loading...")
    wavy.processEvents()
    # Busy-wait ~1 s while pumping events so the splash stays responsive.
    while time.time() - start < 1:
        time.sleep(0.001)
        wavy.processEvents()
    splash.showMessage("Starting...")
    window = MainWindow()
    window.showMaximized()
    splash.finish(window)
    # First run has no config file yet; in that case ask for a data folder.
    try:
        with open('wavytool.config', 'r') as json_file:
            data = json.load(json_file)
            window.base_path = data['data_folder']
            logging.info('Data folder is: %s', window.base_path)
    except IOError:
        window.getDataFolder()
    return wavy.exec_()
class GlobalBuffer():
    """Allows real-time data transfer between plots.

    A single shared circular buffer: the live plotter writes samples in,
    the recording plotter reads them back out.

    Attributes:
        recording (bool): True while a recording session is active.
        buffer_size (int): capacity of the circular data buffer.
        data (np.ndarray): sample storage, indexed modulo buffer_size.
        counter (int): producer write position.
        timestamp (float): epoch time when recording started (0 if idle).
        time_limit (float): auto-stop limit in seconds (0 means no limit).
    """

    def __init__(self, buffer_size=1024):
        self.recording = False
        self.buffer_size = buffer_size
        # np.zeros instead of np.empty: a consumer that reads a slot
        # before it was ever written sees a deterministic 0.0 rather than
        # whatever garbage happened to be in memory.
        self.data = np.zeros(self.buffer_size)
        self.counter = 0
        self.timestamp = 0
        self.time_limit = 0

    def startRecording(self):
        """Mark the start of a recording session."""
        self.timestamp = time.time()
        self.recording = True

    def stopRecording(self):
        """Stop the session and rewind the write position."""
        self.timestamp = 0
        self.recording = False
        self.counter = 0

    def clear(self):
        """Rewind the write position so the buffer wraps around.

        The sample storage itself is kept and simply overwritten by the
        producer.  (The previous implementation also copied the buffer
        onto itself, which was a no-op and has been removed.)
        """
        self.counter = 0
global_buffer = GlobalBuffer()
class RecordingPlotter(pg.PlotWidget):
    """Plots sub data from real time plotter.
    Parameters:
        sample_interval (float): sample interval. Default 0.02 seconds.
        time_window (float): size (in time) for the main window. Default 20 seconds.
        main_window (MainWindow): main_window.
        parent (QWidget): parent.
    """
    def __init__(self, sample_interval=0.02, time_window=20., main_window=None, parent=None):
        super(RecordingPlotter, self).__init__(parent)
        self.sample_interval = sample_interval
        self.time_window = time_window
        self.showGrid(x=True, y=True)
        self.setLabel('top', 'Recorded data')
        self.setLabel('left', 'Amplitude', 'V')
        self.setLabel('bottom', 'Time', 's')
        # Curve stays None until initData() is called at record start.
        self.curve = None
        # Kept so getdata() can ask the main window to stop on time limit.
        self.main_window = main_window
        global global_buffer
    def initData(self):
        """Reset plot buffers and (re)start the update timer."""
        # Forces update at 20 FPS, shouldn't be taxing to most systems
        self._interval = int(1 / 20 * 1000)
        self._bufsize = int(self.time_window / self.sample_interval)
        self.x = np.linspace(0.0, self.time_window, self._bufsize)
        self.setDownsampling(mode='peak')
        self.databuffer = collections.deque([0.0] * self._bufsize, self._bufsize)
        self.setClipToView(True)
        # data/x grow in chunks of 5 samples inside updateplot().
        self.data = np.empty(5)
        self.ptr = 0
        self.counter = 0
        self.curve = self.plot(self.x[:self.ptr], self.data[:self.ptr], antialias=True)
        self.timer = QTimer()
        self.timer.timeout.connect(self.updateplot)
        self.timer.start(self._interval)
    def setSampleInterval(self, sample_interval):
        # Takes effect on the next initData() (next recording).
        self.sample_interval = sample_interval
    def setTimeWindow(self, time_window):
        # Changing the window restarts the plot buffers immediately.
        self.time_window = time_window
        self.curve.clear()
        self.initData()
    def getdata(self):
        """Fetch the next recorded sample from the shared global buffer."""
        # Auto-stop once the elapsed plot time reaches the configured limit.
        if global_buffer.time_limit != 0 and self.x[self.ptr] >= global_buffer.time_limit:
            # TODO: this is not a good way to stop because you need the parent,
            # and the parents stop method calls your methods.
            # We need to thing about something different here.
            self.main_window.stop()
        # Never read ahead of the producer: fall back to its position.
        while (self.counter > global_buffer.counter and self.counter > 0):
            self.counter -= 1
        return global_buffer.data[(self.counter % global_buffer.buffer_size)]
    def updateplot(self):
        """Update plot."""
        self.data[self.ptr] = self.getdata()
        self.x[self.ptr + 1] = self.x[self.ptr] + self.sample_interval
        self.ptr += 1
        self.counter += 1
        # Grow data/x arrays in chunks of 5 when the write pointer catches up.
        if self.ptr >= self.data.shape[0]:
            tmp = self.data
            xtmp = self.x
            self.data = np.empty(self.data.shape[0] + 5)
            self.x = np.empty(self.x.shape[0] + 5)
            self.data[:tmp.shape[0]] = tmp
            self.x[:xtmp.shape[0]] = xtmp
        self.curve.setData(self.x[:self.ptr], self.data[:self.ptr])
    def setCurveColor(self, r, g, b):
        """Set curve color"""
        self.curve.setPen(pg.mkPen(color=(r, g, b)))
class RealTimeRecordingPlotter(pg.PlotWidget):
    """Plots data (audio) in real time.
    Parameters:
        sample_interval (float): sample interval. Default 0.02 seconds.
        time_window (float): size (in time) for the main window. Default 20 seconds.
        main_window (MainWindow): main_window.
        parent (QWidget): parent.
    """
    def __init__(self, sample_interval=0.02, time_window=20., parent=None):
        super(RealTimeRecordingPlotter, self).__init__(parent)
        self.sample_interval = sample_interval
        self.time_window = time_window
        self.showGrid(x=True, y=True)
        self.setLabel('top', 'Input Real Time')
        self.setLabel('left', 'Amplitude', 'V')
        self.setLabel('bottom', 'Time', 's')
        # Curve stays None until initData() is called.
        self.curve = None
        global global_buffer
    def initData(self):
        """Initialize data for plotting, open the audio input and start the timer."""
        # self.sample_interval = 0.01
        # Forces update at 20 FPS, shouldn't be taxing to most systems
        self._interval = int(1 / 20 * 1000)
        self._bufsize = int(self.time_window / self.sample_interval)
        self.databuffer = collections.deque([0.0] * self._bufsize, self._bufsize)
        self.x = np.linspace(-self.time_window, 0.0, self._bufsize)
        # FIX: the np.float alias was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin float is the documented replacement (float64).
        self.y = np.zeros(self._bufsize, dtype=float)
        # Initializes audio listener
        self.audio = AudioRecord("output.wav", self.sample_interval)
        try:
            self.audio.begin_audio()
        except IOError as e:
            QMessageBox.information(self,
                                    self.tr('Information'),
                                    self.tr('No input device found, please make sure to plug it before open the '
                                            'program. Please, restart the program and try again.\n{}'.format(e)),
                                    QMessageBox.Ok)
            exit(1)
        # :TODO: needs to be separated the interval of plotting data from the acquire data.
        # Initializes the timer
        self.timer = QTimer()
        self.timer.timeout.connect(self.updateplot)
        self.timer.start(self._interval)
        # Plot for the first time
        self.curve = self.plot(self.x, self.y, pen=(0, 255, 255), antialias=True)
        self.curve.clear()
    def setSampleInterval(self, sample_interval):
        """Sets the sample interval for plotting.
        Parameters:
            sample_interval (float): sample interval in seconds
        """
        self.sample_interval = sample_interval
        self.curve.clear()
        self.initData()
    def setTimeWindow(self, time_window):
        """Sets the time window for plotting.
        Parameters:
            time_window (float): size (in time) for the main window, in seconds.
        """
        self.time_window = time_window
        self.curve.clear()
        self.initData()
    def getdata(self):
        """Gets data for plotting."""
        b = self.audio.get_data_from_audio()[1]
        new = b[0]
        # This clipping of the signal prevents pyqtgraph from breaking due
        # to large random noise when some soundcards are initiated.
        # Prevents input overflow when program starts.
        if new > 1e+150:
            new = 1e+150
        # While a recording session is active, also push the sample into the
        # shared ring buffer consumed by RecordingPlotter.
        if global_buffer.recording is True:
            global_buffer.counter += 1
            if global_buffer.counter >= global_buffer.buffer_size:
                global_buffer.clear()
            global_buffer.data[global_buffer.counter] = new
        return new
    def updateplot(self):
        """Update plot."""
        stp = self.getdata()
        self.databuffer.append(stp)
        self.y[:] = self.databuffer
        self.curve.setData(self.x, self.y)
class MainWindow(QMainWindow):
    """Main window class.
    Parameters:
        parent (QWidget): parent
    """
    def __init__(self, parent=None):
        """Build the UI, wire up actions and create both plot widgets."""
        global global_buffer
        super(MainWindow, self).__init__(parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Check new version
        self.setWindowTitle(app_name + ' ' + version)
        self.update = "NOT CHECKED ..."
        self.checkUpdate()
        self.filepath = ""
        # Initial state is none because there is no data acquired yet
        self.isSaved = None
        # Sample interval should be 0.02s to not overflow in XP
        self.ui.doubleSpinBoxSampleInterval.setMinimum(0.02)
        self.ui.doubleSpinBoxSampleInterval.setMaximum(0.5)
        self.ui.doubleSpinBoxSampleInterval.setValue(0.02)
        self.ui.doubleSpinBoxSampleInterval.setSingleStep(0.01)
        # Connecting actions
        # File actions
        # self.ui.actionNew.triggered.connect(self.newFile)
        # For now it cannot open a file
        # self.ui.actionOpen.triggered.connect(self.openFile)
        # self.ui.actionSave.triggered.connect(self.saveFile)
        self.ui.actionSave_As.triggered.connect(self.saveFileAs)
        self.ui.actionSave_As.setEnabled(False)
        self.ui.actionPrint_graph.triggered.connect(self.saveImageAs)
        self.ui.actionPrint_graph.setEnabled(False)
        # Acquire actions
        self.ui.actionRecord.triggered.connect(self.record)
        self.ui.actionRecord.setCheckable(True)
        self.ui.actionPause.triggered.connect(self.pause)
        self.ui.actionPause.setCheckable(True)
        self.ui.actionPause.setEnabled(False)
        self.ui.actionStop.triggered.connect(self.stop)
        self.ui.actionStop.setEnabled(False)
        # Tools actions
        self.ui.actionConvert_Wav_to_Dat.triggered.connect(self.callTools)
        # Program actions
        self.ui.actionQuit.triggered.connect(self.close)
        self.ui.actionAbout_Wavy.triggered.connect(self.about)
        # Plot widget
        # Top plot: continuous live input; bottom plot: the recorded slice.
        self.plot_widget = RealTimeRecordingPlotter(sample_interval=0.02, time_window=20.)
        self.plot_widget.initData()
        self.ui.gridLayout_2.addWidget(self.plot_widget, 0, 1)
        self.plot_widget_rec = RecordingPlotter(sample_interval=0.02, time_window=5., main_window=self)
        self.ui.gridLayout_2.addWidget(self.plot_widget_rec, 1, 1)
        # Inputs
        self.ui.doubleSpinBoxSampleInterval.valueChanged.connect(self.plot_widget.setSampleInterval)
        self.ui.doubleSpinBoxSampleInterval.valueChanged.connect(self.plot_widget_rec.setSampleInterval)
        self.ui.doubleSpinBoxSampleInterval.valueChanged.connect(self.setSampleRate)
        # self.ui.doubleSpinBoxSampleRate.valueChanged.connect(self.setSampleInterval)
        self.ui.spinBoxWindowTime.valueChanged.connect(self.plot_widget.setTimeWindow)
        self.setSampleRate(self.ui.doubleSpinBoxSampleInterval.value())
    def checkUpdate(self):
        """Check update from internet.

        Best-effort: any network or parse failure is silently ignored and
        the status stays "NOT CHECKED ...".
        """
        url = 'https://api.github.com/repos/dpizetta/wavy/releases/latest'
        try:
            response = urllib.request.urlopen(url, timeout=20)
            tag_version = json.loads(response.read())
        except Exception:
            pass
        else:
            # NOTE(review): this is a lexicographic string comparison, not a
            # semantic version compare ('1.10' < '1.9') — confirm intended.
            if str(version) >= str(tag_version['tag_name'][1:]):
                self.update = "Up to date!"
            else:
                self.update = "New version ({}) is available!".format(str(tag_version['tag_name']))
                QMessageBox.information(self,
                                        self.tr('Information'),
                                        self.tr('<p>Oh, there is a new version ({}) avaliable.\n'
                                                'Go to <a href="https://github.com/dpizetta/wavy/releases/latest">'
                                                'download!</a></p>.'.format(tag_version['tag_name'])),
                                        QMessageBox.Ok)
        self.ui.labelAbout.setText(self.tr(about + "\nVersion status: " + self.update))
    def setSampleRate(self, sample_interval):
        """Sets sample rate."""
        self.ui.doubleSpinBoxSampleRate.setValue(1. / sample_interval)
    def setSampleInterval(self, sample_rate):
        """Sets sample interval."""
        self.ui.doubleSpinBoxSampleInterval.setValue(1. / sample_rate)
    def callTools(self):
        """Call converting tool."""
        dlg = ConvertWave2Data()
        dlg.exec_()
    def createFileName(self):
        """Construct a new file name to save the data."""
        # Creates auto naming filename
        filename = 'new_wavy_data_' + time.strftime("%Y%m%d%H%M%S", time.gmtime())
        # Gets the current directory
        # base_path = os.path.abspath(".")
        self.filepath = os.path.join(self.base_path, filename)
        self.setWindowFilePath(self.filepath)
    def record(self):
        """Starts acquiring."""
        # Create a new filename for the current acquisition
        self.createFileName()
        # Checks if is saved before start a new recording
        if self.isSaved is False:
            answer = QMessageBox.question(
                self,
                self.tr('Question'),
                self.tr('Do you want to save your data before start a new record?'),
                QMessageBox.Yes | QMessageBox.No)
            if answer == QMessageBox.Yes:
                self.saveFileAs()
        if self.plot_widget_rec.curve is not None:
            self.plot_widget_rec.curve.clear()
        self.plot_widget_rec.initData()
        self.plot_widget_rec.setCurveColor(255, 0, 0)
        self.plot_widget_rec.setLabel('top', 'Recording ...')
        # Set enabled buttons
        self.ui.actionPause.setEnabled(True)
        self.ui.actionStop.setEnabled(True)
        self.ui.actionRecord.setEnabled(False)
        # Set enabled inputs
        self.ui.spinBoxWindowTime.setEnabled(False)
        self.ui.doubleSpinBoxSampleInterval.setEnabled(False)
        self.ui.doubleSpinBoxSampleRate.setEnabled(False)
        self.ui.spinBoxStopRecordingAfter.setEnabled(False)
        # Set enabled tool bar and menu
        self.ui.toolBarFile.setEnabled(False)
        self.ui.menuFile.setEnabled(False)
        self.ui.menuTools.setEnabled(False)
        global_buffer.time_limit = self.ui.spinBoxStopRecordingAfter.value()
        global_buffer.startRecording()
        self.isSaved = False
    def pause(self):
        """Pauses acquiring."""
        # TODO: We need to discuss if this is needed
        # because the time is not correctly saved
        if self.ui.actionPause.isChecked():
            # Stopping changing color and label
            self.plot_widget_rec.timer.stop()
            self.plot_widget_rec.setCurveColor(255, 153, 0)
            self.plot_widget_rec.setLabel('top', 'Paused ...')
            global_buffer.stopRecording()
        else:
            # Starting changing color and label
            self.plot_widget_rec.timer.start()
            self.plot_widget_rec.setCurveColor(255, 0, 0)
            self.plot_widget_rec.setLabel('top', 'Recording ...')
            global_buffer.startRecording()
        # Set enabled tool bar
        self.ui.toolBarFile.setEnabled(False)
        self.ui.menuFile.setEnabled(False)
        self.ui.menuTools.setEnabled(False)
    def stop(self):
        """Stops acquiring."""
        # Stopping changing color and label
        self.plot_widget_rec.timer.stop()
        self.plot_widget_rec.setCurveColor(0, 255, 0)
        self.plot_widget_rec.setLabel('top', 'Stopped ...')
        # Set checked
        self.ui.actionRecord.setChecked(False)
        self.ui.actionPause.setChecked(False)
        # Set enabled buttons
        self.ui.actionPause.setEnabled(False)
        self.ui.actionStop.setEnabled(False)
        self.ui.actionRecord.setEnabled(True)
        # Set enabled inputs
        self.ui.doubleSpinBoxSampleInterval.setEnabled(True)
        self.ui.doubleSpinBoxSampleRate.setEnabled(True)
        self.ui.spinBoxWindowTime.setEnabled(True)
        self.ui.spinBoxStopRecordingAfter.setEnabled(True)
        # Set enabled tool bar
        self.ui.toolBarFile.setEnabled(True)
        self.ui.menuFile.setEnabled(True)
        self.ui.menuTools.setEnabled(True)
        self.ui.actionSave_As.setEnabled(True)
        self.ui.actionPrint_graph.setEnabled(True)
        global_buffer.stopRecording()
    def savePNGFile(self, filepath):
        """Saves an image."""
        # This extension should not be removed
        # Exporter needs the extension to save correctly.
        filepath += ".png"
        logging.info('File path to save image: %s', filepath)
        # Export on a white background, then restore the dark one.
        self.plot_widget_rec.setBackground('w')
        exporter = pg.exporters.ImageExporter(self.plot_widget_rec.plotItem)
        self.plot_widget_rec.setBackground('k')
        exporter.export(filepath)
    def saveCSVFile(self, filepath):
        """Saves a data file."""
        # This extension should not be removed
        # Exporter needs the extension to save correctly.
        filepath += ".csv"
        logging.info('File path to save data: %s', filepath)
        exporter = pg.exporters.CSVExporter(self.plot_widget_rec.plotItem)
        exporter.export(filepath)
    def getDataFolder(self):
        """Get data folder option.

        Called on first run (no wavytool.config yet): asks the user for a
        data folder and persists the choice, falling back to '.'.
        """
        answer = QMessageBox.question(self,
                                     self.tr('Question'),
                                     self.tr('It seems the first time you run WavyTool. Do you want to choose '
                                             'a folder to keep exported data?'),
                                     QMessageBox.Yes | QMessageBox.No)
        if answer == QMessageBox.Yes:
            path = QFileDialog.getExistingDirectory(self,
                                                     self.tr('Data folder'),
                                                     os.path.expanduser('~'))
            if path:
                try:
                    # This string converting is needed because the return is a QString
                    self.base_path = os.path.splitext(str(path))[0]
                    with open('wavytool.config', 'w') as outfile:
                        json.dump({'data_folder': self.base_path}, outfile)
                except Exception as e:
                    QMessageBox.critical(self,
                                         self.tr('Critical'),
                                         self.tr('There was a problem set the default folder to save data:\n '
                                                 '{}'.format(str(e))),
                                         QMessageBox.Ok)
                else:
                    logging.info('The default folder is: %s', self.base_path)
                    QMessageBox.information(self,
                                            self.tr('Information'),
                                            self.tr('Default folder to save data was set up to:\n'
                                                    '{}.'.format(self.base_path)),
                                            QMessageBox.Ok)
        else:
            self.base_path = '.'
    def saveImageAs(self):
        """Saves image as."""
        # NOTE(review): under Qt5 bindings getSaveFileName returns a
        # (filename, filter) tuple, so `if path` and str(path) operate on the
        # tuple — verify the cancel path and the resulting file name.
        path = QFileDialog.getSaveFileName(self,
                                            self.tr('Export recorded image ...'),
                                            os.path.splitext(self.filepath)[0] + '.png',
                                            self.tr("Image File (*.png)"))
        if path:
            try:
                # This string converting is needed because the return is a QString
                self.filepath = os.path.splitext(str(path))[0]
                self.savePNGFile(self.filepath)
            except Exception as e:
                QMessageBox.critical(self,
                                     self.tr('Critical'),
                                     self.tr('There was a problem to save image:\n {}'.format(str(e))),
                                     QMessageBox.Ok)
            else:
                logging.info('The image was saved in the file: %s', self.filepath)
                QMessageBox.information(self,
                                        self.tr('Information'),
                                        self.tr('Image was successfully exported.'),
                                        QMessageBox.Ok)
    def saveFileAs(self):
        """Saves data file as."""
        # NOTE(review): same Qt5 tuple-return caveat as saveImageAs().
        path = QFileDialog.getSaveFileName(self,
                                            self.tr('Save recorded data ...'),
                                            os.path.splitext(self.filepath)[0] + '.csv',
                                            self.tr("Data File (*.csv)"))
        if path:
            try:
                # This string converting is needed because the return is a QString
                self.filepath = os.path.splitext(str(path))[0]
                self.saveCSVFile(self.filepath)
            except Exception as e:
                self.isSaved = False
                QMessageBox.critical(self,
                                     self.tr('Critical'),
                                     self.tr('There was a problem to save data\n {}'.format(str(e))),
                                     QMessageBox.Ok)
            else:
                self.isSaved = True
                # self.ui.actionSave_As.setEnabled(False)
                logging.info('The data was saved in the file: %s', self.filepath)
                QMessageBox.information(self,
                                        self.tr('Information'),
                                        self.tr('Data was successfully saved.\n\nDATA SAVED IS REPRESENTED '
                                                'BY THE WINDOW RECORDING, IF YOU APPLY ZOOM ON IT, JUST '
                                                'DATA VISIBLE WILL BE SAVED!'),
                                        QMessageBox.Ok)
    def about(self):
        """Show the dialog about."""
        QMessageBox.about(self, self.tr('About'),
                          self.tr(about))
    def closeQuestion(self):
        """Asks about to close.

        Returns True when the user confirms exit.
        """
        if self.isSaved is False:
            answer = QMessageBox.question(
                self,
                self.tr('Question'),
                self.tr('Do you want to save your data before exit?'),
                QMessageBox.Yes | QMessageBox.No)
            if answer == QMessageBox.Yes:
                self.saveFileAs()
        answer = QMessageBox.question(self,
                                      self.tr('Close'),
                                      self.tr('Do you want to exit?'),
                                      QMessageBox.Yes | QMessageBox.No,
                                      QMessageBox.No)
        return answer == QMessageBox.Yes
    def closeEvent(self, event):
        """Re implements close event."""
        if self.closeQuestion():
            # Stop timers and release the audio device before closing.
            self.plot_widget.timer.stop()
            self.plot_widget.audio.end_audio()
            if self.plot_widget_rec.curve is not None:
                self.plot_widget_rec.timer.stop()
            event.accept()
        else:
            event.ignore()
|
[
"qdarkstyle.load_stylesheet_from_environment",
"numpy.empty",
"pyqtgraph.exporters.CSVExporter",
"os.path.join",
"collections.deque",
"os.path.expanduser",
"qtpy.QtWidgets.QSplashScreen",
"logging.warning",
"qtpy.QtCore.QTimer",
"numpy.linspace",
"pyqtgraph.mkPen",
"wavytool.mw_wavy.Ui_MainWindow",
"json.dump",
"qtpy.QtGui.QPixmap",
"time.sleep",
"wavytool.gui_wav2dat.ConvertWave2Data",
"pyqtgraph.exporters.ImageExporter",
"json.load",
"logging.basicConfig",
"time.gmtime",
"numpy.zeros",
"wavytool.core_wavy.AudioRecord",
"time.time",
"logging.info",
"os.path.splitext",
"qtpy.QtWidgets.QApplication"
] |
[((1169, 1209), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (1188, 1209), False, 'import logging\n'), ((1242, 1355), 'logging.info', 'logging.info', (['"""Using Qt binding (QtPy/PyQtGraph): %s"""', "(os.environ['QT_API'], os.environ['PYQTGRAPH_QT_LIB'])"], {}), "('Using Qt binding (QtPy/PyQtGraph): %s', (os.environ['QT_API'],\n os.environ['PYQTGRAPH_QT_LIB']))\n", (1254, 1355), False, 'import logging\n'), ((1920, 1938), 'qtpy.QtWidgets.QApplication', 'QApplication', (['args'], {}), '(args)\n', (1932, 1938), False, 'from qtpy.QtWidgets import QApplication, QFileDialog, QMainWindow, QMessageBox, QSplashScreen\n'), ((2537, 2574), 'qtpy.QtGui.QPixmap', 'QPixmap', (['"""wavytool/images/symbol.png"""'], {}), "('wavytool/images/symbol.png')\n", (2544, 2574), False, 'from qtpy.QtGui import QPixmap\n'), ((2588, 2609), 'qtpy.QtWidgets.QSplashScreen', 'QSplashScreen', (['pixmap'], {}), '(pixmap)\n', (2601, 2609), False, 'from qtpy.QtWidgets import QApplication, QFileDialog, QMainWindow, QMessageBox, QSplashScreen\n'), ((2623, 2634), 'time.time', 'time.time', ([], {}), '()\n', (2632, 2634), False, 'import time\n'), ((2781, 2798), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (2791, 2798), False, 'import time\n'), ((3460, 3486), 'numpy.empty', 'np.empty', (['self.buffer_size'], {}), '(self.buffer_size)\n', (3468, 3486), True, 'import numpy as np\n'), ((3623, 3634), 'time.time', 'time.time', ([], {}), '()\n', (3632, 3634), False, 'import time\n'), ((5008, 5057), 'numpy.linspace', 'np.linspace', (['(0.0)', 'self.time_window', 'self._bufsize'], {}), '(0.0, self.time_window, self._bufsize)\n', (5019, 5057), True, 'import numpy as np\n'), ((5126, 5181), 'collections.deque', 'collections.deque', (['([0.0] * self._bufsize)', 'self._bufsize'], {}), '([0.0] * self._bufsize, self._bufsize)\n', (5143, 5181), False, 'import collections\n'), ((5235, 5246), 'numpy.empty', 'np.empty', (['(5)'], {}), '(5)\n', (5243, 
5246), True, 'import numpy as np\n'), ((5402, 5410), 'qtpy.QtCore.QTimer', 'QTimer', ([], {}), '()\n', (5408, 5410), False, 'from qtpy.QtCore import QTimer\n'), ((8113, 8168), 'collections.deque', 'collections.deque', (['([0.0] * self._bufsize)', 'self._bufsize'], {}), '([0.0] * self._bufsize, self._bufsize)\n', (8130, 8168), False, 'import collections\n'), ((8186, 8236), 'numpy.linspace', 'np.linspace', (['(-self.time_window)', '(0.0)', 'self._bufsize'], {}), '(-self.time_window, 0.0, self._bufsize)\n', (8197, 8236), True, 'import numpy as np\n'), ((8254, 8293), 'numpy.zeros', 'np.zeros', (['self._bufsize'], {'dtype': 'np.float'}), '(self._bufsize, dtype=np.float)\n', (8262, 8293), True, 'import numpy as np\n'), ((8353, 8400), 'wavytool.core_wavy.AudioRecord', 'AudioRecord', (['"""output.wav"""', 'self.sample_interval'], {}), "('output.wav', self.sample_interval)\n", (8364, 8400), False, 'from wavytool.core_wavy import AudioRecord\n'), ((9030, 9038), 'qtpy.QtCore.QTimer', 'QTimer', ([], {}), '()\n', (9036, 9038), False, 'from qtpy.QtCore import QTimer\n'), ((10961, 10976), 'wavytool.mw_wavy.Ui_MainWindow', 'Ui_MainWindow', ([], {}), '()\n', (10974, 10976), False, 'from wavytool.mw_wavy import Ui_MainWindow\n'), ((15190, 15208), 'wavytool.gui_wav2dat.ConvertWave2Data', 'ConvertWave2Data', ([], {}), '()\n', (15206, 15208), False, 'from wavytool.gui_wav2dat import ConvertWave2Data\n'), ((15544, 15582), 'os.path.join', 'os.path.join', (['self.base_path', 'filename'], {}), '(self.base_path, filename)\n', (15556, 15582), False, 'import os\n'), ((19381, 19434), 'logging.info', 'logging.info', (['"""File path to save image: %s"""', 'filepath'], {}), "('File path to save image: %s', filepath)\n", (19393, 19434), False, 'import logging\n'), ((19502, 19559), 'pyqtgraph.exporters.ImageExporter', 'pg.exporters.ImageExporter', (['self.plot_widget_rec.plotItem'], {}), '(self.plot_widget_rec.plotItem)\n', (19528, 19559), True, 'import pyqtgraph as pg\n'), ((19853, 19905), 
'logging.info', 'logging.info', (['"""File path to save data: %s"""', 'filepath'], {}), "('File path to save data: %s', filepath)\n", (19865, 19905), False, 'import logging\n'), ((19925, 19980), 'pyqtgraph.exporters.CSVExporter', 'pg.exporters.CSVExporter', (['self.plot_widget_rec.plotItem'], {}), '(self.plot_widget_rec.plotItem)\n', (19949, 19980), True, 'import pyqtgraph as pg\n'), ((2225, 2314), 'logging.warning', 'logging.warning', (['"""No dark theme installed, use \'pip install qdarkstyle\' to install."""'], {}), '(\n "No dark theme installed, use \'pip install qdarkstyle\' to install.")\n', (2240, 2314), False, 'import logging\n'), ((2748, 2759), 'time.time', 'time.time', ([], {}), '()\n', (2757, 2759), False, 'import time\n'), ((3034, 3054), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (3043, 3054), False, 'import json\n'), ((3118, 3170), 'logging.info', 'logging.info', (['"""Data folder is: %s"""', 'window.base_path'], {}), "('Data folder is: %s', window.base_path)\n", (3130, 3170), False, 'import logging\n'), ((6616, 6648), 'numpy.empty', 'np.empty', (['(self.data.shape[0] + 5)'], {}), '(self.data.shape[0] + 5)\n', (6624, 6648), True, 'import numpy as np\n'), ((6670, 6699), 'numpy.empty', 'np.empty', (['(self.x.shape[0] + 5)'], {}), '(self.x.shape[0] + 5)\n', (6678, 6699), True, 'import numpy as np\n'), ((6952, 6977), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'color': '(r, g, b)'}), '(color=(r, g, b))\n', (6960, 6977), True, 'import pyqtgraph as pg\n'), ((2364, 2409), 'qdarkstyle.load_stylesheet_from_environment', 'qdarkstyle.load_stylesheet_from_environment', ([], {}), '()\n', (2407, 2409), False, 'import qdarkstyle\n'), ((15425, 15438), 'time.gmtime', 'time.gmtime', ([], {}), '()\n', (15436, 15438), False, 'import time\n'), ((20678, 20701), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (20696, 20701), False, 'import os\n'), ((22842, 22908), 'logging.info', 'logging.info', (['"""The image was saved in the file: 
%s"""', 'self.filepath'], {}), "('The image was saved in the file: %s', self.filepath)\n", (22854, 22908), False, 'import logging\n'), ((24194, 24259), 'logging.info', 'logging.info', (['"""The data was saved in the file: %s"""', 'self.filepath'], {}), "('The data was saved in the file: %s', self.filepath)\n", (24206, 24259), False, 'import logging\n'), ((21471, 21528), 'logging.info', 'logging.info', (['"""The default folder is: %s"""', 'self.base_path'], {}), "('The default folder is: %s', self.base_path)\n", (21483, 21528), False, 'import logging\n'), ((22168, 22199), 'os.path.splitext', 'os.path.splitext', (['self.filepath'], {}), '(self.filepath)\n', (22184, 22199), False, 'import os\n'), ((23392, 23423), 'os.path.splitext', 'os.path.splitext', (['self.filepath'], {}), '(self.filepath)\n', (23408, 23423), False, 'import os\n'), ((20990, 21041), 'json.dump', 'json.dump', (["{'data_folder': self.base_path}", 'outfile'], {}), "({'data_folder': self.base_path}, outfile)\n", (20999, 21041), False, 'import json\n')]
|
# coding=utf-8
import argparse
from RedisTCPClient import RedisTCPClient
if __name__ == '__main__':
    # Minimal interactive shell against a Redis server, driven through the
    # project's RedisTCPClient wrapper.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--host',
        required=True,
        help='host name of redis.'
    )
    parser.add_argument(
        '--port',
        required=True,
        # Fix: parse the port here so a non-numeric value produces a clean
        # argparse usage error instead of an uncaught ValueError traceback
        # from the old int(port) conversion below.
        type=int,
        help='port number of redis.'
    )
    args = parser.parse_args()
    client = RedisTCPClient(args.host, args.port)
    help_info = (
        'Input redis command and press enter to execute.\n'
        'Input \'exit\' or \'quit\' to stop this program.\n'
    )
    print(help_info)
    # Read-eval-print loop; 'exit'/'quit' ends the session.
    while True:
        command = input('> ')
        if command in ('exit', 'quit'):
            break
        print(client.run_command(command))
|
[
"argparse.ArgumentParser"
] |
[((117, 142), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (140, 142), False, 'import argparse\n')]
|
import argparse
import logging
import os
import random
import socket
import subprocess
import sys
import tempfile
import requests
import retrying
from kubernetes import client, config
from kubernetes.client.rest import ApiException
# How long to wait for cluster resources to become ready, and the poll period.
WAIT_TIMEOUT_SEC: int = 5 * 60
WAIT_SLEEP_SEC: int = 5
# Initialization & basic validation
# Root logger at NOTSET lets everything through; the kubernetes client is
# noisy below INFO, so it is capped while this script's own logger stays DEBUG.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.NOTSET)
logging.getLogger("kubernetes").setLevel(logging.INFO)
logger = logging.getLogger("main")
logger.setLevel(logging.DEBUG)
# Single positional CLI argument: the kubeconfig of the target dev cluster.
parser = argparse.ArgumentParser(description='Service discovery and KV store using consul and friend.')
parser.add_argument('kubeconfig', help='Full path to the configuration of the development Kubernetes cluster')
args = parser.parse_args()
config.load_kube_config(config_file=args.kubeconfig)
v1 = client.CoreV1Api()
# It is required because for this demo local volume is used
# Local volumes are required affinity settings
# Pick the first schedulable node; its hostname label pins the local volume.
local_volume_node_hostname = ""
try:
    list_nodes = v1.list_node()
    for node in list_nodes.items:
        if node.spec.unschedulable:
            continue
        labels = dict(node.metadata.labels)
        local_volume_node_hostname = labels["kubernetes.io/hostname"]
        break
except Exception as e:
    # Any API failure here is fatal: the rest of the script needs a node name.
    logger.error("Unable to list namespaces of the current Kubernetes cluster: %s", e, exc_info=1)
    exit(-1)
def wait_for_statefulset_available_replicas(name,
                                            namespace,
                                            count=1,
                                            timeout_sec=WAIT_TIMEOUT_SEC,
                                            wait_sec=WAIT_SLEEP_SEC):
    """Block until the StatefulSet *name* in *namespace* reports at least
    *count* ready replicas.

    Polls every *wait_sec* seconds; gives up (the retrying decorator raises)
    after *timeout_sec* seconds without success.
    """
    def _replicas_available(resource, count):
        # Ready only once the status is populated and enough replicas are up.
        return (resource is not None and resource.status.ready_replicas is not None
                and resource.status.ready_replicas >= count)
    # retrying repeats the call while the predicate on the *result* is truthy,
    # i.e. while the replicas are not yet available.
    @retrying.retry(retry_on_result=lambda r: not _replicas_available(r, count),
                    stop_max_delay=timeout_sec * 1000,
                    wait_fixed=wait_sec * 1000)
    def _wait_for_statefulset_available_replicas():
        statefulset = client.AppsV1Api().read_namespaced_stateful_set(name, namespace)
        logger.debug('Waiting for statefulset %s to have %s available '
                     'replicas, current count %s', statefulset.metadata.name, count, statefulset.status.ready_replicas)
        return statefulset
    logger.info("Waiting for statefulset {}".format(name))
    _wait_for_statefulset_available_replicas()
def run(cmd, dir=".", env=None):
    """Run *cmd* through the shell in directory *dir* and wait for it.

    :param cmd: shell command line
    :param dir: working directory for the child process
    :param env: environment mapping for the child; defaults to a fresh copy of
        ``os.environ`` taken at call time. (The previous default,
        ``env=os.environ.copy()``, was evaluated once at import time, so every
        call shared a stale snapshot of the environment.)
    :return: the child's exit code
    """
    if env is None:
        env = os.environ.copy()
    logger.info(">>>>>>>> run '{}' at '{}'".format(cmd, dir))
    child = subprocess.Popen(cmd, cwd=dir, shell=True, env=env,
                             stdout=sys.stdout, stderr=sys.stderr)
    # Returning the exit code is backward compatible (callers ignored None).
    return child.wait()
def background(cmd, dir="."):
    """Spawn *cmd* through the shell in *dir* and return the Popen handle
    without waiting for it to finish."""
    logger.info(">>>>>>>> spawn '{}' at '{}'".format(cmd, dir))
    child = subprocess.Popen(cmd, cwd=dir, shell=True,
                             stdout=sys.stdout, stderr=sys.stderr)
    return child
def pick_random_port():
    """Return a random port in [10000, 60000] that is free on both TCP and UDP
    on 127.0.0.1, or terminate the process after 8 failed attempts.

    Fixes over the previous version: the TCP probe socket was leaked on the
    success path, the UDP check only ran for *busy* TCP ports, a port whose
    UDP bind failed was *accepted* (break), and after 8 busy attempts the last
    (busy) port was silently returned.
    """
    random.seed()
    for _ in range(8):
        port = random.randint(10000, 60000)
        # TCP probe: a successful connect means something is listening.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as tcp_probe:
            if tcp_probe.connect_ex(('127.0.0.1', port)) == 0:
                continue  # TCP port busy -- try another one
        # UDP probe: a failed bind means the port is taken.
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as udp_probe:
            try:
                udp_probe.bind(('127.0.0.1', port))
            except Exception:
                continue  # UDP port busy -- try another one
        return port
    logging.error('FAIL')
    sys.exit('Could not find a random free port between 10000 and 60000')
def download_slave_jar(url, timeout_sec=WAIT_TIMEOUT_SEC, wait_sec=WAIT_SLEEP_SEC):
    """Download the Jenkins agent jar from *url*, retrying until it succeeds.

    :param url: full URL of slave.jar on the Jenkins master
    :param timeout_sec: total time budget before the retry decorator gives up
    :param wait_sec: pause between attempts
    :return: path of the temporary .jar file (caller owns the file)

    Fixes: a non-200 response used to be written to the jar file and returned
    (e.g. a 503 page while Jenkins boots); now it raises so the decorator
    retries. The temp file descriptor is also closed even when the write
    fails, so retries no longer leak fds.
    """
    @retrying.retry(stop_max_delay=timeout_sec * 1000, wait_fixed=wait_sec * 1000)
    def _download_slave_jar():
        logging.info("download slave.jar from %s", url)
        r = requests.get(url, allow_redirects=True)
        r.raise_for_status()  # retry on 4xx/5xx instead of saving the error page
        slave_jar_file, slave_jar_filename = tempfile.mkstemp(".jar", "slave_")
        try:
            os.write(slave_jar_file, r.content)
        finally:
            os.close(slave_jar_file)
        logging.info("save to %s", slave_jar_filename)
        return slave_jar_filename
    return _download_slave_jar()
def ping_microservice(url, timeout_sec=WAIT_TIMEOUT_SEC, wait_sec=WAIT_SLEEP_SEC):
    """Block until an HTTP GET on *url* answers with status 200.

    Retries every *wait_sec* seconds; the retrying decorator gives up after
    *timeout_sec* seconds.
    """
    @retrying.retry(stop_max_delay=timeout_sec * 1000, wait_fixed=wait_sec * 1000)
    def _ping_microservice():
        logging.info("ping %s", url)
        response = requests.get(url, allow_redirects=True)
        if response.status_code != 200:
            raise Exception("unreachable")
    _ping_microservice()
print("\n\nYou could set the next environment variables:")
print("JENKINS_UI_PORT")
print("JENKINS_AGENT_PORT")
print("CONSUL_UI_PORT")
print("CONSUL_DNS_PORT")
# Update kubernetes resources
jenkins_ui_port = os.getenv("JENKINS_UI_PORT", pick_random_port())
jenkins_agent_port = os.getenv("JENKINS_AGENT_PORT", pick_random_port())
consul_ui_port = os.getenv("CONSUL_UI_PORT", pick_random_port())
consul_dns_port = os.getenv("CONSUL_DNS_PORT", pick_random_port())
# Terraform
run("terraform init", "./terraform/dev")
terraform_env = os.environ.copy()
terraform_env["TF_VAR_jenkins_ui_port"] = str(jenkins_ui_port)
terraform_env["TF_VAR_jenkins_agent_port"] = str(jenkins_agent_port)
terraform_env["TF_VAR_local_volume_node_hostname"] = local_volume_node_hostname
terraform_env["TF_VAR_consul_ui_port"] = str(consul_ui_port)
terraform_env["TF_VAR_consul_dns_port"] = str(consul_dns_port)
run("terraform apply -auto-approve", "./terraform/dev", terraform_env)
# Kubernetes
wait_for_statefulset_available_replicas("jenkins", "jenkins")
jenkins_ui = background("kubectl -n jenkins port-forward svc/jenkins {}".format(jenkins_ui_port))
jenkins_agent = background("kubectl -n jenkins port-forward svc/jenkins-agent {}".format(jenkins_agent_port))
slave_jar = download_slave_jar("http://localhost:{}/jnlpJars/slave.jar".format(jenkins_ui_port))
slave_connection_url = "http://localhost:{}/computer/localhost/jenkins-agent.jnlp".format(jenkins_ui_port)
jenkins_slave = background("java -jar {} -jnlpUrl {} -jnlpCredentials admin:admin".format(slave_jar, slave_connection_url))
ping_microservice("http://localhost:9003/ping")
print("\n\n\nMicroservice is deployed\n")
print("set keys: curl 'localhost:9003/set-value?b=x&a=y'")
print("get keys: curl 'localhost:9003/get-value?b&a'")
print("Jenkins UI: localhost:{} (admin:admin)".format(jenkins_ui_port))
print("Consul UI: localhost:{}".format(consul_ui_port))
print("Consul DNS: localhost:{}".format(consul_dns_port))
print("test service discovery: dig @localhost -p {} jenkins-consul-friends.service.consul".format(consul_dns_port))
input("Press Enter to finish...\n\n\n")
jenkins_slave.kill()
jenkins_ui.kill()
jenkins_agent.kill()
jenkins_slave.wait()
jenkins_ui.wait()
jenkins_agent.wait()
|
[
"argparse.ArgumentParser",
"os.environ.copy",
"socket.socket",
"os.close",
"logging.error",
"random.randint",
"kubernetes.client.CoreV1Api",
"kubernetes.client.AppsV1Api",
"random.seed",
"requests.get",
"retrying.retry",
"subprocess.Popen",
"os.write",
"sys.exit",
"logging.basicConfig",
"tempfile.mkstemp",
"kubernetes.config.load_kube_config",
"logging.info",
"logging.getLogger"
] |
[((326, 423), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.NOTSET'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.NOTSET)\n", (345, 423), False, 'import logging\n'), ((484, 509), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (501, 509), False, 'import logging\n'), ((550, 649), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Service discovery and KV store using consul and friend."""'}), "(description=\n 'Service discovery and KV store using consul and friend.')\n", (573, 649), False, 'import argparse\n'), ((783, 835), 'kubernetes.config.load_kube_config', 'config.load_kube_config', ([], {'config_file': 'args.kubeconfig'}), '(config_file=args.kubeconfig)\n', (806, 835), False, 'from kubernetes import client, config\n'), ((841, 859), 'kubernetes.client.CoreV1Api', 'client.CoreV1Api', ([], {}), '()\n', (857, 859), False, 'from kubernetes import client, config\n'), ((5176, 5193), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (5191, 5193), False, 'import os\n'), ((2674, 2691), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (2689, 2691), False, 'import os\n'), ((2764, 2857), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'cwd': 'dir', 'shell': '(True)', 'env': 'env', 'stdout': 'sys.stdout', 'stderr': 'sys.stderr'}), '(cmd, cwd=dir, shell=True, env=env, stdout=sys.stdout,\n stderr=sys.stderr)\n', (2780, 2857), False, 'import subprocess\n'), ((2974, 3059), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'cwd': 'dir', 'shell': '(True)', 'stdout': 'sys.stdout', 'stderr': 'sys.stderr'}), '(cmd, cwd=dir, shell=True, stdout=sys.stdout, stderr=sys.stderr\n )\n', (2990, 3059), False, 'import subprocess\n'), ((3098, 3111), 'random.seed', 'random.seed', ([], {}), '()\n', (3109, 3111), False, 'import random\n'), ((3755, 3832), 'retrying.retry', 'retrying.retry', ([], {'stop_max_delay': 
'(timeout_sec * 1000)', 'wait_fixed': '(wait_sec * 1000)'}), '(stop_max_delay=timeout_sec * 1000, wait_fixed=wait_sec * 1000)\n', (3769, 3832), False, 'import retrying\n'), ((4341, 4418), 'retrying.retry', 'retrying.retry', ([], {'stop_max_delay': '(timeout_sec * 1000)', 'wait_fixed': '(wait_sec * 1000)'}), '(stop_max_delay=timeout_sec * 1000, wait_fixed=wait_sec * 1000)\n', (4355, 4418), False, 'import retrying\n'), ((420, 451), 'logging.getLogger', 'logging.getLogger', (['"""kubernetes"""'], {}), "('kubernetes')\n", (437, 451), False, 'import logging\n'), ((3150, 3178), 'random.randint', 'random.randint', (['(10000)', '(60000)'], {}), '(10000, 60000)\n', (3164, 3178), False, 'import random\n'), ((3191, 3240), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (3204, 3240), False, 'import socket\n'), ((3358, 3406), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (3371, 3406), False, 'import socket\n'), ((3548, 3569), 'logging.error', 'logging.error', (['"""FAIL"""'], {}), "('FAIL')\n", (3561, 3569), False, 'import logging\n'), ((3578, 3647), 'sys.exit', 'sys.exit', (['"""Could not find a random free port between 10000 and 60000"""'], {}), "('Could not find a random free port between 10000 and 60000')\n", (3586, 3647), False, 'import sys\n'), ((3872, 3919), 'logging.info', 'logging.info', (['"""download slave.jar from %s"""', 'url'], {}), "('download slave.jar from %s', url)\n", (3884, 3919), False, 'import logging\n'), ((3932, 3971), 'requests.get', 'requests.get', (['url'], {'allow_redirects': '(True)'}), '(url, allow_redirects=True)\n', (3944, 3971), False, 'import requests\n'), ((4017, 4051), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".jar"""', '"""slave_"""'], {}), "('.jar', 'slave_')\n", (4033, 4051), False, 'import tempfile\n'), ((4060, 4095), 'os.write', 'os.write', (['slave_jar_file', 'r.content'], {}), 
'(slave_jar_file, r.content)\n', (4068, 4095), False, 'import os\n'), ((4104, 4128), 'os.close', 'os.close', (['slave_jar_file'], {}), '(slave_jar_file)\n', (4112, 4128), False, 'import os\n'), ((4137, 4183), 'logging.info', 'logging.info', (['"""save to %s"""', 'slave_jar_filename'], {}), "('save to %s', slave_jar_filename)\n", (4149, 4183), False, 'import logging\n'), ((4457, 4485), 'logging.info', 'logging.info', (['"""ping %s"""', 'url'], {}), "('ping %s', url)\n", (4469, 4485), False, 'import logging\n'), ((4498, 4537), 'requests.get', 'requests.get', (['url'], {'allow_redirects': '(True)'}), '(url, allow_redirects=True)\n', (4510, 4537), False, 'import requests\n'), ((2255, 2273), 'kubernetes.client.AppsV1Api', 'client.AppsV1Api', ([], {}), '()\n', (2271, 2273), False, 'from kubernetes import client, config\n')]
|
from torchvision import models
import pretrainedmodels
import torch
from torch import nn
__all__ = ['resnet50', 'resnet152', 'alexnet', 'vgg', 'densenet',
'inception', 'xception', 'nasnetalarge', 'nasnetamobile',
"yolov5s", "yolov5m"]
def set_parameter_requires_grad(model, feature_extracting):
    """Freeze every parameter of *model* when *feature_extracting* is True.

    When *feature_extracting* is False the model is left untouched, so the
    backbone keeps training together with any newly attached head.
    """
    if not feature_extracting:
        return
    for parameter in model.parameters():
        parameter.requires_grad = False
def nasnetalarge(num_classes, feature_extract, use_pretrained=True):
    """Build a NASNet-A Large classifier with a fresh sigmoid head.

    :param num_classes: number of output features/classes
    :type num_classes: int
    :param feature_extract: freeze the backbone and train only the new head
    :type feature_extract: bool
    :param use_pretrained: load ImageNet weights (fixed: this flag used to be
        ignored and weights were always downloaded)
    :type use_pretrained: bool
    :return: model, expected input size, normalization mean, normalization std
    :rtype: model, int, float, float
    """
    model = pretrainedmodels.nasnetalarge(
        num_classes=1000, pretrained='imagenet' if use_pretrained else None)
    set_parameter_requires_grad(model, feature_extract)
    num_ftrs = model.last_linear.in_features
    model.last_linear = nn.Sequential(
        nn.Linear(num_ftrs, num_classes),
        nn.Sigmoid())
    # NOTE(review): pretrainedmodels documents 331x331 input for NASNet-A
    # Large; 224 is kept to preserve the existing pipeline -- confirm.
    input_size = 224
    mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
    return model, input_size, mean, std
def nasnetamobile(num_classes, feature_extract, use_pretrained=True):
    """Build a NASNet-A Mobile classifier with a fresh sigmoid head.

    :param num_classes: number of output features/classes
    :type num_classes: int
    :param feature_extract: freeze the backbone and train only the new head
    :type feature_extract: bool
    :param use_pretrained: load ImageNet weights (fixed: this flag used to be
        ignored and weights were always downloaded)
    :type use_pretrained: bool
    :return: model, expected input size, normalization mean, normalization std
    :rtype: model, int, float, float
    """
    model = pretrainedmodels.nasnetamobile(
        num_classes=1000, pretrained='imagenet' if use_pretrained else None)
    set_parameter_requires_grad(model, feature_extract)
    num_ftrs = model.last_linear.in_features
    model.last_linear = nn.Sequential(
        nn.Linear(num_ftrs, num_classes),
        nn.Sigmoid())
    input_size = 224
    mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
    return model, input_size, mean, std
def xception(num_classes, feature_extract, use_pretrained=True):
    """Build an Xception classifier with a fresh sigmoid head.

    :param num_classes: number of output features/classes
    :type num_classes: int
    :param feature_extract: freeze the backbone and train only the new head
    :type feature_extract: bool
    :param use_pretrained: load ImageNet weights (fixed: this flag used to be
        ignored and weights were always downloaded)
    :type use_pretrained: bool
    :return: model, expected input size, normalization mean, normalization std
    :rtype: model, int, float, float
    """
    model = pretrainedmodels.xception(
        num_classes=1000, pretrained='imagenet' if use_pretrained else None)
    set_parameter_requires_grad(model, feature_extract)
    num_ftrs = model.last_linear.in_features
    model.last_linear = nn.Sequential(
        nn.Linear(num_ftrs, num_classes),
        nn.Sigmoid())
    input_size = 299
    mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
    return model, input_size, mean, std
def inception(num_classes, feature_extract, use_pretrained=True):
    """Build an Inception-v3 classifier with sigmoid heads on both outputs.

    :param num_classes: number of output features/classes
    :type num_classes: int
    :param feature_extract: freeze the pretrained backbone when True
    :type feature_extract: bool
    :param use_pretrained: start from ImageNet weights
    :type use_pretrained: bool
    :return: model, expected input size, normalization mean, normalization std
    :rtype: model, int, float, float
    """
    model = models.inception_v3(pretrained=use_pretrained)
    set_parameter_requires_grad(model, feature_extract)
    # Inception-v3 trains with an auxiliary classifier -- replace its head too.
    model.AuxLogits.fc = nn.Sequential(
        nn.Linear(model.AuxLogits.fc.in_features, num_classes),
        nn.Sigmoid())
    # Replace the primary classifier head.
    model.fc = nn.Sequential(
        nn.Linear(model.fc.in_features, num_classes),
        nn.Sigmoid())
    return model, 299, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
def densenet(num_classes, feature_extract, use_pretrained=True):
    """Build a DenseNet-121 classifier with a fresh sigmoid head.

    :param num_classes: number of output features/classes
    :type num_classes: int
    :param feature_extract: freeze the pretrained backbone when True
    :type feature_extract: bool
    :param use_pretrained: start from ImageNet weights
    :type use_pretrained: bool
    :return: model, expected input size, normalization mean, normalization std
    :rtype: model, int, float, float
    """
    model = models.densenet121(pretrained=use_pretrained)
    set_parameter_requires_grad(model, feature_extract)
    # Swap the ImageNet classifier for a num_classes-wide sigmoid head.
    in_features = model.classifier.in_features
    model.classifier = nn.Sequential(
        nn.Linear(in_features, num_classes),
        nn.Sigmoid())
    return model, 224, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
def vgg(num_classes, feature_extract, use_pretrained=True):
    """Build a VGG-11 (batch-norm) classifier with a fresh sigmoid head.

    :param num_classes: number of output features/classes
    :type num_classes: int
    :param feature_extract: freeze the pretrained backbone when True
    :type feature_extract: bool
    :param use_pretrained: start from ImageNet weights
    :type use_pretrained: bool
    :return: model, expected input size, normalization mean, normalization std
    :rtype: model, int, float, float
    """
    model = models.vgg11_bn(pretrained=use_pretrained)
    set_parameter_requires_grad(model, feature_extract)
    # The final classifier layer sits at index 6 of the classifier stack.
    in_features = model.classifier[6].in_features
    model.classifier[6] = nn.Sequential(
        nn.Linear(in_features, num_classes),
        nn.Sigmoid())
    return model, 224, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
def alexnet(num_classes, feature_extract, use_pretrained=True):
    """Build an AlexNet classifier with a fresh sigmoid head.

    :param num_classes: number of output features/classes
    :type num_classes: int
    :param feature_extract: freeze the pretrained backbone when True
    :type feature_extract: bool
    :param use_pretrained: start from ImageNet weights
    :type use_pretrained: bool
    :return: model, expected input size, normalization mean, normalization std
    :rtype: model, int, float, float
    """
    model = models.alexnet(pretrained=use_pretrained)
    set_parameter_requires_grad(model, feature_extract)
    # The final classifier layer sits at index 6 of the classifier stack.
    in_features = model.classifier[6].in_features
    model.classifier[6] = nn.Sequential(
        nn.Linear(in_features, num_classes),
        nn.Sigmoid())
    return model, 224, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
def resnet50(num_classes, feature_extract, use_pretrained=True):
    """Build a ResNet-50 classifier with a fresh sigmoid head.

    :param num_classes: number of output features/classes
    :type num_classes: int
    :param feature_extract: freeze the pretrained backbone when True
    :type feature_extract: bool
    :param use_pretrained: start from ImageNet weights
    :type use_pretrained: bool
    :return: model, expected input size, normalization mean, normalization std
    :rtype: model, int, float, float
    """
    model = models.resnet50(pretrained=use_pretrained)
    set_parameter_requires_grad(model, feature_extract)
    # Replace the fully connected head with a num_classes-wide sigmoid head.
    model.fc = nn.Sequential(
        nn.Linear(model.fc.in_features, num_classes),
        nn.Sigmoid())
    return model, 224, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
def resnet152(num_classes, feature_extract, use_pretrained=True):
    """Build a ResNet-152 classifier with a fresh sigmoid head.

    :param num_classes: number of output features/classes
    :type num_classes: int
    :param feature_extract: freeze the pretrained backbone when True
    :type feature_extract: bool
    :param use_pretrained: start from ImageNet weights
    :type use_pretrained: bool
    :return: model, expected input size, normalization mean, normalization std
    :rtype: model, int, float, float
    """
    model = models.resnet152(pretrained=use_pretrained)
    set_parameter_requires_grad(model, feature_extract)
    # Replace the fully connected head with a num_classes-wide sigmoid head.
    model.fc = nn.Sequential(
        nn.Linear(model.fc.in_features, num_classes),
        nn.Sigmoid())
    return model, 224, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
def yolov5s(num_classes, feature_extract, use_pretrained=True):
    """Build a YOLOv5-small detector with *num_classes* detection classes.

    :param num_classes: number of detection classes
    :type num_classes: int
    :param feature_extract: freeze the backbone when True
    :type feature_extract: bool
    :param use_pretrained: load the pretrained COCO weights
    :type use_pretrained: bool
    :return: model, expected input size, normalization mean, normalization std
    :rtype: model, int, float, float

    Fixes: the ultralytics hub kwarg ``channels`` sets the number of *input
    image channels*, not classes -- ``classes`` is the head size; hub
    detection models also expose no ``.fc`` attribute, so the previous head
    swap raised AttributeError; and ``use_pretrained`` was silently ignored.
    """
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s',
                           pretrained=use_pretrained, classes=num_classes)
    set_parameter_requires_grad(model, feature_extract)
    input_size = 224
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    return model, input_size, mean, std
def yolov5m(num_classes, feature_extract, use_pretrained=True):
    """Build a YOLOv5-medium detector with *num_classes* detection classes.

    :param num_classes: number of detection classes
    :type num_classes: int
    :param feature_extract: freeze the backbone when True
    :type feature_extract: bool
    :param use_pretrained: load the pretrained COCO weights
    :type use_pretrained: bool
    :return: model, expected input size, normalization mean, normalization std
    :rtype: model, int, float, float

    Fixes: the ultralytics hub kwarg ``channels`` sets the number of *input
    image channels*, not classes -- ``classes`` is the detection-head size;
    ``use_pretrained`` was also silently ignored.
    """
    model = torch.hub.load('ultralytics/yolov5', 'yolov5m',
                           pretrained=use_pretrained, classes=num_classes)
    set_parameter_requires_grad(model, feature_extract)
    input_size = 224
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    return model, input_size, mean, std
|
[
"pretrainedmodels.nasnetalarge",
"pretrainedmodels.nasnetamobile",
"torchvision.models.vgg11_bn",
"torchvision.models.densenet121",
"torchvision.models.alexnet",
"pretrainedmodels.xception",
"torchvision.models.inception_v3",
"torchvision.models.resnet50",
"torch.nn.Linear",
"torch.hub.load",
"torchvision.models.resnet152",
"torch.nn.Sigmoid"
] |
[((1000, 1070), 'pretrainedmodels.nasnetalarge', 'pretrainedmodels.nasnetalarge', ([], {'num_classes': '(1000)', 'pretrained': '"""imagenet"""'}), "(num_classes=1000, pretrained='imagenet')\n", (1029, 1070), False, 'import pretrainedmodels\n'), ((1959, 2030), 'pretrainedmodels.nasnetamobile', 'pretrainedmodels.nasnetamobile', ([], {'num_classes': '(1000)', 'pretrained': '"""imagenet"""'}), "(num_classes=1000, pretrained='imagenet')\n", (1989, 2030), False, 'import pretrainedmodels\n'), ((2909, 2975), 'pretrainedmodels.xception', 'pretrainedmodels.xception', ([], {'num_classes': '(1000)', 'pretrained': '"""imagenet"""'}), "(num_classes=1000, pretrained='imagenet')\n", (2934, 2975), False, 'import pretrainedmodels\n'), ((3859, 3905), 'torchvision.models.inception_v3', 'models.inception_v3', ([], {'pretrained': 'use_pretrained'}), '(pretrained=use_pretrained)\n', (3878, 3905), False, 'from torchvision import models\n'), ((4987, 5032), 'torchvision.models.densenet121', 'models.densenet121', ([], {'pretrained': 'use_pretrained'}), '(pretrained=use_pretrained)\n', (5005, 5032), False, 'from torchvision import models\n'), ((5911, 5953), 'torchvision.models.vgg11_bn', 'models.vgg11_bn', ([], {'pretrained': 'use_pretrained'}), '(pretrained=use_pretrained)\n', (5926, 5953), False, 'from torchvision import models\n'), ((6846, 6887), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': 'use_pretrained'}), '(pretrained=use_pretrained)\n', (6860, 6887), False, 'from torchvision import models\n'), ((7782, 7824), 'torchvision.models.resnet50', 'models.resnet50', ([], {'pretrained': 'use_pretrained'}), '(pretrained=use_pretrained)\n', (7797, 7824), False, 'from torchvision import models\n'), ((8699, 8742), 'torchvision.models.resnet152', 'models.resnet152', ([], {'pretrained': 'use_pretrained'}), '(pretrained=use_pretrained)\n', (8715, 8742), False, 'from torchvision import models\n'), ((9613, 9704), 'torch.hub.load', 'torch.hub.load', (['"""ultralytics/yolov5"""', 
'"""yolov5s"""'], {'pretrained': '(True)', 'channels': 'num_classes'}), "('ultralytics/yolov5', 'yolov5s', pretrained=True, channels=\n num_classes)\n", (9627, 9704), False, 'import torch\n'), ((10570, 10661), 'torch.hub.load', 'torch.hub.load', (['"""ultralytics/yolov5"""', '"""yolov5m"""'], {'pretrained': '(True)', 'channels': 'num_classes'}), "('ultralytics/yolov5', 'yolov5m', pretrained=True, channels=\n num_classes)\n", (10584, 10661), False, 'import torch\n'), ((1219, 1251), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', 'num_classes'], {}), '(num_ftrs, num_classes)\n', (1228, 1251), False, 'from torch import nn\n'), ((1261, 1273), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1271, 1273), False, 'from torch import nn\n'), ((2179, 2211), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', 'num_classes'], {}), '(num_ftrs, num_classes)\n', (2188, 2211), False, 'from torch import nn\n'), ((2221, 2233), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2231, 2233), False, 'from torch import nn\n'), ((3124, 3156), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', 'num_classes'], {}), '(num_ftrs, num_classes)\n', (3133, 3156), False, 'from torch import nn\n'), ((3166, 3178), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (3176, 3178), False, 'from torch import nn\n'), ((4086, 4118), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', 'num_classes'], {}), '(num_ftrs, num_classes)\n', (4095, 4118), False, 'from torch import nn\n'), ((4128, 4140), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4138, 4140), False, 'from torch import nn\n'), ((4245, 4277), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', 'num_classes'], {}), '(num_ftrs, num_classes)\n', (4254, 4277), False, 'from torch import nn\n'), ((4287, 4299), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4297, 4299), False, 'from torch import nn\n'), ((5179, 5211), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', 'num_classes'], {}), '(num_ftrs, num_classes)\n', (5188, 5211), False, 'from torch import nn\n'), 
((5221, 5233), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (5231, 5233), False, 'from torch import nn\n'), ((6106, 6138), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', 'num_classes'], {}), '(num_ftrs, num_classes)\n', (6115, 6138), False, 'from torch import nn\n'), ((6148, 6160), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (6158, 6160), False, 'from torch import nn\n'), ((7040, 7072), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', 'num_classes'], {}), '(num_ftrs, num_classes)\n', (7049, 7072), False, 'from torch import nn\n'), ((7082, 7094), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (7092, 7094), False, 'from torch import nn\n'), ((7955, 7987), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', 'num_classes'], {}), '(num_ftrs, num_classes)\n', (7964, 7987), False, 'from torch import nn\n'), ((7997, 8009), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (8007, 8009), False, 'from torch import nn\n'), ((8873, 8905), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', 'num_classes'], {}), '(num_ftrs, num_classes)\n', (8882, 8905), False, 'from torch import nn\n'), ((8915, 8927), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (8925, 8927), False, 'from torch import nn\n'), ((9830, 9862), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', 'num_classes'], {}), '(num_ftrs, num_classes)\n', (9839, 9862), False, 'from torch import nn\n'), ((9872, 9884), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (9882, 9884), False, 'from torch import nn\n')]
|
import re
import string
import ipaddress
from . import utils
class RequestProtocol(object):
    """Constants and helpers for serializing HTTP/1.1 request messages."""

    METHOD_GET = 'GET'
    METHOD_OPTIONS = 'OPTIONS'
    METHOD_HEAD = 'HEAD'
    METHOD_POST = 'POST'
    METHOD_PUT = 'PUT'
    METHOD_DELETE = 'DELETE'
    METHOD_TRACE = 'TRACE'
    METHOD_CONNECT = 'CONNECT'
    HTTP_VERSION = '1.1'
    CRLF = '\r\n'
    SP = ' '
    REQUEST_LINE = '{method}{sp}{request_uri}{sp}HTTP/{http_version}{crlf}'

    @classmethod
    def path(cls, uri):
        """Return the request path of *uri*, appending '?query' when present."""
        if uri.query:
            return '{}?{}'.format(uri.path, uri.query)
        return uri.path

    @classmethod
    def header_fields(cls, headers):
        """Serialize a mapping into CRLF-joined 'Name: value' header lines."""
        lines = ['{}: {}'.format(name, value) for name, value in headers.items()]
        return '\r\n'.join(lines)

    @classmethod
    def request_line(cls, method, request_uri):
        """Format an HTTP/1.1 request line, e.g. 'GET /x HTTP/1.1\\r\\n'."""
        return cls.REQUEST_LINE.format(
            method=method,
            sp=cls.SP,
            request_uri=request_uri,
            http_version=cls.HTTP_VERSION,
            crlf=cls.CRLF)
class ResponseProtocol(object):
    """Helpers for parsing pieces of HTTP response messages."""

    COLON = ':'
    HTTP = 'HTTP/'
    # Extracts the charset parameter from a Content-Type style header value.
    REGEX_CHARSET = re.compile(r';\s*charset=([^;]*)', re.I)
    # Matches 'token', 'token=value' and 'token="quoted value"' forms.
    REGEX_TOKEN = re.compile(
        r'([a-zA-Z][a-zA-Z_-]*)\s*(?:=(?:"([^"]*)"|'
        r'([^ \t",;]*)))?')

    @classmethod
    def parse_status(cls, status):
        """Normalize a status line to 'code reason', dropping the HTTP version."""
        if status.startswith(cls.HTTP):
            version, code, reason = status.split(None, 2)
            status = '{} {}'.format(
                utils.smart_text(code),
                utils.smart_text(reason))
        return status

    @classmethod
    def parse_status_code(cls, status):
        """Return the leading integer status code ('200 OK' -> 200)."""
        return int(status.split(None, 1)[0])

    @classmethod
    def parse_charset(cls, header, charset):
        """Return the charset declared in *header*, or *charset* when absent."""
        found = cls.REGEX_CHARSET.search(utils.smart_text(header))
        return found.group(1) if found else charset

    @classmethod
    def parse_cache_control(cls, header):
        """Parse a Cache-Control header value into a directive dict.

        Valueless directives (e.g. 'no-store') map to None; valued directives
        (e.g. 'max-age=60') map to ints when numeric, otherwise strings.
        Directives outside the two known sets are dropped.
        """
        text = utils.smart_text(header)
        parsed = {}
        for match in cls.REGEX_TOKEN.finditer(text):
            name = match.group(1)
            value = match.group(2) or match.group(3) or None
            if value and value.isdigit():
                value = int(value)
            parsed[name] = value
        valueless = ('public', 'no-store', 'no-transform',
                     'must-revalidate', 'proxy-revalidate')
        valued = ('private', 'no-cache', 'max-age', 's-maxage',
                  'stale-while-revalidate', 'stale-if-error')
        cache_control = {name: None for name in valueless if name in parsed}
        cache_control.update(
            (name, value) for name, value in parsed.items() if name in valued)
        return cache_control
class URIProtocol(object):
ALPHA = string.ascii_letters
DIGIT = string.digits
UNRESERVED = ALPHA + DIGIT + '-' '.' '_' '~'
GEN_DELIMS = ':' '/' '?' '#' '[' ']' '@'
SUB_DELIMS = '!' '$' '&' '\'' '(' ')' '*' '+' ',' ';' '='
RESERVED = GEN_DELIMS + SUB_DELIMS
PCT_ENCODED = '%' + string.hexdigits
PCHAR = UNRESERVED + PCT_ENCODED + SUB_DELIMS + ':' + '@'
# Parts
SCHEME = ALPHA + DIGIT + '+' + '-' + '.'
QUERY = PCHAR + '/' '?'
FRAGMENT = PCHAR + '/' '?'
# Authority
USERINFO = UNRESERVED + PCT_ENCODED + SUB_DELIMS + ':'
PORT = DIGIT
# Host
REG_NAME = UNRESERVED + PCT_ENCODED + SUB_DELIMS
# Path
SEGMENT = PCHAR
SEGMENT_NZ_NC = UNRESERVED + PCT_ENCODED + SUB_DELIMS + '@'
@classmethod
def strip_scheme(cls, uri):
if ':' not in uri:
return None, uri
scheme, hier_part = uri.split(':', 1)
if scheme[0] not in cls.ALPHA:
raise exc.SchemeException(uri)
if any(c not in cls.SCHEME for c in scheme[1:]):
raise exc.SchemeException(scheme)
return scheme.lower(), hier_part
@classmethod
def strip_fragment(cls, hier_part):
if '#' in hier_part:
hier_part, fragment = \
hier_part.rsplit('#', 1)
if any(c not in cls.FRAGMENT for c in fragment):
raise exc.FragmentException(fragment)
else:
fragment = None
return fragment, hier_part
@classmethod
def strip_query(cls, hier_part):
if '?' in hier_part:
hier_part, query = hier_part.rsplit('?', 1)
if any(c not in cls.QUERY for c in query):
raise exc.QueryException(query)
else:
query = None
return query, hier_part
@classmethod
def strip_authority(cls, hier_part):
if hier_part.startswith('//'):
hier_part = hier_part[2:]
if '/' in hier_part:
authority, hier_part = hier_part.split('/', 1)
hier_part = '/{}'.format(hier_part)
else:
authority = hier_part
hier_part = ''
return authority, hier_part
@classmethod
def strip_userinfo(cls, authority):
if '@' in authority:
userinfo, authority = authority.split('@', 1)
if any(c not in cls.USERINFO for c in userinfo):
raise exc.UserInfoException(userinfo)
if not userinfo:
userinfo = None
else:
userinfo = None
return userinfo, authority
@classmethod
def strip_port(cls, authority):
    """Split ``(port, authority)``; port is an int, or ``None`` when absent.

    Fix: a bracketed IPv6 host without a port (e.g. ``'[::1]'``)
    contains ':' and was previously mangled by ``rsplit(':', 1)``;
    a trailing ']' now means "no port present".

    Raises:
        exc.PortException: when the port part is not purely digits.
    """
    if ':' not in authority or authority.endswith(']'):
        return None, authority
    authority, port = authority.rsplit(':', 1)
    if not port.isdigit():
        raise exc.PortException(port)
    return int(port), authority
@classmethod
def verify_reg_name(cls, host):
    """Return True when every character of *host* is a valid reg-name char."""
    for ch in host:
        if ch not in cls.REG_NAME:
            return False
    return True
@classmethod
def verify_ipv4_address(cls, host):
    """Return True when *host* parses as a dotted-quad IPv4 address."""
    if '.' not in host:
        # Cheap pre-filter: every IPv4 literal contains dots.
        return False
    try:
        ipaddress.IPv4Address(host)
        return True
    except ipaddress.AddressValueError:
        return False
@classmethod
def verify_ipv6_address(cls, host):
    """Return True when *host* parses as an IPv6 address."""
    if ':' not in host:
        # Cheap pre-filter: every IPv6 literal contains colons.
        return False
    try:
        ipaddress.IPv6Address(host)
        return True
    except ipaddress.AddressValueError:
        return False
@classmethod
def verify_path_abempty(cls, path):
    """Return True when *path* matches RFC 3986 path-abempty
    (empty, or '/'-prefixed segments of SEGMENT characters)."""
    if not path:
        return True
    if not path.startswith('/'):
        return False
    return all(
        all(c in cls.SEGMENT for c in seg)
        for seg in path.split('/')
    )
@classmethod
def verify_path_absolute(cls, path):
    """Return True when *path* matches RFC 3986 path-absolute.

    path-absolute = "/" [ segment-nz *( "/" segment ) ]

    Fix: a bare ``'/'`` is valid per the ABNF (the segment part is
    optional) but was previously rejected because the first segment
    after the slash is empty.
    """
    if not path.startswith('/'):
        return False
    if path == '/':
        return True
    segments = path.split('/')[1:]
    # First segment must be non-empty (segment-nz) and valid.
    if not segments[0] or any(c not in cls.SEGMENT for c in segments[0]):
        return False
    for segment in segments[1:]:
        if any(c not in cls.SEGMENT for c in segment):
            return False
    return True
@classmethod
def verify_path_noscheme(cls, path):
    """Return True when *path* matches RFC 3986 path-noscheme
    (non-empty colon-free first segment, no leading '/')."""
    if path.startswith('/'):
        return False
    first, sep, rest = path.partition('/')
    if not first or not all(c in cls.SEGMENT_NZ_NC for c in first):
        return False
    if not sep:
        return True
    return all(
        all(c in cls.SEGMENT for c in seg)
        for seg in rest.split('/')
    )
@classmethod
def verify_path_rootless(cls, path):
    """Return True when *path* matches RFC 3986 path-rootless
    (non-empty first segment, no leading '/')."""
    if path.startswith('/'):
        return False
    segments = path.split('/')
    if not segments[0]:
        return False
    head_ok = all(c in cls.SEGMENT for c in segments[0])
    tail_ok = all(
        all(c in cls.SEGMENT for c in seg)
        for seg in segments[1:]
    )
    return head_ok and tail_ok
@classmethod
def verify_path_empty(cls, path):
    """Return True when *path* matches RFC 3986 path-empty (zero chars).

    Fix: the previous ``bool(path)`` answered the *inverse* question
    ("is the path non-empty"), contradicting every sibling
    ``verify_path_*`` method, which returns True when the path MATCHES
    the named production.
    """
    return not path
|
[
"ipaddress.IPv6Address",
"ipaddress.IPv4Address",
"re.compile"
] |
[((1102, 1142), 're.compile', 're.compile', (['""";\\\\s*charset=([^;]*)"""', 're.I'], {}), "(';\\\\s*charset=([^;]*)', re.I)\n", (1112, 1142), False, 'import re\n'), ((1161, 1233), 're.compile', 're.compile', (['"""([a-zA-Z][a-zA-Z_-]*)\\\\s*(?:=(?:"([^"]*)"|([^ \\\\t",;]*)))?"""'], {}), '(\'([a-zA-Z][a-zA-Z_-]*)\\\\s*(?:=(?:"([^"]*)"|([^ \\\\t",;]*)))?\')\n', (1171, 1233), False, 'import re\n'), ((6060, 6087), 'ipaddress.IPv4Address', 'ipaddress.IPv4Address', (['host'], {}), '(host)\n', (6081, 6087), False, 'import ipaddress\n'), ((6332, 6359), 'ipaddress.IPv6Address', 'ipaddress.IPv6Address', (['host'], {}), '(host)\n', (6353, 6359), False, 'import ipaddress\n')]
|
import numpy as np
import tensiga
from tensiga.iga.Nurbs import Nurbs
from tensiga.iga.Bspline import Bspline
from math import sqrt
import os
def UnitCube(n, p):
    """Build an n-dimensional degree-p Bspline of the unit cube [0,1]^n.

    Each direction gets an open knot vector with a single span and a
    tensor-product grid of equally spaced control points.
    """
    deg = [p] * n
    kv = [np.repeat([0., 1.], p + 1) for _ in range(n)]
    cp_shape = [p + 1] * n
    # One coordinate prototype per direction; the k-th coordinate array
    # repeats/tiles it into the full tensor-product grid.
    cp_proto = [np.linspace(0., 1., cp_shape[k]) for k in range(n)]
    cp = [None] * n
    cp[0] = np.repeat(cp_proto[0], np.prod(cp_shape[0:-1])).reshape(cp_shape)
    for k in range(1, n):
        stretched = np.repeat(cp_proto[k], np.prod(cp_shape[k:-1]))
        cp[k] = np.tile(stretched, np.prod(cp_shape[0:k])).reshape(cp_shape)
    return Bspline(n, n, kv, deg, cp)
def DiscontinuousLine(p):
    """Build a 1d degree-p Bspline line with interior knots at 0.2 and
    0.75 (single) and 0.5 (multiplicity p+1, i.e. a break there).

    All control points are zero; there is one per basis function.
    """
    deg = [p]
    knots = np.hstack([
        np.repeat(0., p + 1),
        np.array(0.2),
        np.repeat(0.5, p + 1),
        np.array(0.75),
        np.repeat(1., p + 1),
    ])
    kv = [knots]
    cp = [np.zeros(kv[0].size - deg[0] - 1)]
    return Bspline(1, 1, kv, deg, cp)
def OpenUnitBasis(n, p, N):
    """Build an n-d degree-p Bspline on uniform knot vectors of N knots
    per direction, with all control points set to zero."""
    deg = [p] * n
    kv = [np.linspace(0, 1, N) for _ in range(n)]
    cp_shape = [kv[k].size - p - 1 for k in range(n)]
    # Zero prototypes, expanded into the tensor-product layout.
    cp_proto = [np.zeros(cp_shape[k]) for k in range(n)]
    cp = [None] * n
    cp[0] = np.repeat(cp_proto[0], np.prod(cp_shape[0:-1])).reshape(cp_shape)
    for k in range(1, n):
        stretched = np.repeat(cp_proto[k], np.prod(cp_shape[k:-1]))
        cp[k] = np.tile(stretched, np.prod(cp_shape[0:k])).reshape(cp_shape)
    return Bspline(n, n, kv, deg, cp)
def QuarterAnnulus2D(R, r):
    """NURBS quarter annulus (90-degree arc) with outer radius R and
    inner radius r.

    Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    the builtin ``float`` dtype is equivalent.
    """
    dim = 2
    codim = 2
    deg = [2, 1]
    kv = [np.array([0., 0., 0., 1., 1., 1.]), np.array([0., 0., 1., 1.])]
    cp_shape = (3, 2)
    # Control net in numpy ordering; the middle arc point gets the
    # standard conic weight 1/sqrt(2).
    x = [R, r, R, r, 0., 0.]
    y = [0., 0., R, r, R, r]
    w = [1., 1., 1./sqrt(2.), 1./sqrt(2.), 1., 1.]
    ctrlpts = [np.array(v, dtype=float).reshape(cp_shape) for v in (x, y, w)]
    return Nurbs(dim, codim, kv, deg, ctrlpts)
def QuarterAnnulus3D(R, r, L):
    """NURBS quarter annulus (outer R, inner r) extruded to length L.

    Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    the builtin ``float`` dtype is equivalent.
    """
    dim = 3
    codim = 3
    deg = [2, 1, 1]
    kv = [np.array([0., 0., 0., 1., 1., 1.]),
          np.array([0., 0., 1., 1.]),
          np.array([0., 0., 1., 1.])]
    cp_shape = (3, 2, 2)
    # Control net in numpy ordering; arc midpoints carry weight 1/sqrt(2).
    x = [R, R, r, r, R, R, r, r, .0, .0, .0, .0]
    y = [0, 0., 0., 0., R, R, r, r, R, R, r, r]
    z = [0, L, 0., L, 0., L, 0., L, 0., L, 0., L]
    w = [1., 1., 1., 1., 1./sqrt(2.), 1./sqrt(2.), 1./sqrt(2.), 1./sqrt(2.), 1., 1., 1., 1.]
    ctrlpts = [np.array(v, dtype=float).reshape(cp_shape) for v in (x, y, z, w)]
    return Nurbs(dim, codim, kv, deg, ctrlpts)
def Halfpipe2D(R, r):
    """NURBS half annulus (180 degrees) with outer radius R and inner
    radius r.

    Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    the builtin ``float`` dtype is equivalent.
    """
    dim = 2
    codim = 2
    deg = [2, 1]
    kv = [np.array([0., 0., 0., .5, .5, 1., 1., 1.]), np.array([0., 0., 1., 1.])]
    cp_shape = (5, 2)
    W = 1./sqrt(2.)
    # Control net in numpy ordering; quarter-arc midpoints weighted W.
    y = [-R, -r, -R, -r, 0., 0., R, r, R, r]
    x = [0., 0., R, r, R, r, R, r, 0., 0.]
    w = [1., 1., W, W, 1, 1, W, W, 1., 1.]
    ctrlpts = [np.array(v, dtype=float).reshape(cp_shape) for v in (x, y, w)]
    return Nurbs(dim, codim, kv, deg, ctrlpts)
def Halfpipe3D(R, r, L):
    """NURBS half pipe: 180-degree annular section (outer R, inner r)
    extruded along y to length L, quadratic in all three directions.

    Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    the builtin ``float`` dtype is equivalent.  The control-point lists
    are built from a shared radial template instead of 45 hand-typed
    entries (values unchanged).
    """
    dim = 3
    codim = 3
    deg = [2, 2, 2]
    kv = [np.array([0., 0., 0., 0.5, 0.5, 1., 1., 1.]),
          np.array([0., 0., 0., 1., 1., 1.]),
          np.array([0., 0., 0., 1., 1., 1.])]
    cp_shape = (5, 3, 3)
    W = sqrt(2)
    #W = 1.
    mid = (R + r) / 2
    # Radial template: outer, middle, inner radius, each repeated for
    # the three y-stations.
    ring = [R, R, R, mid, mid, mid, r, r, r]
    x = [-v for v in ring] * 2 + [0] * 9 + ring * 2
    y = [0, L/2, L] * 15
    z = [0] * 9 + ring * 3 + [0] * 9
    # Arc midpoint rows carry the conic weight 1/W.
    w = [1.] * 9 + [1./W] * 9 + [1] * 9 + [1./W] * 9 + [1.] * 9
    ctrlpts = [np.array(v, dtype=float).reshape(cp_shape) for v in (x, y, z, w)]
    return Nurbs(dim, codim, kv, deg, ctrlpts)
def Shell3d():
    """Build the shell NURBS geometry from the Rhino control-point
    exports shipped with tensiga (outer, center and inner surfaces)."""
    dim = 3
    codim = 3
    deg = [2, 2, 2]
    kv = [np.array([0, 0, 0, 1.5707963267949, 1.5707963267949, 3.14159265358979, 3.14159265358979, 4.71238898038469, 4.71238898038469, 6.28318530717959, 6.28318530717959, 6.28318530717959]),
          np.array([-88.6003574854838,-88.6003574854838,-88.6003574854838,-2,-2,-1,-1,0,0,0])+88.6003574854838,
          np.array([0.,0.,0.,1.,1.,1.])]
    # Normalise every knot vector to [0, 1].
    kv = [v / v[-1] for v in kv]
    module_path = os.path.dirname(tensiga.__file__)
    inner = np.loadtxt(module_path+'/utils/rhino_data/cps_inner.txt')
    center = np.loadtxt(module_path+'/utils/rhino_data/cps_center.txt')
    outer = np.loadtxt(module_path+'/utils/rhino_data/cps_outer.txt')
    x, y, z, w = [], [], [], []
    for surf in (outer, center, inner):
        weights = surf[:, 3]
        # Undo the projective (homogeneous) scaling of the exported points.
        pts = surf[:, 0:3] / weights.reshape(surf.shape[0], -1)
        x.append(pts[:, 0])
        y.append(pts[:, 1])
        z.append(pts[:, 2])
        w.append(weights)
    x = np.ascontiguousarray(np.hstack(x))
    y = np.ascontiguousarray(np.hstack(y))
    z = np.ascontiguousarray(np.hstack(z))
    w = np.ascontiguousarray(np.hstack(w))
    cp_shape = (9, 7, 3)
    ctrlpts = [np.array(v).reshape(cp_shape, order='F') for v in (x, y, z, w)]
    return Nurbs(dim, codim, kv, deg, ctrlpts)
# gets C0 bspline mesh of given size for any domain
def interpolation_mesh(domain, ref_nodes, p=1):
    """Build an interpolation mesh for *domain*: a linear unit cube is
    h-refined with ref_nodes[k] per direction, its control points are
    set by evaluating the domain at the unique knots, then optionally
    p-refined to degree p.

    NOTE(review): assumes *domain* exposes .dim, .eval and the mesh
    supports .href/.pref -- confirmed only by usage here.
    """
    idomain = UnitCube(domain.dim, 1)
    for k in range(idomain.dim):
        idomain.href(ref_nodes[k], k)
    ep = [np.unique(kv) for kv in idomain.kv]
    idomain.ctrlpts = [domain.eval(ep, k) for k in range(domain.dim)]
    if int(p) > 1:
        for k in range(idomain.dim):
            idomain.pref(p - 1, k)
    return idomain
|
[
"math.sqrt",
"tensiga.iga.Bspline.Bspline",
"os.path.dirname",
"numpy.zeros",
"numpy.hstack",
"tensiga.iga.Nurbs.Nurbs",
"numpy.prod",
"numpy.array",
"numpy.loadtxt",
"numpy.linspace",
"numpy.unique",
"numpy.repeat"
] |
[((1134, 1166), 'tensiga.iga.Bspline.Bspline', 'Bspline', (['dim', 'codim', 'kv', 'deg', 'cp'], {}), '(dim, codim, kv, deg, cp)\n', (1141, 1166), False, 'from tensiga.iga.Bspline import Bspline\n'), ((1547, 1573), 'tensiga.iga.Bspline.Bspline', 'Bspline', (['(1)', '(1)', 'kv', 'deg', 'cp'], {}), '(1, 1, kv, deg, cp)\n', (1554, 1573), False, 'from tensiga.iga.Bspline import Bspline\n'), ((2175, 2207), 'tensiga.iga.Bspline.Bspline', 'Bspline', (['dim', 'codim', 'kv', 'deg', 'cp'], {}), '(dim, codim, kv, deg, cp)\n', (2182, 2207), False, 'from tensiga.iga.Bspline import Bspline\n'), ((2815, 2850), 'tensiga.iga.Nurbs.Nurbs', 'Nurbs', (['dim', 'codim', 'kv', 'deg', 'ctrlpts'], {}), '(dim, codim, kv, deg, ctrlpts)\n', (2820, 2850), False, 'from tensiga.iga.Nurbs import Nurbs\n'), ((3734, 3769), 'tensiga.iga.Nurbs.Nurbs', 'Nurbs', (['dim', 'codim', 'kv', 'deg', 'ctrlpts'], {}), '(dim, codim, kv, deg, ctrlpts)\n', (3739, 3769), False, 'from tensiga.iga.Nurbs import Nurbs\n'), ((4421, 4456), 'tensiga.iga.Nurbs.Nurbs', 'Nurbs', (['dim', 'codim', 'kv', 'deg', 'ctrlpts'], {}), '(dim, codim, kv, deg, ctrlpts)\n', (4426, 4456), False, 'from tensiga.iga.Nurbs import Nurbs\n'), ((4751, 4758), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (4755, 4758), False, 'from math import sqrt\n'), ((5716, 5751), 'tensiga.iga.Nurbs.Nurbs', 'Nurbs', (['dim', 'codim', 'kv', 'deg', 'ctrlpts'], {}), '(dim, codim, kv, deg, ctrlpts)\n', (5721, 5751), False, 'from tensiga.iga.Nurbs import Nurbs\n'), ((6231, 6264), 'os.path.dirname', 'os.path.dirname', (['tensiga.__file__'], {}), '(tensiga.__file__)\n', (6246, 6264), False, 'import os\n'), ((6277, 6336), 'numpy.loadtxt', 'np.loadtxt', (["(module_path + '/utils/rhino_data/cps_inner.txt')"], {}), "(module_path + '/utils/rhino_data/cps_inner.txt')\n", (6287, 6336), True, 'import numpy as np\n'), ((6348, 6408), 'numpy.loadtxt', 'np.loadtxt', (["(module_path + '/utils/rhino_data/cps_center.txt')"], {}), "(module_path + 
'/utils/rhino_data/cps_center.txt')\n", (6358, 6408), True, 'import numpy as np\n'), ((6419, 6478), 'numpy.loadtxt', 'np.loadtxt', (["(module_path + '/utils/rhino_data/cps_outer.txt')"], {}), "(module_path + '/utils/rhino_data/cps_outer.txt')\n", (6429, 6478), True, 'import numpy as np\n'), ((7349, 7384), 'tensiga.iga.Nurbs.Nurbs', 'Nurbs', (['dim', 'codim', 'kv', 'deg', 'ctrlpts'], {}), '(dim, codim, kv, deg, ctrlpts)\n', (7354, 7384), False, 'from tensiga.iga.Nurbs import Nurbs\n'), ((234, 267), 'numpy.repeat', 'np.repeat', (['[0.0, 1.0]', '(deg[k] + 1)'], {}), '([0.0, 1.0], deg[k] + 1)\n', (243, 267), True, 'import numpy as np\n'), ((412, 446), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'cp_shape[k]'], {}), '(0.0, 1.0, cp_shape[k])\n', (423, 446), True, 'import numpy as np\n'), ((1467, 1500), 'numpy.zeros', 'np.zeros', (['(kv[0].size - deg[0] - 1)'], {}), '(kv[0].size - deg[0] - 1)\n', (1475, 1500), True, 'import numpy as np\n'), ((1692, 1712), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (1703, 1712), True, 'import numpy as np\n'), ((1867, 1888), 'numpy.zeros', 'np.zeros', (['cp_shape[k]'], {}), '(cp_shape[k])\n', (1875, 1888), True, 'import numpy as np\n'), ((2337, 2377), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 1.0, 1.0])\n', (2345, 2377), True, 'import numpy as np\n'), ((2373, 2403), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0])\n', (2381, 2403), True, 'import numpy as np\n'), ((2986, 3026), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 1.0, 1.0])\n', (2994, 3026), True, 'import numpy as np\n'), ((3033, 3063), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0])\n', (3041, 3063), True, 'import numpy as np\n'), ((3072, 3102), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0])\n', (3080, 3102), True, 'import numpy as np\n'), 
((3893, 3943), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.5, 0.5, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 0.5, 0.5, 1.0, 1.0, 1.0])\n', (3901, 3943), True, 'import numpy as np\n'), ((3937, 3967), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0])\n', (3945, 3967), True, 'import numpy as np\n'), ((4016, 4025), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (4020, 4025), False, 'from math import sqrt\n'), ((4560, 4610), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.5, 0.5, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 0.5, 0.5, 1.0, 1.0, 1.0])\n', (4568, 4610), True, 'import numpy as np\n'), ((4617, 4657), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 1.0, 1.0])\n', (4625, 4657), True, 'import numpy as np\n'), ((4664, 4704), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 1.0, 1.0])\n', (4672, 4704), True, 'import numpy as np\n'), ((5844, 6032), 'numpy.array', 'np.array', (['[0, 0, 0, 1.5707963267949, 1.5707963267949, 3.14159265358979, \n 3.14159265358979, 4.71238898038469, 4.71238898038469, 6.28318530717959,\n 6.28318530717959, 6.28318530717959]'], {}), '([0, 0, 0, 1.5707963267949, 1.5707963267949, 3.14159265358979, \n 3.14159265358979, 4.71238898038469, 4.71238898038469, 6.28318530717959,\n 6.28318530717959, 6.28318530717959])\n', (5852, 6032), True, 'import numpy as np\n'), ((6148, 6188), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 1.0, 1.0])\n', (6156, 6188), True, 'import numpy as np\n'), ((6940, 6952), 'numpy.hstack', 'np.hstack', (['x'], {}), '(x)\n', (6949, 6952), True, 'import numpy as np\n'), ((6983, 6995), 'numpy.hstack', 'np.hstack', (['y'], {}), '(y)\n', (6992, 6995), True, 'import numpy as np\n'), ((7026, 7038), 'numpy.hstack', 'np.hstack', (['z'], {}), '(z)\n', (7035, 7038), True, 'import numpy as np\n'), ((7069, 7081), 'numpy.hstack', 'np.hstack', (['w'], {}), '(w)\n', (7078, 
7081), True, 'import numpy as np\n'), ((7633, 7646), 'numpy.unique', 'np.unique', (['kv'], {}), '(kv)\n', (7642, 7646), True, 'import numpy as np\n'), ((2543, 2552), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (2547, 2552), False, 'from math import sqrt\n'), ((2556, 2565), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (2560, 2565), False, 'from math import sqrt\n'), ((3363, 3372), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (3367, 3372), False, 'from math import sqrt\n'), ((3376, 3385), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (3380, 3385), False, 'from math import sqrt\n'), ((3389, 3398), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (3393, 3398), False, 'from math import sqrt\n'), ((3402, 3411), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (3406, 3411), False, 'from math import sqrt\n'), ((6036, 6133), 'numpy.array', 'np.array', (['[-88.6003574854838, -88.6003574854838, -88.6003574854838, -2, -2, -1, -1, 0,\n 0, 0]'], {}), '([-88.6003574854838, -88.6003574854838, -88.6003574854838, -2, -2, \n -1, -1, 0, 0, 0])\n', (6044, 6133), True, 'import numpy as np\n'), ((501, 524), 'numpy.prod', 'np.prod', (['cp_shape[0:-1]'], {}), '(cp_shape[0:-1])\n', (508, 524), True, 'import numpy as np\n'), ((1261, 1282), 'numpy.repeat', 'np.repeat', (['(0.0)', '(p + 1)'], {}), '(0.0, p + 1)\n', (1270, 1282), True, 'import numpy as np\n'), ((1293, 1306), 'numpy.array', 'np.array', (['(0.2)'], {}), '(0.2)\n', (1301, 1306), True, 'import numpy as np\n'), ((1320, 1341), 'numpy.repeat', 'np.repeat', (['(0.5)', '(p + 1)'], {}), '(0.5, p + 1)\n', (1329, 1341), True, 'import numpy as np\n'), ((1353, 1367), 'numpy.array', 'np.array', (['(0.75)'], {}), '(0.75)\n', (1361, 1367), True, 'import numpy as np\n'), ((1381, 1402), 'numpy.repeat', 'np.repeat', (['(1.0)', '(p + 1)'], {}), '(1.0, p + 1)\n', (1390, 1402), True, 'import numpy as np\n'), ((1945, 1968), 'numpy.prod', 'np.prod', (['cp_shape[0:-1]'], {}), '(cp_shape[0:-1])\n', (1952, 1968), True, 'import numpy as 
np\n'), ((2594, 2621), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (2602, 2621), True, 'import numpy as np\n'), ((2660, 2687), 'numpy.array', 'np.array', (['y'], {'dtype': 'np.float'}), '(y, dtype=np.float)\n', (2668, 2687), True, 'import numpy as np\n'), ((2726, 2753), 'numpy.array', 'np.array', (['w'], {'dtype': 'np.float'}), '(w, dtype=np.float)\n', (2734, 2753), True, 'import numpy as np\n'), ((3448, 3475), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (3456, 3475), True, 'import numpy as np\n'), ((3514, 3541), 'numpy.array', 'np.array', (['y'], {'dtype': 'np.float'}), '(y, dtype=np.float)\n', (3522, 3541), True, 'import numpy as np\n'), ((3580, 3607), 'numpy.array', 'np.array', (['z'], {'dtype': 'np.float'}), '(z, dtype=np.float)\n', (3588, 3607), True, 'import numpy as np\n'), ((3646, 3673), 'numpy.array', 'np.array', (['w'], {'dtype': 'np.float'}), '(w, dtype=np.float)\n', (3654, 3673), True, 'import numpy as np\n'), ((4200, 4227), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (4208, 4227), True, 'import numpy as np\n'), ((4266, 4293), 'numpy.array', 'np.array', (['y'], {'dtype': 'np.float'}), '(y, dtype=np.float)\n', (4274, 4293), True, 'import numpy as np\n'), ((4332, 4359), 'numpy.array', 'np.array', (['w'], {'dtype': 'np.float'}), '(w, dtype=np.float)\n', (4340, 4359), True, 'import numpy as np\n'), ((5430, 5457), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (5438, 5457), True, 'import numpy as np\n'), ((5496, 5523), 'numpy.array', 'np.array', (['y'], {'dtype': 'np.float'}), '(y, dtype=np.float)\n', (5504, 5523), True, 'import numpy as np\n'), ((5562, 5589), 'numpy.array', 'np.array', (['z'], {'dtype': 'np.float'}), '(z, dtype=np.float)\n', (5570, 5589), True, 'import numpy as np\n'), ((5628, 5655), 'numpy.array', 'np.array', (['w'], {'dtype': 'np.float'}), '(w, dtype=np.float)\n', (5636, 5655), True, 
'import numpy as np\n'), ((7122, 7133), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (7130, 7133), True, 'import numpy as np\n'), ((7179, 7190), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (7187, 7190), True, 'import numpy as np\n'), ((7236, 7247), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (7244, 7247), True, 'import numpy as np\n'), ((7293, 7304), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (7301, 7304), True, 'import numpy as np\n'), ((647, 669), 'numpy.prod', 'np.prod', (['cp_shape[0:k]'], {}), '(cp_shape[0:k])\n', (654, 669), True, 'import numpy as np\n'), ((2091, 2113), 'numpy.prod', 'np.prod', (['cp_shape[0:k]'], {}), '(cp_shape[0:k])\n', (2098, 2113), True, 'import numpy as np\n'), ((621, 644), 'numpy.prod', 'np.prod', (['cp_shape[k:-1]'], {}), '(cp_shape[k:-1])\n', (628, 644), True, 'import numpy as np\n'), ((2065, 2088), 'numpy.prod', 'np.prod', (['cp_shape[k:-1]'], {}), '(cp_shape[k:-1])\n', (2072, 2088), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 15:45:53 2020
@author: Antony
"""
import matplotlib.pyplot as plt
import time
from skimage.draw import random_shapes
import numpy as np
import astra
def cirmask(im, npx=0):
    """Zero out pixels outside a centred circular mask.

    Parameters
    ----------
    im : 2d image, or 3d stack masked slice-wise along axis 2
        (the 3d branch modifies *im* in place).
    npx : number of pixels by which to shrink the mask radius.

    Fix: the 3d branch previously ignored ``npx`` (it masked with the
    full radius while the 2d branch used radius - npx); both branches
    now honour it.
    """
    sz = np.floor(im.shape[0])
    x = np.arange(0, sz)
    x = np.tile(x, (int(sz), 1))
    y = np.swapaxes(x, 0, 1)
    xc = np.round(sz / 2)
    yc = np.round(sz / 2)
    r = np.sqrt((x - xc) ** 2 + (y - yc) ** 2)
    radius = np.floor(sz / 2) - npx
    if im.ndim == 2:
        im = np.where(r > radius, 0, im)
    elif im.ndim == 3:
        for ii in range(im.shape[2]):
            im[:, :, ii] = np.where(r > radius, 0, im[:, :, ii])
    return im
#%% Create a test image
# Phantom generation: random shapes on a square grid, circular mask,
# Poisson counting noise, renormalised to [0, 1].
start=time.time()
sz=64; min_shapes=3; max_shapes=10; min_size=2; max_size=10
image, _ = random_shapes((sz, sz), min_shapes=min_shapes, max_shapes=max_shapes, multichannel=False,
min_size=min_size, max_size=max_size, allow_overlap=True)
# random_shapes paints on a white (255) background; map background to 1.
image = np.where(image==255, 1, image)
image = cirmask(image,5)
image = image/np.max(image)
# Simulate counting statistics at ~ct counts per unit intensity.
ct = 2**8
image = np.random.poisson(lam=(image)*ct, size=None)/ct
image = image/np.max(image)
print((time.time()-start))
plt.figure(1);plt.clf();plt.imshow(image, cmap='jet');plt.show();
#%% Perform first a test for sinogram creation and image reconstruction
npr = image.shape[0] # Number of projections
# Create a basic square volume geometry
vol_geom = astra.create_vol_geom(image.shape[0], image.shape[0])
# Create a parallel beam geometry with 180 angles between 0 and pi, and image.shape[0] detector pixels of width 1.
proj_geom = astra.create_proj_geom('parallel', 1.0, int(1.0*image.shape[0]), np.linspace(0,np.pi,npr,False))
# Create a sinogram using the GPU.
proj_id = astra.create_projector('cuda',proj_geom,vol_geom)
start=time.time()
sinogram_id, sinogram = astra.create_sino(image, proj_id)
print((time.time()-start))
plt.figure(1);plt.clf();plt.imshow(sinogram, cmap='jet');plt.show();
# Create a data object for the reconstruction
rec_id = astra.data2d.create('-vol', vol_geom)
# Set up the parameters for a reconstruction algorithm using the GPU
# cfg = astra.astra_dict('SIRT_CUDA')
cfg = astra.astra_dict('FBP_CUDA')
cfg['ReconstructionDataId'] = rec_id
cfg['ProjectionDataId'] = sinogram_id
cfg['option'] = { 'FilterType': 'shepp-logan' }
# Create the algorithm object from the configuration structure
alg_id = astra.algorithm.create(cfg)
astra.algorithm.run(alg_id)
# Get the result
start=time.time()
rec = astra.data2d.get(rec_id)
print((time.time()-start))
# Clip negative reconstruction values and re-apply the circular mask,
# then show reconstruction and ground truth side by side.
rec = np.where(rec<0, 0, rec)
rec = cirmask(rec)
plt.figure(2);plt.clf();plt.imshow(np.concatenate((rec,image),axis=1), cmap='jet');
plt.colorbar();
plt.show();
# Free the ASTRA (GPU-side) objects.
astra.data2d.delete(sinogram_id)
astra.projector.delete(proj_id)
astra.algorithm.delete(alg_id)
astra.data2d.delete(rec_id)
#%% Perform a test using the SampleGen module
import os
from pathlib import Path
# NOTE(review): hard-coded per-user paths; the second assignment
# overwrites the first -- make this configurable.
p = Path("C:\\Users\\Antony\\Documents\\GitHub\\NNs_in_Tensorflow2\\Libraries")
p = Path("C:\\Users\\Simon\\Documents\\GitHub\\NNs_in_Tensorflow2\\Libraries")
os.chdir(p)
from scipy.ndimage import gaussian_filter, uniform_filter, median_filter
import SampleGen as sg
sml = sg.random_sample()
sz=64; min_shapes=3; max_shapes=10; min_size=2; max_size=10
sml.set_pars( sz=sz, min_shapes=min_shapes, max_shapes=max_shapes, min_size = min_size, max_size=max_size)
sml.create_image()
# Add Poisson counting noise (~ct counts per unit intensity), renormalise.
ct = 2**8
sml.im = np.random.poisson(lam=(sml.im)*ct, size=None)/ct
sml.im = sml.im/np.max(sml.im)
# Apply a filter
sml.im = gaussian_filter(sml.im, sigma=1)
# sml.im = uniform_filter(sml.im, size=3)
# sml.im = uniform_filter(sml.im, size=5)
sml.im = cirmask(sml.im,5)
plt.figure(1);plt.clf();plt.imshow(sml.im, cmap='jet');plt.colorbar();plt.show();
# Build the projection geometry and the sinogram for the sample image.
sml.create_sino_geo()
sml.create_sino()
plt.figure(2);plt.clf();plt.imshow(sml.sinogram, cmap='jet');plt.colorbar();plt.show();
#%% Create a library
# Generate nims (image, sinogram) pairs: random shapes, circular mask,
# optional Poisson noise / Gaussian filtering / random scaling.
nims = 100000
filtering = 1
noise = 1
scaling = 0
sz=32; min_shapes=3; max_shapes=10; min_size=3; max_size=10
ct = 2**8
start = time.time()
im = np.zeros((sz, sz, nims))
s = np.zeros((sz, sz, nims))
# Random per-image scale factors spanning roughly two decades.
sf = np.random.rand(nims,) * 10.0 ** -np.random.uniform(0, 2, nims)
for ii in range(nims):
    sml.set_pars(sz=sz, min_shapes=min_shapes, max_shapes=max_shapes, min_size=min_size, max_size=max_size)
    image = sml.create_image()
    image = cirmask(image, 5)
    if np.max(image) > 0:
        image = image / np.max(image)
    # NOTE(review): the flattened source made the nesting of the
    # renormalisation steps ambiguous; renormalising after each
    # processing stage is idempotent either way.
    if noise == True:
        image = np.random.poisson(lam=image * ct, size=None) / ct
        if np.max(image) > 0:
            image = image / np.max(image)
    if filtering == True:
        image = gaussian_filter(image, sigma=1)
        if np.max(image) > 0:
            image = image / np.max(image)
    if scaling == True:
        image = image * sf[ii]
    sml.im = image
    im[:, :, ii] = image
    s[:, :, ii] = sml.create_sino()
    if np.mod(ii, 100) == 0:
        print(ii)
print((time.time() - start))
# sml.astraclean()
#%%
# Flip through every generated image, one frame per second.
for ii in range(0, nims):
    plt.figure(2)
    plt.clf()
    plt.imshow(im[:, :, ii], cmap='jet')
    plt.colorbar()
    plt.show()
    plt.pause(1)
#%%
import h5py
# Save the library (sinograms, images and generation metadata) to HDF5.
# NOTE(review): the backslash in the file name is Windows-specific --
# confirm this script only runs on Windows.
p = Path('./')
fn = Path("%s\\shapes_random_noise_%dpx_norm.h5" %(p, sz))
h5f = h5py.File(fn, "w")
h5f.create_dataset('Sinograms', data = s)
h5f.create_dataset('Images', data = im)
h5f.create_dataset('ScaleFactor', data = sf)
h5f.create_dataset('Noise', data = noise)
h5f.create_dataset('NImages', data = nims)
h5f.create_dataset('ImageSize', data = sz)
h5f.close()
|
[
"matplotlib.pyplot.clf",
"numpy.floor",
"matplotlib.pyplot.figure",
"astra.data2d.get",
"pathlib.Path",
"SampleGen.random_sample",
"numpy.arange",
"numpy.round",
"astra.create_vol_geom",
"os.chdir",
"astra.create_projector",
"numpy.random.rand",
"skimage.draw.random_shapes",
"matplotlib.pyplot.imshow",
"scipy.ndimage.gaussian_filter",
"matplotlib.pyplot.colorbar",
"numpy.max",
"numpy.swapaxes",
"numpy.random.poisson",
"numpy.linspace",
"matplotlib.pyplot.pause",
"astra.algorithm.delete",
"astra.data2d.create",
"h5py.File",
"matplotlib.pyplot.show",
"numpy.mod",
"astra.algorithm.create",
"numpy.concatenate",
"astra.data2d.delete",
"numpy.random.uniform",
"astra.create_sino",
"numpy.zeros",
"time.time",
"numpy.where",
"astra.astra_dict",
"astra.projector.delete",
"astra.algorithm.run",
"numpy.sqrt"
] |
[((820, 831), 'time.time', 'time.time', ([], {}), '()\n', (829, 831), False, 'import time\n'), ((907, 1063), 'skimage.draw.random_shapes', 'random_shapes', (['(sz, sz)'], {'min_shapes': 'min_shapes', 'max_shapes': 'max_shapes', 'multichannel': '(False)', 'min_size': 'min_size', 'max_size': 'max_size', 'allow_overlap': '(True)'}), '((sz, sz), min_shapes=min_shapes, max_shapes=max_shapes,\n multichannel=False, min_size=min_size, max_size=max_size, allow_overlap\n =True)\n', (920, 1063), False, 'from skimage.draw import random_shapes\n'), ((1090, 1122), 'numpy.where', 'np.where', (['(image == 255)', '(1)', 'image'], {}), '(image == 255, 1, image)\n', (1098, 1122), True, 'import numpy as np\n'), ((1310, 1323), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1320, 1323), True, 'import matplotlib.pyplot as plt\n'), ((1324, 1333), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1331, 1333), True, 'import matplotlib.pyplot as plt\n'), ((1334, 1363), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""jet"""'}), "(image, cmap='jet')\n", (1344, 1363), True, 'import matplotlib.pyplot as plt\n'), ((1364, 1374), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1372, 1374), True, 'import matplotlib.pyplot as plt\n'), ((1554, 1607), 'astra.create_vol_geom', 'astra.create_vol_geom', (['image.shape[0]', 'image.shape[0]'], {}), '(image.shape[0], image.shape[0])\n', (1575, 1607), False, 'import astra\n'), ((1881, 1932), 'astra.create_projector', 'astra.create_projector', (['"""cuda"""', 'proj_geom', 'vol_geom'], {}), "('cuda', proj_geom, vol_geom)\n", (1903, 1932), False, 'import astra\n'), ((1940, 1951), 'time.time', 'time.time', ([], {}), '()\n', (1949, 1951), False, 'import time\n'), ((1977, 2010), 'astra.create_sino', 'astra.create_sino', (['image', 'proj_id'], {}), '(image, proj_id)\n', (1994, 2010), False, 'import astra\n'), ((2042, 2055), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2052, 2055), True, 
'import matplotlib.pyplot as plt\n'), ((2056, 2065), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2063, 2065), True, 'import matplotlib.pyplot as plt\n'), ((2066, 2098), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sinogram'], {'cmap': '"""jet"""'}), "(sinogram, cmap='jet')\n", (2076, 2098), True, 'import matplotlib.pyplot as plt\n'), ((2099, 2109), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2107, 2109), True, 'import matplotlib.pyplot as plt\n'), ((2170, 2207), 'astra.data2d.create', 'astra.data2d.create', (['"""-vol"""', 'vol_geom'], {}), "('-vol', vol_geom)\n", (2189, 2207), False, 'import astra\n'), ((2326, 2354), 'astra.astra_dict', 'astra.astra_dict', (['"""FBP_CUDA"""'], {}), "('FBP_CUDA')\n", (2342, 2354), False, 'import astra\n'), ((2557, 2584), 'astra.algorithm.create', 'astra.algorithm.create', (['cfg'], {}), '(cfg)\n', (2579, 2584), False, 'import astra\n'), ((2586, 2613), 'astra.algorithm.run', 'astra.algorithm.run', (['alg_id'], {}), '(alg_id)\n', (2605, 2613), False, 'import astra\n'), ((2641, 2652), 'time.time', 'time.time', ([], {}), '()\n', (2650, 2652), False, 'import time\n'), ((2660, 2684), 'astra.data2d.get', 'astra.data2d.get', (['rec_id'], {}), '(rec_id)\n', (2676, 2684), False, 'import astra\n'), ((2722, 2747), 'numpy.where', 'np.where', (['(rec < 0)', '(0)', 'rec'], {}), '(rec < 0, 0, rec)\n', (2730, 2747), True, 'import numpy as np\n'), ((2769, 2782), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (2779, 2782), True, 'import matplotlib.pyplot as plt\n'), ((2783, 2792), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2790, 2792), True, 'import matplotlib.pyplot as plt\n'), ((2854, 2868), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2866, 2868), True, 'import matplotlib.pyplot as plt\n'), ((2871, 2881), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2879, 2881), True, 'import matplotlib.pyplot as plt\n'), ((2886, 2918), 'astra.data2d.delete', 
'astra.data2d.delete', (['sinogram_id'], {}), '(sinogram_id)\n', (2905, 2918), False, 'import astra\n'), ((2920, 2951), 'astra.projector.delete', 'astra.projector.delete', (['proj_id'], {}), '(proj_id)\n', (2942, 2951), False, 'import astra\n'), ((2953, 2983), 'astra.algorithm.delete', 'astra.algorithm.delete', (['alg_id'], {}), '(alg_id)\n', (2975, 2983), False, 'import astra\n'), ((2985, 3012), 'astra.data2d.delete', 'astra.data2d.delete', (['rec_id'], {}), '(rec_id)\n', (3004, 3012), False, 'import astra\n'), ((3106, 3181), 'pathlib.Path', 'Path', (['"""C:\\\\Users\\\\Antony\\\\Documents\\\\GitHub\\\\NNs_in_Tensorflow2\\\\Libraries"""'], {}), "('C:\\\\Users\\\\Antony\\\\Documents\\\\GitHub\\\\NNs_in_Tensorflow2\\\\Libraries')\n", (3110, 3181), False, 'from pathlib import Path\n'), ((3187, 3261), 'pathlib.Path', 'Path', (['"""C:\\\\Users\\\\Simon\\\\Documents\\\\GitHub\\\\NNs_in_Tensorflow2\\\\Libraries"""'], {}), "('C:\\\\Users\\\\Simon\\\\Documents\\\\GitHub\\\\NNs_in_Tensorflow2\\\\Libraries')\n", (3191, 3261), False, 'from pathlib import Path\n'), ((3263, 3274), 'os.chdir', 'os.chdir', (['p'], {}), '(p)\n', (3271, 3274), False, 'import os\n'), ((3384, 3402), 'SampleGen.random_sample', 'sg.random_sample', ([], {}), '()\n', (3400, 3402), True, 'import SampleGen as sg\n'), ((3730, 3762), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['sml.im'], {'sigma': '(1)'}), '(sml.im, sigma=1)\n', (3745, 3762), False, 'from scipy.ndimage import gaussian_filter, uniform_filter, median_filter\n'), ((3882, 3895), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3892, 3895), True, 'import matplotlib.pyplot as plt\n'), ((3896, 3905), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3903, 3905), True, 'import matplotlib.pyplot as plt\n'), ((3906, 3936), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sml.im'], {'cmap': '"""jet"""'}), "(sml.im, cmap='jet')\n", (3916, 3936), True, 'import matplotlib.pyplot as plt\n'), ((3937, 3951), 
'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3949, 3951), True, 'import matplotlib.pyplot as plt\n'), ((3952, 3962), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3960, 3962), True, 'import matplotlib.pyplot as plt\n'), ((4011, 4024), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (4021, 4024), True, 'import matplotlib.pyplot as plt\n'), ((4025, 4034), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4032, 4034), True, 'import matplotlib.pyplot as plt\n'), ((4035, 4071), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sml.sinogram'], {'cmap': '"""jet"""'}), "(sml.sinogram, cmap='jet')\n", (4045, 4071), True, 'import matplotlib.pyplot as plt\n'), ((4072, 4086), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4084, 4086), True, 'import matplotlib.pyplot as plt\n'), ((4087, 4097), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4095, 4097), True, 'import matplotlib.pyplot as plt\n'), ((4268, 4279), 'time.time', 'time.time', ([], {}), '()\n', (4277, 4279), False, 'import time\n'), ((4288, 4312), 'numpy.zeros', 'np.zeros', (['(sz, sz, nims)'], {}), '((sz, sz, nims))\n', (4296, 4312), True, 'import numpy as np\n'), ((4316, 4340), 'numpy.zeros', 'np.zeros', (['(sz, sz, nims)'], {}), '((sz, sz, nims))\n', (4324, 4340), True, 'import numpy as np\n'), ((5504, 5514), 'pathlib.Path', 'Path', (['"""./"""'], {}), "('./')\n", (5508, 5514), False, 'from pathlib import Path\n'), ((5523, 5577), 'pathlib.Path', 'Path', (["('%s\\\\shapes_random_noise_%dpx_norm.h5' % (p, sz))"], {}), "('%s\\\\shapes_random_noise_%dpx_norm.h5' % (p, sz))\n", (5527, 5577), False, 'from pathlib import Path\n'), ((5586, 5604), 'h5py.File', 'h5py.File', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (5595, 5604), False, 'import h5py\n'), ((331, 352), 'numpy.floor', 'np.floor', (['im.shape[0]'], {}), '(im.shape[0])\n', (339, 352), True, 'import numpy as np\n'), ((362, 378), 'numpy.arange', 'np.arange', (['(0)', 'sz'], {}), '(0, sz)\n', 
(371, 378), True, 'import numpy as np\n'), ((419, 439), 'numpy.swapaxes', 'np.swapaxes', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (430, 439), True, 'import numpy as np\n'), ((454, 470), 'numpy.round', 'np.round', (['(sz / 2)'], {}), '(sz / 2)\n', (462, 470), True, 'import numpy as np\n'), ((479, 495), 'numpy.round', 'np.round', (['(sz / 2)'], {}), '(sz / 2)\n', (487, 495), True, 'import numpy as np\n'), ((509, 547), 'numpy.sqrt', 'np.sqrt', (['((x - xc) ** 2 + (y - yc) ** 2)'], {}), '((x - xc) ** 2 + (y - yc) ** 2)\n', (516, 547), True, 'import numpy as np\n'), ((1162, 1175), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (1168, 1175), True, 'import numpy as np\n'), ((1200, 1244), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': '(image * ct)', 'size': 'None'}), '(lam=image * ct, size=None)\n', (1217, 1244), True, 'import numpy as np\n'), ((1263, 1276), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (1269, 1276), True, 'import numpy as np\n'), ((1802, 1835), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'npr', '(False)'], {}), '(0, np.pi, npr, False)\n', (1813, 1835), True, 'import numpy as np\n'), ((2804, 2840), 'numpy.concatenate', 'np.concatenate', (['(rec, image)'], {'axis': '(1)'}), '((rec, image), axis=1)\n', (2818, 2840), True, 'import numpy as np\n'), ((3619, 3664), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': '(sml.im * ct)', 'size': 'None'}), '(lam=sml.im * ct, size=None)\n', (3636, 3664), True, 'import numpy as np\n'), ((3685, 3699), 'numpy.max', 'np.max', (['sml.im'], {}), '(sml.im)\n', (3691, 3699), True, 'import numpy as np\n'), ((4347, 4367), 'numpy.random.rand', 'np.random.rand', (['nims'], {}), '(nims)\n', (4361, 4367), True, 'import numpy as np\n'), ((5335, 5348), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (5345, 5348), True, 'import matplotlib.pyplot as plt\n'), ((5349, 5358), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5356, 5358), True, 'import matplotlib.pyplot as 
plt\n'), ((5371, 5407), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im[:, :, ii]'], {'cmap': '"""jet"""'}), "(im[:, :, ii], cmap='jet')\n", (5381, 5407), True, 'import matplotlib.pyplot as plt\n'), ((5419, 5433), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5431, 5433), True, 'import matplotlib.pyplot as plt\n'), ((5446, 5456), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5454, 5456), True, 'import matplotlib.pyplot as plt\n'), ((5464, 5476), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (5473, 5476), True, 'import matplotlib.pyplot as plt\n'), ((1287, 1298), 'time.time', 'time.time', ([], {}), '()\n', (1296, 1298), False, 'import time\n'), ((2019, 2030), 'time.time', 'time.time', ([], {}), '()\n', (2028, 2030), False, 'import time\n'), ((2693, 2704), 'time.time', 'time.time', ([], {}), '()\n', (2702, 2704), False, 'import time\n'), ((4637, 4650), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4643, 4650), True, 'import numpy as np\n'), ((4901, 4932), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['image'], {'sigma': '(1)'}), '(image, sigma=1)\n', (4916, 4932), False, 'from scipy.ndimage import gaussian_filter, uniform_filter, median_filter\n'), ((5185, 5200), 'numpy.mod', 'np.mod', (['ii', '(100)'], {}), '(ii, 100)\n', (5191, 5200), True, 'import numpy as np\n'), ((5240, 5251), 'time.time', 'time.time', ([], {}), '()\n', (5249, 5251), False, 'import time\n'), ((4376, 4405), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2)', 'nims'], {}), '(0, 2, nims)\n', (4393, 4405), True, 'import numpy as np\n'), ((4677, 4690), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4683, 4690), True, 'import numpy as np\n'), ((4737, 4781), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': '(image * ct)', 'size': 'None'}), '(lam=image * ct, size=None)\n', (4754, 4781), True, 'import numpy as np\n'), ((4797, 4810), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4803, 4810), True, 'import 
numpy as np\n'), ((4945, 4958), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4951, 4958), True, 'import numpy as np\n'), ((4841, 4854), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4847, 4854), True, 'import numpy as np\n'), ((4989, 5002), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4995, 5002), True, 'import numpy as np\n'), ((616, 632), 'numpy.floor', 'np.floor', (['(sz / 2)'], {}), '(sz / 2)\n', (624, 632), True, 'import numpy as np\n'), ((739, 755), 'numpy.floor', 'np.floor', (['(sz / 2)'], {}), '(sz / 2)\n', (747, 755), True, 'import numpy as np\n')]
|
import os
from flask import current_app
from flask import flash
from flask import redirect
from flask import render_template
from flask import request
from flask import send_from_directory
from flask import url_for
from werkzeug.utils import secure_filename
from utils import download
from . import main
from .. import bof
from .forms import ImgForm
from .forms import URLForm
from utils import download_image_url
@main.route('/', methods=['GET', 'POST'])
def index():
    """Home page: accept a query image via file upload or via image URL.

    On a valid submission the query image is stored under UPLOAD_DIR and the
    client is redirected to the result page; otherwise the index page is
    rendered again.
    """
    imgform = ImgForm()
    urlform = URLForm()
    # POST with a valid uploaded file.
    if imgform.validate_on_submit():
        uploaded = imgform.fileimg.data
        safe_name = secure_filename(uploaded.filename)
        target = os.path.join(current_app.config['UPLOAD_DIR'], safe_name)
        # Only write the file if we have not stored it before.
        if not os.path.exists(target):
            uploaded.save(target)
        # The leading dot resolves the endpoint inside the current blueprint.
        return redirect(url_for('.result', filename=safe_name))
    # POST with a valid image URL, e.g. https://example.org/Thinking-of-getting-a-cat.png
    elif urlform.validate_on_submit():
        url = urlform.txturl.data
        # Derive a safe local filename from the last URL path segment.
        safe_name = secure_filename(url.split('/')[-1])
        target = os.path.join(current_app.config['UPLOAD_DIR'], safe_name)
        download_image_url(url, target)
        if os.path.exists(target):
            return redirect(url_for('.result', filename=safe_name))
        flash('无法下载指定URL的图片')
        return redirect(url_for('.index'))
    return render_template('index.html')
@main.route('/result', methods=['GET'])
def result():
    """Show the images most similar to a previously uploaded query image."""
    query_name = request.args.get('filename')
    query_path = os.path.join(current_app.config['UPLOAD_DIR'], query_name)
    # Bag-of-features matcher returns the top-k most similar images.
    matches = bof.match(query_path, top_k=10)
    return render_template('result.html', filename=query_name, images=matches)
# string: Accepts any text without a slash (the default).
# int: Accepts integers.
# float: Accepts numerical values containing decimal points.
# path: Similar to a string, but accepts slashes.
# as_attachment – set to True if you want to send this file with a Content-Disposition: attachment header.
# Clients can use this endpoint to download a file from the server to their local machine.
# It also serves the similar result images that are displayed in the web page.
@main.route('/images/<path:file_dir>')
def expose_file(file_dir):
    """Serve a file located under BASE_DIR so the browser can fetch it.

    ``file_dir`` is a slash-containing path relative to BASE_DIR.
    """
    base_dir = current_app.config['BASE_DIR']
    print("显示图像:" + file_dir)
    return send_from_directory(base_dir, file_dir, as_attachment=True)
|
[
"flask.flash",
"flask.request.args.get",
"os.path.exists",
"werkzeug.utils.secure_filename",
"flask.url_for",
"flask.render_template",
"utils.download_image_url",
"flask.send_from_directory",
"os.path.join"
] |
[((1812, 1841), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1827, 1841), False, 'from flask import render_template\n'), ((1913, 1941), 'flask.request.args.get', 'request.args.get', (['"""filename"""'], {}), "('filename')\n", (1929, 1941), False, 'from flask import request\n'), ((1952, 2008), 'os.path.join', 'os.path.join', (["current_app.config['UPLOAD_DIR']", 'filename'], {}), "(current_app.config['UPLOAD_DIR'], filename)\n", (1964, 2008), False, 'import os\n'), ((2098, 2162), 'flask.render_template', 'render_template', (['"""result.html"""'], {'filename': 'filename', 'images': 'images'}), "('result.html', filename=filename, images=images)\n", (2113, 2162), False, 'from flask import render_template\n'), ((2705, 2791), 'flask.send_from_directory', 'send_from_directory', (["current_app.config['BASE_DIR']", 'file_dir'], {'as_attachment': '(True)'}), "(current_app.config['BASE_DIR'], file_dir, as_attachment\n =True)\n", (2724, 2791), False, 'from flask import send_from_directory\n'), ((668, 698), 'werkzeug.utils.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (683, 698), False, 'from werkzeug.utils import secure_filename\n'), ((718, 774), 'os.path.join', 'os.path.join', (["current_app.config['UPLOAD_DIR']", 'filename'], {}), "(current_app.config['UPLOAD_DIR'], filename)\n", (730, 774), False, 'import os\n'), ((790, 814), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (804, 814), False, 'import os\n'), ((1019, 1056), 'flask.url_for', 'url_for', (['""".result"""'], {'filename': 'filename'}), "('.result', filename=filename)\n", (1026, 1056), False, 'from flask import url_for\n'), ((1431, 1487), 'os.path.join', 'os.path.join', (["current_app.config['UPLOAD_DIR']", 'filename'], {}), "(current_app.config['UPLOAD_DIR'], filename)\n", (1443, 1487), False, 'import os\n'), ((1564, 1597), 'utils.download_image_url', 'download_image_url', (['url', 'filepath'], {}), '(url, 
filepath)\n', (1582, 1597), False, 'from utils import download_image_url\n'), ((1613, 1637), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (1627, 1637), False, 'import os\n'), ((1651, 1672), 'flask.flash', 'flash', (['"""无法下载指定URL的图片"""'], {}), "('无法下载指定URL的图片')\n", (1656, 1672), False, 'from flask import flash\n'), ((1701, 1718), 'flask.url_for', 'url_for', (['""".index"""'], {}), "('.index')\n", (1708, 1718), False, 'from flask import url_for\n'), ((1762, 1799), 'flask.url_for', 'url_for', (['""".result"""'], {'filename': 'filename'}), "('.result', filename=filename)\n", (1769, 1799), False, 'from flask import url_for\n')]
|
import unittest
import math
import pyomo.environ as pe
import coramin
import numpy as np
from coramin.relaxations.segments import compute_k_segment_points
class TestUnivariateExp(unittest.TestCase):
    """Spot checks for a piecewise relaxation of exp(x) on [-1.5, 1.5]."""

    @classmethod
    def setUpClass(cls):
        # One shared model; each test clones it before solving.
        m = pe.ConcreteModel()
        m.y = pe.Var()
        m.x = pe.Var(bounds=(-1.5, 1.5))
        m.obj = pe.Objective(expr=m.y, sense=pe.maximize)
        m.pw_exp = coramin.relaxations.PWUnivariateRelaxation()
        m.pw_exp.build(
            x=m.x,
            aux_var=m.y,
            pw_repn='INC',
            shape=coramin.utils.FunctionShape.CONVEX,
            relaxation_side=coramin.utils.RelaxationSide.BOTH,
            f_x_expr=pe.exp(m.x),
        )
        # Two interior breakpoints split the domain into three pieces.
        for pt in (-0.5, 0.5):
            m.pw_exp.add_partition_point(pt)
        m.pw_exp.rebuild()
        cls.model = m

    @classmethod
    def tearDownClass(cls):
        pass

    def test_exp_ub(self):
        # Maximizing y should reach the secant value at the upper bound.
        m = self.model.clone()
        pe.SolverFactory('gurobi_direct').solve(m)
        self.assertAlmostEqual(pe.value(m.y), math.exp(1.5), 4)

    def test_exp_mid(self):
        # Restricting x tightens the attainable maximum of y.
        m = self.model.clone()
        m.x_con = pe.Constraint(expr=m.x <= 0.3)
        pe.SolverFactory('gurobi_direct').solve(m)
        self.assertAlmostEqual(pe.value(m.y), 1.44, 3)

    def test_exp_lb(self):
        # Minimizing y should reach exp at the lower bound of x.
        m = self.model.clone()
        m.obj.sense = pe.minimize
        pe.SolverFactory('gurobi_direct').solve(m)
        self.assertAlmostEqual(pe.value(m.y), math.exp(-1.5), 4)
class TestUnivariate(unittest.TestCase):
    """Validity checks for piecewise univariate relaxations.

    For several bound intervals and partition counts, fix x at sampled
    points and verify the relaxation brackets func(x): minimizing the aux
    var never exceeds func(x) and maximizing it never falls below func(x).
    """

    def helper(self, func, shape, bounds_list, relaxation_class, relaxation_side=coramin.utils.RelaxationSide.BOTH):
        # NOTE: this drives a persistent solver; the order of set_instance /
        # remove_constraint / add_constraint / set_objective calls below is
        # significant and must not be rearranged.
        for lb, ub in bounds_list:
            num_segments_list = [1, 2, 3]
            m = pe.ConcreteModel()
            m.x = pe.Var(bounds=(lb, ub))
            m.aux = pe.Var()
            if relaxation_class is coramin.relaxations.PWUnivariateRelaxation:
                # The generic univariate relaxation needs the function shape
                # and the symbolic expression f(x); the specialized classes
                # (x**2, atan, sin, cos) know their own function.
                m.c = coramin.relaxations.PWUnivariateRelaxation()
                m.c.build(x=m.x,
                          aux_var=m.aux,
                          relaxation_side=relaxation_side,
                          shape=shape,
                          f_x_expr=func(m.x))
            else:
                m.c = relaxation_class()
                m.c.build(x=m.x, aux_var=m.aux, relaxation_side=relaxation_side)
            # Mutable param + equality constraint let us pin x to a sample
            # point without rebuilding the model.
            m.p = pe.Param(mutable=True, initialize=0)
            m.c2 = pe.Constraint(expr=m.x == m.p)
            opt = pe.SolverFactory('gurobi_persistent')
            for num_segments in num_segments_list:
                segment_points = compute_k_segment_points(m.x, num_segments)
                m.c.clear_partitions()
                for pt in segment_points:
                    m.c.add_partition_point(pt)
                    # Register an outer-approximation point at each breakpoint.
                    var_values = pe.ComponentMap()
                    var_values[m.x] = pt
                    m.c.add_oa_point(var_values=var_values)
                m.c.rebuild()
                opt.set_instance(m)
                for _x in [float(i) for i in np.linspace(lb, ub, 10)]:
                    m.p.value = _x
                    # Re-register c2 so the persistent solver picks up the
                    # new value of the mutable param.
                    opt.remove_constraint(m.c2)
                    opt.add_constraint(m.c2)
                    if relaxation_side in {coramin.utils.RelaxationSide.BOTH, coramin.utils.RelaxationSide.UNDER}:
                        # Underestimator check: the minimum of aux must stay <= func(x).
                        m.obj = pe.Objective(expr=m.aux)
                        opt.set_objective(m.obj)
                        res = opt.solve()
                        self.assertEqual(res.solver.termination_condition, pe.TerminationCondition.optimal)
                        self.assertLessEqual(m.aux.value, func(_x) + 1e-10)
                        del m.obj
                    if relaxation_side in {coramin.utils.RelaxationSide.BOTH, coramin.utils.RelaxationSide.OVER}:
                        # Overestimator check: the maximum of aux must stay >= func(x).
                        m.obj = pe.Objective(expr=m.aux, sense=pe.maximize)
                        opt.set_objective(m.obj)
                        res = opt.solve()
                        self.assertEqual(res.solver.termination_condition, pe.TerminationCondition.optimal)
                        self.assertGreaterEqual(m.aux.value, func(_x) - 1e-10)
                        del m.obj

    def test_exp(self):
        # exp is convex on the whole interval, so both sides can be checked.
        self.helper(func=pe.exp, shape=coramin.utils.FunctionShape.CONVEX, bounds_list=[(-1, 1)],
                    relaxation_class=coramin.relaxations.PWUnivariateRelaxation)

    def test_log(self):
        # log is concave on (0, inf), so both sides can be checked.
        self.helper(func=pe.log, shape=coramin.utils.FunctionShape.CONCAVE, bounds_list=[(0.5, 1.5)],
                    relaxation_class=coramin.relaxations.PWUnivariateRelaxation)

    def test_quadratic(self):
        def quadratic_func(x):
            return x**2
        self.helper(func=quadratic_func, shape=None, bounds_list=[(-1, 2)],
                    relaxation_class=coramin.relaxations.PWXSquaredRelaxation)

    def test_arctan(self):
        self.helper(func=pe.atan, shape=None, bounds_list=[(-1, 1), (-1, 0), (0, 1)],
                    relaxation_class=coramin.relaxations.PWArctanRelaxation)
        # NOTE(review): on intervals straddling the inflection at 0 only one
        # side is exercised — presumably the other side is not guaranteed
        # there; confirm against the relaxation's documentation.
        self.helper(func=pe.atan, shape=None, bounds_list=[(-0.1, 1)],
                    relaxation_class=coramin.relaxations.PWArctanRelaxation,
                    relaxation_side=coramin.utils.RelaxationSide.OVER)
        self.helper(func=pe.atan, shape=None, bounds_list=[(-1, 0.1)],
                    relaxation_class=coramin.relaxations.PWArctanRelaxation,
                    relaxation_side=coramin.utils.RelaxationSide.UNDER)

    def test_sin(self):
        self.helper(func=pe.sin, shape=None, bounds_list=[(-1, 1), (-1, 0), (0, 1)],
                    relaxation_class=coramin.relaxations.PWSinRelaxation)
        # Same one-sided treatment as test_arctan for intervals crossing 0.
        self.helper(func=pe.sin, shape=None, bounds_list=[(-0.1, 1)],
                    relaxation_class=coramin.relaxations.PWSinRelaxation,
                    relaxation_side=coramin.utils.RelaxationSide.OVER)
        self.helper(func=pe.sin, shape=None, bounds_list=[(-1, 0.1)],
                    relaxation_class=coramin.relaxations.PWSinRelaxation,
                    relaxation_side=coramin.utils.RelaxationSide.UNDER)

    def test_cos(self):
        self.helper(func=pe.cos, shape=None, bounds_list=[(-1, 1)],
                    relaxation_class=coramin.relaxations.PWCosRelaxation)
class TestFeasibility(unittest.TestCase):
    """Feasibility checks: with x fixed, each relaxation must admit y == f(x).

    Each test minimizes a slack z >= |y - f(x)|; if the relaxation is a valid
    enclosure of f, the optimum is zero and y matches f(x) to 6 decimals.
    The repeated model-building and solve loop from the original tests is
    factored into the two private helpers below; test method names and
    behavior are unchanged.
    """

    @staticmethod
    def _base_model(xlb, xub):
        """Build the shared skeleton: x in [xlb, xub], aux var y, slack z >= 0.

        m.p holds the target value f(x) for the current sample point; it is
        mutable so the same model can be re-solved at several points.
        """
        m = pe.ConcreteModel()
        m.p = pe.Param(initialize=-1, mutable=True)
        m.x = pe.Var(bounds=(xlb, xub))
        m.y = pe.Var()
        m.z = pe.Var(bounds=(0, None))
        return m

    def _assert_feasible_at(self, m, xvals, f):
        """Fix x at each value in xvals and verify the relaxation allows y == f(x).

        Adds z >= y - p and z >= p - y so that minimizing z drives y toward
        the target p = f(x); asserts both optimal termination and the match.
        """
        m.c2 = pe.ConstraintList()
        m.c2.add(m.z >= m.y - m.p)
        m.c2.add(m.z >= m.p - m.y)
        m.obj = pe.Objective(expr=m.z)
        opt = pe.SolverFactory('gurobi_direct')
        for xval in xvals:
            m.x.fix(xval)
            m.p.value = f(xval)
            res = opt.solve(m, tee=False)
            self.assertTrue(res.solver.termination_condition == pe.TerminationCondition.optimal)
            self.assertAlmostEqual(m.y.value, m.p.value, 6)

    def test_univariate_exp(self):
        """exp(x) relaxation with no interior partition points."""
        m = self._base_model(-1, 1)
        m.c = coramin.relaxations.PWUnivariateRelaxation()
        m.c.build(x=m.x, aux_var=m.y, relaxation_side=coramin.utils.RelaxationSide.BOTH,
                  shape=coramin.utils.FunctionShape.CONVEX, f_x_expr=pe.exp(m.x))
        m.c.rebuild()
        self._assert_feasible_at(m, [-1, -0.5, 0, 0.5, 1], math.exp)

    def test_pw_exp(self):
        """exp(x) relaxation with interior partition points at +/-0.25."""
        m = self._base_model(-1, 1)
        m.c = coramin.relaxations.PWUnivariateRelaxation()
        m.c.build(x=m.x, aux_var=m.y, relaxation_side=coramin.utils.RelaxationSide.BOTH,
                  shape=coramin.utils.FunctionShape.CONVEX, f_x_expr=pe.exp(m.x))
        m.c.add_partition_point(-0.25)
        m.c.add_partition_point(0.25)
        m.c.rebuild()
        self._assert_feasible_at(m, [-1, -0.5, 0, 0.5, 1], math.exp)

    def test_univariate_log(self):
        """log(x) relaxation with no interior partition points."""
        m = self._base_model(0.5, 1.5)
        m.c = coramin.relaxations.PWUnivariateRelaxation()
        m.c.build(x=m.x, aux_var=m.y, relaxation_side=coramin.utils.RelaxationSide.BOTH,
                  shape=coramin.utils.FunctionShape.CONCAVE, f_x_expr=pe.log(m.x))
        m.c.rebuild()
        self._assert_feasible_at(m, [0.5, 0.75, 1, 1.25, 1.5], math.log)

    def test_pw_log(self):
        """log(x) relaxation with interior partition points at 0.9 and 1.1."""
        m = self._base_model(0.5, 1.5)
        m.c = coramin.relaxations.PWUnivariateRelaxation()
        m.c.build(x=m.x, aux_var=m.y, relaxation_side=coramin.utils.RelaxationSide.BOTH,
                  shape=coramin.utils.FunctionShape.CONCAVE, f_x_expr=pe.log(m.x))
        m.c.add_partition_point(0.9)
        m.c.add_partition_point(1.1)
        m.c.rebuild()
        self._assert_feasible_at(m, [0.5, 0.75, 1, 1.25, 1.5], math.log)

    def test_x_fixed(self):
        """When x is fixed, the relaxation collapses to the equality y == exp(x)."""
        m = pe.ConcreteModel()
        m.x = pe.Var(bounds=(-1, 1))
        m.y = pe.Var()
        m.x.fix(0)
        m.c = coramin.relaxations.PWUnivariateRelaxation()
        m.c.build(x=m.x, aux_var=m.y, relaxation_side=coramin.utils.RelaxationSide.BOTH,
                  shape=coramin.utils.FunctionShape.CONVEX, f_x_expr=pe.exp(m.x))
        # With x fixed at 0 the relaxation emits a single constraint y == exp(0) == 1.
        self.assertEqual(id(m.c.x_fixed_con.body), id(m.y))
        self.assertEqual(m.c.x_fixed_con.lower, 1.0)
        self.assertEqual(m.c.x_fixed_con.upper, 1.0)

    def test_x_sq(self):
        """x**2 relaxation (dedicated class; no shape or f_x_expr needed)."""
        m = self._base_model(-1, 1)
        m.c = coramin.relaxations.PWXSquaredRelaxation()
        m.c.build(x=m.x, aux_var=m.y, relaxation_side=coramin.utils.RelaxationSide.BOTH)
        self._assert_feasible_at(m, [-1, -0.5, 0, 0.5, 1], lambda v: v ** 2)
|
[
"math.exp",
"pyomo.environ.log",
"pyomo.environ.SolverFactory",
"coramin.relaxations.PWUnivariateRelaxation",
"pyomo.environ.Constraint",
"pyomo.environ.Var",
"pyomo.environ.value",
"pyomo.environ.Objective",
"pyomo.environ.exp",
"coramin.relaxations.segments.compute_k_segment_points",
"numpy.linspace",
"pyomo.environ.Param",
"pyomo.environ.ConcreteModel",
"math.log",
"pyomo.environ.ComponentMap",
"pyomo.environ.ConstraintList",
"coramin.relaxations.PWXSquaredRelaxation"
] |
[((259, 277), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (275, 277), True, 'import pyomo.environ as pe\n'), ((322, 330), 'pyomo.environ.Var', 'pe.Var', ([], {}), '()\n', (328, 330), True, 'import pyomo.environ as pe\n'), ((349, 375), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(-1.5, 1.5)'}), '(bounds=(-1.5, 1.5))\n', (355, 375), True, 'import pyomo.environ as pe\n'), ((397, 442), 'pyomo.environ.Objective', 'pe.Objective', ([], {'expr': 'model.y', 'sense': 'pe.maximize'}), '(expr=model.y, sense=pe.maximize)\n', (409, 442), True, 'import pyomo.environ as pe\n'), ((466, 510), 'coramin.relaxations.PWUnivariateRelaxation', 'coramin.relaxations.PWUnivariateRelaxation', ([], {}), '()\n', (508, 510), False, 'import coramin\n'), ((991, 1024), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_direct"""'], {}), "('gurobi_direct')\n", (1007, 1024), True, 'import pyomo.environ as pe\n'), ((1207, 1241), 'pyomo.environ.Constraint', 'pe.Constraint', ([], {'expr': '(model.x <= 0.3)'}), '(expr=model.x <= 0.3)\n', (1220, 1241), True, 'import pyomo.environ as pe\n'), ((1260, 1293), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_direct"""'], {}), "('gurobi_direct')\n", (1276, 1293), True, 'import pyomo.environ as pe\n'), ((1500, 1533), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_direct"""'], {}), "('gurobi_direct')\n", (1516, 1533), True, 'import pyomo.environ as pe\n'), ((6452, 6470), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (6468, 6470), True, 'import pyomo.environ as pe\n'), ((6485, 6522), 'pyomo.environ.Param', 'pe.Param', ([], {'initialize': '(-1)', 'mutable': '(True)'}), '(initialize=-1, mutable=True)\n', (6493, 6522), True, 'import pyomo.environ as pe\n'), ((6537, 6559), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(-1, 1)'}), '(bounds=(-1, 1))\n', (6543, 6559), True, 'import pyomo.environ as pe\n'), ((6574, 6582), 'pyomo.environ.Var', 'pe.Var', ([], {}), '()\n', 
(6580, 6582), True, 'import pyomo.environ as pe\n'), ((6597, 6621), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(0, None)'}), '(bounds=(0, None))\n', (6603, 6621), True, 'import pyomo.environ as pe\n'), ((6636, 6680), 'coramin.relaxations.PWUnivariateRelaxation', 'coramin.relaxations.PWUnivariateRelaxation', ([], {}), '()\n', (6678, 6680), False, 'import coramin\n'), ((6889, 6908), 'pyomo.environ.ConstraintList', 'pe.ConstraintList', ([], {}), '()\n', (6906, 6908), True, 'import pyomo.environ as pe\n'), ((6995, 7017), 'pyomo.environ.Objective', 'pe.Objective', ([], {'expr': 'm.z'}), '(expr=m.z)\n', (7007, 7017), True, 'import pyomo.environ as pe\n'), ((7032, 7065), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_direct"""'], {}), "('gurobi_direct')\n", (7048, 7065), True, 'import pyomo.environ as pe\n'), ((7437, 7455), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (7453, 7455), True, 'import pyomo.environ as pe\n'), ((7470, 7507), 'pyomo.environ.Param', 'pe.Param', ([], {'initialize': '(-1)', 'mutable': '(True)'}), '(initialize=-1, mutable=True)\n', (7478, 7507), True, 'import pyomo.environ as pe\n'), ((7522, 7544), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(-1, 1)'}), '(bounds=(-1, 1))\n', (7528, 7544), True, 'import pyomo.environ as pe\n'), ((7559, 7567), 'pyomo.environ.Var', 'pe.Var', ([], {}), '()\n', (7565, 7567), True, 'import pyomo.environ as pe\n'), ((7582, 7606), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(0, None)'}), '(bounds=(0, None))\n', (7588, 7606), True, 'import pyomo.environ as pe\n'), ((7621, 7665), 'coramin.relaxations.PWUnivariateRelaxation', 'coramin.relaxations.PWUnivariateRelaxation', ([], {}), '()\n', (7663, 7665), False, 'import coramin\n'), ((7951, 7970), 'pyomo.environ.ConstraintList', 'pe.ConstraintList', ([], {}), '()\n', (7968, 7970), True, 'import pyomo.environ as pe\n'), ((8057, 8079), 'pyomo.environ.Objective', 'pe.Objective', ([], {'expr': 'm.z'}), '(expr=m.z)\n', (8069, 8079), 
True, 'import pyomo.environ as pe\n'), ((8094, 8127), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_direct"""'], {}), "('gurobi_direct')\n", (8110, 8127), True, 'import pyomo.environ as pe\n'), ((8507, 8525), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (8523, 8525), True, 'import pyomo.environ as pe\n'), ((8540, 8577), 'pyomo.environ.Param', 'pe.Param', ([], {'initialize': '(-1)', 'mutable': '(True)'}), '(initialize=-1, mutable=True)\n', (8548, 8577), True, 'import pyomo.environ as pe\n'), ((8592, 8617), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(0.5, 1.5)'}), '(bounds=(0.5, 1.5))\n', (8598, 8617), True, 'import pyomo.environ as pe\n'), ((8632, 8640), 'pyomo.environ.Var', 'pe.Var', ([], {}), '()\n', (8638, 8640), True, 'import pyomo.environ as pe\n'), ((8655, 8679), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(0, None)'}), '(bounds=(0, None))\n', (8661, 8679), True, 'import pyomo.environ as pe\n'), ((8694, 8738), 'coramin.relaxations.PWUnivariateRelaxation', 'coramin.relaxations.PWUnivariateRelaxation', ([], {}), '()\n', (8736, 8738), False, 'import coramin\n'), ((8948, 8967), 'pyomo.environ.ConstraintList', 'pe.ConstraintList', ([], {}), '()\n', (8965, 8967), True, 'import pyomo.environ as pe\n'), ((9054, 9076), 'pyomo.environ.Objective', 'pe.Objective', ([], {'expr': 'm.z'}), '(expr=m.z)\n', (9066, 9076), True, 'import pyomo.environ as pe\n'), ((9091, 9124), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_direct"""'], {}), "('gurobi_direct')\n", (9107, 9124), True, 'import pyomo.environ as pe\n'), ((9500, 9518), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (9516, 9518), True, 'import pyomo.environ as pe\n'), ((9533, 9570), 'pyomo.environ.Param', 'pe.Param', ([], {'initialize': '(-1)', 'mutable': '(True)'}), '(initialize=-1, mutable=True)\n', (9541, 9570), True, 'import pyomo.environ as pe\n'), ((9585, 9610), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(0.5, 1.5)'}), 
'(bounds=(0.5, 1.5))\n', (9591, 9610), True, 'import pyomo.environ as pe\n'), ((9625, 9633), 'pyomo.environ.Var', 'pe.Var', ([], {}), '()\n', (9631, 9633), True, 'import pyomo.environ as pe\n'), ((9648, 9672), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(0, None)'}), '(bounds=(0, None))\n', (9654, 9672), True, 'import pyomo.environ as pe\n'), ((9687, 9731), 'coramin.relaxations.PWUnivariateRelaxation', 'coramin.relaxations.PWUnivariateRelaxation', ([], {}), '()\n', (9729, 9731), False, 'import coramin\n'), ((10015, 10034), 'pyomo.environ.ConstraintList', 'pe.ConstraintList', ([], {}), '()\n', (10032, 10034), True, 'import pyomo.environ as pe\n'), ((10121, 10143), 'pyomo.environ.Objective', 'pe.Objective', ([], {'expr': 'm.z'}), '(expr=m.z)\n', (10133, 10143), True, 'import pyomo.environ as pe\n'), ((10158, 10191), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_direct"""'], {}), "('gurobi_direct')\n", (10174, 10191), True, 'import pyomo.environ as pe\n'), ((10568, 10586), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (10584, 10586), True, 'import pyomo.environ as pe\n'), ((10601, 10623), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(-1, 1)'}), '(bounds=(-1, 1))\n', (10607, 10623), True, 'import pyomo.environ as pe\n'), ((10638, 10646), 'pyomo.environ.Var', 'pe.Var', ([], {}), '()\n', (10644, 10646), True, 'import pyomo.environ as pe\n'), ((10680, 10724), 'coramin.relaxations.PWUnivariateRelaxation', 'coramin.relaxations.PWUnivariateRelaxation', ([], {}), '()\n', (10722, 10724), False, 'import coramin\n'), ((11100, 11118), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (11116, 11118), True, 'import pyomo.environ as pe\n'), ((11133, 11170), 'pyomo.environ.Param', 'pe.Param', ([], {'initialize': '(-1)', 'mutable': '(True)'}), '(initialize=-1, mutable=True)\n', (11141, 11170), True, 'import pyomo.environ as pe\n'), ((11185, 11207), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(-1, 1)'}), 
'(bounds=(-1, 1))\n', (11191, 11207), True, 'import pyomo.environ as pe\n'), ((11222, 11230), 'pyomo.environ.Var', 'pe.Var', ([], {}), '()\n', (11228, 11230), True, 'import pyomo.environ as pe\n'), ((11245, 11269), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(0, None)'}), '(bounds=(0, None))\n', (11251, 11269), True, 'import pyomo.environ as pe\n'), ((11284, 11326), 'coramin.relaxations.PWXSquaredRelaxation', 'coramin.relaxations.PWXSquaredRelaxation', ([], {}), '()\n', (11324, 11326), False, 'import coramin\n'), ((11431, 11450), 'pyomo.environ.ConstraintList', 'pe.ConstraintList', ([], {}), '()\n', (11448, 11450), True, 'import pyomo.environ as pe\n'), ((11537, 11559), 'pyomo.environ.Objective', 'pe.Objective', ([], {'expr': 'm.z'}), '(expr=m.z)\n', (11549, 11559), True, 'import pyomo.environ as pe\n'), ((11574, 11607), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_direct"""'], {}), "('gurobi_direct')\n", (11590, 11607), True, 'import pyomo.environ as pe\n'), ((1084, 1101), 'pyomo.environ.value', 'pe.value', (['model.y'], {}), '(model.y)\n', (1092, 1101), True, 'import pyomo.environ as pe\n'), ((1103, 1116), 'math.exp', 'math.exp', (['(1.5)'], {}), '(1.5)\n', (1111, 1116), False, 'import math\n'), ((1353, 1370), 'pyomo.environ.value', 'pe.value', (['model.y'], {}), '(model.y)\n', (1361, 1370), True, 'import pyomo.environ as pe\n'), ((1593, 1610), 'pyomo.environ.value', 'pe.value', (['model.y'], {}), '(model.y)\n', (1601, 1610), True, 'import pyomo.environ as pe\n'), ((1612, 1626), 'math.exp', 'math.exp', (['(-1.5)'], {}), '(-1.5)\n', (1620, 1626), False, 'import math\n'), ((1884, 1902), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (1900, 1902), True, 'import pyomo.environ as pe\n'), ((1921, 1944), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(lb, ub)'}), '(bounds=(lb, ub))\n', (1927, 1944), True, 'import pyomo.environ as pe\n'), ((1965, 1973), 'pyomo.environ.Var', 'pe.Var', ([], {}), '()\n', (1971, 1973), True, 
'import pyomo.environ as pe\n'), ((2496, 2532), 'pyomo.environ.Param', 'pe.Param', ([], {'mutable': '(True)', 'initialize': '(0)'}), '(mutable=True, initialize=0)\n', (2504, 2532), True, 'import pyomo.environ as pe\n'), ((2552, 2582), 'pyomo.environ.Constraint', 'pe.Constraint', ([], {'expr': '(m.x == m.p)'}), '(expr=m.x == m.p)\n', (2565, 2582), True, 'import pyomo.environ as pe\n'), ((2601, 2638), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_persistent"""'], {}), "('gurobi_persistent')\n", (2617, 2638), True, 'import pyomo.environ as pe\n'), ((7128, 7142), 'math.exp', 'math.exp', (['xval'], {}), '(xval)\n', (7136, 7142), False, 'import math\n'), ((8190, 8204), 'math.exp', 'math.exp', (['xval'], {}), '(xval)\n', (8198, 8204), False, 'import math\n'), ((9191, 9205), 'math.log', 'math.log', (['xval'], {}), '(xval)\n', (9199, 9205), False, 'import math\n'), ((10258, 10272), 'math.log', 'math.log', (['xval'], {}), '(xval)\n', (10266, 10272), False, 'import math\n'), ((710, 725), 'pyomo.environ.exp', 'pe.exp', (['model.x'], {}), '(model.x)\n', (716, 725), True, 'import pyomo.environ as pe\n'), ((2075, 2119), 'coramin.relaxations.PWUnivariateRelaxation', 'coramin.relaxations.PWUnivariateRelaxation', ([], {}), '()\n', (2117, 2119), False, 'import coramin\n'), ((2723, 2766), 'coramin.relaxations.segments.compute_k_segment_points', 'compute_k_segment_points', (['m.x', 'num_segments'], {}), '(m.x, num_segments)\n', (2747, 2766), False, 'from coramin.relaxations.segments import compute_k_segment_points\n'), ((6839, 6850), 'pyomo.environ.exp', 'pe.exp', (['m.x'], {}), '(m.x)\n', (6845, 6850), True, 'import pyomo.environ as pe\n'), ((7824, 7835), 'pyomo.environ.exp', 'pe.exp', (['m.x'], {}), '(m.x)\n', (7830, 7835), True, 'import pyomo.environ as pe\n'), ((8898, 8909), 'pyomo.environ.log', 'pe.log', (['m.x'], {}), '(m.x)\n', (8904, 8909), True, 'import pyomo.environ as pe\n'), ((9891, 9902), 'pyomo.environ.log', 'pe.log', (['m.x'], {}), '(m.x)\n', (9897, 
9902), True, 'import pyomo.environ as pe\n'), ((10883, 10894), 'pyomo.environ.exp', 'pe.exp', (['m.x'], {}), '(m.x)\n', (10889, 10894), True, 'import pyomo.environ as pe\n'), ((2929, 2946), 'pyomo.environ.ComponentMap', 'pe.ComponentMap', ([], {}), '()\n', (2944, 2946), True, 'import pyomo.environ as pe\n'), ((3159, 3182), 'numpy.linspace', 'np.linspace', (['lb', 'ub', '(10)'], {}), '(lb, ub, 10)\n', (3170, 3182), True, 'import numpy as np\n'), ((3460, 3484), 'pyomo.environ.Objective', 'pe.Objective', ([], {'expr': 'm.aux'}), '(expr=m.aux)\n', (3472, 3484), True, 'import pyomo.environ as pe\n'), ((3940, 3983), 'pyomo.environ.Objective', 'pe.Objective', ([], {'expr': 'm.aux', 'sense': 'pe.maximize'}), '(expr=m.aux, sense=pe.maximize)\n', (3952, 3983), True, 'import pyomo.environ as pe\n')]
|
import sys
import re
import time
import argparse
from collections import namedtuple, deque
from itertools import cycle, chain, repeat
import numpy as np
from PIL import Image
import rgbmatrix as rgb
sys.path.append("/home/pi/pixel_art/")
from settings import (NES_PALETTE_HEX, dispmatrix)
from core import *
from sprites.zelda2 import zelda2_animation
from sprites.finalfantasy import finalfantasy_animation
from sprites.megaman2 import megaman2_animation
from sprites.ninjagaiden import ninjagaiden_animation
from sprites.blastermaster import blastermaster_animation
from sprites.dragonwarrior import dragonwarrior_animation
from sprites.supermariobros3 import smb3_animation
from sprites.castlevania3 import castlevania3_animation
from sprites.dragonstrike import dragonstrike_animation
from sprites.excitebike import excitebike_animation
from sprites.kirbysadventure import kirbysadventure_animation
from sprites.lifeforce import lifeforce_animation
from sprites.ducktales import ducktales_animation
from sprites.ghostsandgoblins import ghostsandgoblins_animation
from sprites.batman import batman_animation
from sprites.metalgear import metalgear_animation
from sprites.kabukiquantumfighter import kabukiquantumfighter_animation
def parse_arguments():
class CustomFormatter(argparse.RawDescriptionHelpFormatter):
pass
desc = ("Run 8-bit pixel art animation montage on 32 x 32 RGB LED display")
epilog = """
"""
parser = argparse.ArgumentParser(description=desc,
add_help=False,
epilog=epilog,
formatter_class=CustomFormatter)
opt = parser.add_argument_group("Optional arguments")
opt.add_argument("-c", "--cycletime",
action="store",
dest="cycletime",
help=("Number of seconds to run each animation routine "
"(default: 10)"),
default=10,
type=int,
metavar="INT")
opt.add_argument("-s", "--shuffle",
action="store_true",
dest="shuffle",
help="Shuffle sequence of of animations prior to launch")
opt.add_argument("-a", "--cycleall",
action="store_true",
dest="cycleall",
help="Cycle through all sprites in a scene rather than choosing one at random "
)
opt.add_argument("-h", "--help",
action="help",
help="show this help message and exit")
return parser.parse_args()
def main():
args = parse_arguments()
shuffle = args.shuffle
scenes = [
excitebike_animation,
ghostsandgoblins_animation,
lifeforce_animation,
blastermaster_animation,
metalgear_animation,
zelda2_animation,
dragonwarrior_animation,
ducktales_animation,
megaman2_animation,
ninjagaiden_animation,
batman_animation,
finalfantasy_animation,
castlevania3_animation,
smb3_animation,
kabukiquantumfighter_animation,
dragonstrike_animation,
kirbysadventure_animation,
]
if shuffle:
np.random.shuffle(scenes)
scenes = deque(scenes)
#Clear the display in case anything's still on it.
dispmatrix.Clear()
#Seed the display with black for the first transition
arr = display_sprite(dispmatrix=dispmatrix,
sprite=scenes[0].bg_sprites[0],
bg_sprite=None,
center=True,
xoff=0,
yoff=0,
display=False)
arr1 = np.full((arr.shape[0], arr.shape[1], 3), convert_hex_to_rgb_tuple("000000"), dtype=np.uint8)
while True:
arr1 = animate_sprites(dispmatrix=dispmatrix,
sprite_list=scenes[0].sprite_list,
bg_sprites=scenes[0].bg_sprites,
xoffs=scenes[0].xoffs,
yoffs=scenes[0].yoffs,
frame_time=scenes[0].frame_time,
spbg_ratio=scenes[0].spbg_ratio,
center=scenes[0].center,
bg_scroll_speed=scenes[0].bg_scroll_speed,
cycle_time=args.cycletime,
clear=False,
transition=True,
transition_arr=arr1,
cycles_per_char=scenes[0].cycles_per_char,
cycle_all=args.cycleall
)
scenes.rotate(-1)
if __name__ == "__main__":
try:
main()
# If the script is killed by ctrl-c, clear the display.
except KeyboardInterrupt:
dispmatrix.Clear()
|
[
"sys.path.append",
"numpy.random.shuffle",
"argparse.ArgumentParser",
"collections.deque",
"settings.dispmatrix.Clear"
] |
[((203, 241), 'sys.path.append', 'sys.path.append', (['"""/home/pi/pixel_art/"""'], {}), "('/home/pi/pixel_art/')\n", (218, 241), False, 'import sys\n'), ((1462, 1571), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc', 'add_help': '(False)', 'epilog': 'epilog', 'formatter_class': 'CustomFormatter'}), '(description=desc, add_help=False, epilog=epilog,\n formatter_class=CustomFormatter)\n', (1485, 1571), False, 'import argparse\n'), ((3482, 3495), 'collections.deque', 'deque', (['scenes'], {}), '(scenes)\n', (3487, 3495), False, 'from collections import namedtuple, deque\n'), ((3556, 3574), 'settings.dispmatrix.Clear', 'dispmatrix.Clear', ([], {}), '()\n', (3572, 3574), False, 'from settings import NES_PALETTE_HEX, dispmatrix\n'), ((3442, 3467), 'numpy.random.shuffle', 'np.random.shuffle', (['scenes'], {}), '(scenes)\n', (3459, 3467), True, 'import numpy as np\n'), ((5130, 5148), 'settings.dispmatrix.Clear', 'dispmatrix.Clear', ([], {}), '()\n', (5146, 5148), False, 'from settings import NES_PALETTE_HEX, dispmatrix\n')]
|
#!/usr/bin/python
from __future__ import print_function
from logbot import AutoLogger
import logging
import sys
config = {
'stream' : sys.stdout,
'level' : logging.DEBUG
}
print('instantiating new logbot class')
log = AutoLogger(app_name='testapp', config=config)
@log.autolog
def test(a, b, c, d, *args, **kwargs):
return 'hello world'
@log.autolog
def extest():
raise TypeError('wat')
return None
test('a', 'b', 'c', 'd', 'e', 'f', g='g', h='h')
extest()
log.debug('hello')
|
[
"logbot.AutoLogger"
] |
[((229, 274), 'logbot.AutoLogger', 'AutoLogger', ([], {'app_name': '"""testapp"""', 'config': 'config'}), "(app_name='testapp', config=config)\n", (239, 274), False, 'from logbot import AutoLogger\n')]
|
'''
KnockoffGAN Knockoff Variable Generation
<NAME> (9/27/2018)
'''
#%% Necessary Packages
import numpy as np
from tqdm import tqdm
import tensorflow as tf
import logging
import argparse
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
#%% KnockoffGAN Function
'''
Inputs:
x_train: Training data
lamda: Power network parameter = 0.01
mu: WGAN parameter = 1
'''
logger = logging.getLogger()
def KnockoffGAN (x_train, x_name, lamda = 0.01, mu = 1, mb_size=128, niter=2000):
tf_debug = False
if tf_debug:
run_opts = tf.RunOptions(report_tensor_allocations_upon_oom = True)
config = tf.ConfigProto()
config.log_device_placement=True
config.gpu_options.allow_growth = True
else:
run_opts = None
config = None
#%% Parameters
# 1. # of samples
n = len(x_train[:,0])
# 2. # of features
x_dim = len(x_train[0,:])
# 3. # of random dimensions
z_dim = int(x_dim)
# 4. # of hidden dimensions
h_dim = int(x_dim)
# 5. # of minibatch
# mb_size = 128
# 6. WGAN parameters
lam = 10
lr = 1e-4
#%% Necessary Functions
# 1. Xavier Initialization Definition
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape = size, stddev = xavier_stddev)
# 2. Sample from normal distribution: Random variable generation
def sample_Z(m, n, x_name):
if ((x_name == 'Normal') | (x_name == 'AR_Normal')):
return np.random.normal(0., np.sqrt(1./3000), size = [m, n]).copy()
elif ((x_name == 'Uniform') | (x_name == 'AR_Uniform')):
return np.random.uniform(-3*np.sqrt(1./3000),3*np.sqrt(1./3000),[m,n]).copy()
# 3. Sample from the real data (Mini-batch index sampling)
def sample_X(m, n):
return np.random.permutation(m)[:n].copy()
# 4. Permutation for MINE computation
def Permute (x):
n = len(x[:,0])
idx = np.random.permutation(n)
out = x[idx,:].copy()
return out
# 5. Bernoulli sampling for Swap and Hint variables
def sample_SH(m, n, p):
return np.random.binomial(1, p, [m,n]).copy()
#%% Placeholder inputs
# 1. Feature
X = tf.placeholder(tf.float32, shape = [None, x_dim])
# 2. Feature (Permute)
X_hat = tf.placeholder(tf.float32, shape = [None, x_dim])
# 3. Random Variable
Z = tf.placeholder(tf.float32, shape = [None, z_dim])
# 4. Swap
S = tf.placeholder(tf.float32, shape = [None, x_dim])
# 5. Hint
H = tf.placeholder(tf.float32, shape = [None, x_dim])
#%% Network Building
#%% 1. Discriminator
# Input: Swap (X, tilde X) and Hint
D_W1 = tf.Variable(xavier_init([x_dim + x_dim + x_dim, h_dim]))
D_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
D_W2 = tf.Variable(xavier_init([h_dim,x_dim]))
D_b2 = tf.Variable(tf.zeros(shape=[x_dim]))
theta_D = [D_W1, D_W2, D_b1, D_b2]
#%% 2. WGAN Discriminator
# Input: tilde X
WD_W1 = tf.Variable(xavier_init([x_dim, h_dim]))
WD_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
WD_W2 = tf.Variable(xavier_init([h_dim,1]))
WD_b2 = tf.Variable(tf.zeros(shape=[1]))
theta_WD = [WD_W1, WD_W2, WD_b1, WD_b2]
#%% 3. Generator
# Input: X and Z
G_W1 = tf.Variable(xavier_init([x_dim + z_dim, h_dim]))
G_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
G_W2 = tf.Variable(xavier_init([h_dim,x_dim]))
G_b2 = tf.Variable(tf.zeros(shape=[x_dim]))
theta_G = [G_W1, G_W2, G_b1, G_b2]
#%% 4. MINE
# Input: X and tilde X
# For X
M_W1A = tf.Variable(xavier_init([x_dim]))
M_W1B = tf.Variable(xavier_init([x_dim]))
M_b1 = tf.Variable(tf.zeros(shape=[x_dim]))
# For tilde X
M_W2A = tf.Variable(xavier_init([x_dim]))
M_W2B = tf.Variable(xavier_init([x_dim]))
M_b2 = tf.Variable(tf.zeros(shape=[x_dim]))
# Combine
M_W3 = tf.Variable(xavier_init([x_dim]))
M_b3 = tf.Variable(tf.zeros(shape=[x_dim]))
theta_M = [M_W1A, M_W1B, M_W2A, M_W2B, M_W3, M_b1, M_b2, M_b3]
#%% Functions
# 1. Generator
def generator(x, z):
inputs = tf.concat(axis=1, values = [x, z])
G_h1 = tf.nn.tanh(tf.matmul(inputs, G_W1) + G_b1)
G_out = (tf.matmul(G_h1, G_W2) + G_b2)
return G_out
# 2. Discriminator
def discriminator(sA, sB, h):
inputs = tf.concat(axis=1, values = [sA, sB, h])
D_h1 = tf.nn.tanh(tf.matmul(inputs, D_W1) + D_b1)
D_out = tf.nn.sigmoid(tf.matmul(D_h1, D_W2) + D_b2)
return D_out
# 3. WGAN Discriminator
def WGAN_discriminator(x):
WD_h1 = tf.nn.relu(tf.matmul(x, WD_W1) + WD_b1)
WD_out = (tf.matmul(WD_h1, WD_W2) + WD_b2)
return WD_out
# 4. MINE
def MINE(x, x_hat):
M_h1 = tf.nn.tanh(M_W1A * x + M_W1B * x_hat + M_b1)
M_h2 = tf.nn.tanh(M_W2A * x + M_W2B * x_hat + M_b2)
M_out = (M_W3 * (M_h1 + M_h2) + M_b3)
Exp_M_out = tf.exp(M_out)
return M_out, Exp_M_out
#%% Combination across the networks
# 1. Generater Knockoffs
G_sample = generator(X,Z)
# 2. WGAN Outputs for real and fake
WD_real = WGAN_discriminator(X)
WD_fake = WGAN_discriminator(G_sample)
# 3. Generate swapping (X, tilde X)
SwapA = S * X + (1-S) * G_sample
SwapB = (1-S) * X + S * G_sample
# 4. Discriminator output
# (X, tilde X) is SwapA, SwapB. Hint is generated by H * S
D_out = discriminator(SwapA, SwapB, H*S)
# 5. MINE Computation
# Without permutation
M_out, _ = MINE(X, G_sample)
# Wit permutation
_, Exp_M_out = MINE(X_hat, G_sample)
# 6. WGAN Loss Replacement of Clipping algorithm to Penalty term
# 1. Line 6 in Algorithm 1
eps = tf.random_uniform([mb_size, 1], minval = 0., maxval = 1.)
X_inter = eps*X + (1. - eps) * G_sample
# 2. Line 7 in Algorithm 1
grad = tf.gradients(WGAN_discriminator(X_inter), [X_inter])[0]
grad_norm = tf.sqrt(tf.reduce_sum((grad)**2 + 1e-8, axis = 1))
grad_pen = lam * tf.reduce_mean((grad_norm - 1)**2)
#%% Loss function
# 1. WGAN Loss
WD_loss = tf.reduce_mean(WD_fake) - tf.reduce_mean(WD_real) + grad_pen
# 2. Discriminator loss
D_loss = -tf.reduce_mean(S * (1-H) * tf.log(D_out + 1e-8) + (1-S) * (1-H) * tf.log(1 - D_out + 1e-8))
# 3. MINE Loss
M_loss = tf.reduce_sum( tf.reduce_mean(M_out, axis = 0) - tf.log(tf.reduce_mean(Exp_M_out, axis = 0)) )
# 4. Generator loss
G_loss = - D_loss + mu * -tf.reduce_mean(WD_fake) + lamda * M_loss
# Solver
WD_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(WD_loss, var_list = theta_WD))
D_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(D_loss, var_list = theta_D))
G_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(G_loss, var_list = theta_G))
M_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(-M_loss, var_list = theta_M))
#%% Sessions
if tf_debug:
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer(), options=run_opts)
else:
sess = tf.Session()
sess.run(tf.global_variables_initializer())
#%% Iterations
for it in tqdm(range(niter)):
for dummy_range in range(5):
#%% WGAN, Discriminator and MINE Training
# Random variable generation
Z_mb = sample_Z(mb_size, z_dim, x_name)
# Minibatch sampling
X_idx = sample_X(n,mb_size)
X_mb = x_train[X_idx,:].copy()
X_perm_mb = Permute(X_mb)
# Swap generation
S_mb = sample_SH(mb_size, x_dim, 0.5)
# Hint generation
H_mb = sample_SH(mb_size, x_dim, 0.9)
# 1. WGAN Training
_, WD_loss_curr = sess.run([WD_solver, WD_loss], feed_dict = {X: X_mb, Z: Z_mb, X_hat: X_perm_mb, S: S_mb, H: H_mb}, options=run_opts)
# 2. Discriminator Training
# print('discriminator training')
_, D_loss_curr = sess.run([D_solver, D_loss], feed_dict = {X: X_mb, Z: Z_mb, X_hat: X_perm_mb, S: S_mb, H: H_mb}, options=run_opts)
# 3. MINE Training
# print('mine training')
_, M_loss_curr = sess.run([M_solver, M_loss], feed_dict = {X: X_mb, Z: Z_mb, X_hat: X_perm_mb, S: S_mb, H: H_mb}, options=run_opts)
#%% Generator Training
# Random variable generation
Z_mb = sample_Z(mb_size, z_dim, x_name)
# Minibatch sampling
X_idx = sample_X(n,mb_size)
X_mb = x_train[X_idx,:].copy()
X_perm_mb = Permute(X_mb)
# Swap generation
S_mb = sample_SH(mb_size, x_dim, 0.5)
# Hint generation
H_mb = sample_SH(mb_size, x_dim, 0.0)
# Generator training
# print('gen training')
_, G_loss_curr, G_sample_curr = sess.run([G_solver, G_loss, G_sample], feed_dict = {X: X_mb, Z: Z_mb, X_hat: X_perm_mb, S: S_mb, H: H_mb}, options=run_opts)
#%% Output
#print('last session run')
X_knockoff = sess.run([G_sample], feed_dict = {X: x_train, Z: sample_Z(n, z_dim, x_name)}, options=run_opts)[0]
# X_knockoff = sess.run([G_sample], feed_dict = {X: x_train, Z: sample_Z(n, z_dim, x_name)})[0]
#print('closing session')
sess.close()
tf.reset_default_graph()
return X_knockoff
def init_arg():
parser = argparse.ArgumentParser()
parser.add_argument(
'-i')
parser.add_argument(
'-o')
parser.add_argument(
'--bs', default=128, type=int)
parser.add_argument(
'--it', default=2000, type=int)
parser.add_argument(
'--target')
parser.add_argument(
'--xname', default='Normal', help='Sample distribution [Normal, Uniform]')
parser.add_argument(
'--scale', default=1, type=int)
return parser.parse_args()
if __name__ == "__main__":
args = init_arg()
df = pd.read_csv(args.i)
niter = args.it
use_scale = args.scale
x_name = args.xname
lbl = args.target
features = list(df.columns)
features.remove(lbl)
# scale/normalize dataset
range_scaler = (0, 1)
scaler = MinMaxScaler(feature_range=range_scaler)
x = df[features]
if use_scale:
scaler.fit(x)
x = scaler.transform(x)
else:
x = x.values
x_k = KnockoffGAN(
x,
x_name,
mb_size=args.bs,
niter=niter)
df_k = pd.DataFrame(x_k, columns=features)
df_k[lbl] = df[lbl]
df_k.to_csv(args.o, index=False)
|
[
"tensorflow.reduce_sum",
"argparse.ArgumentParser",
"tensorflow.nn.tanh",
"pandas.read_csv",
"tensorflow.reset_default_graph",
"sklearn.preprocessing.MinMaxScaler",
"tensorflow.ConfigProto",
"tensorflow.matmul",
"tensorflow.sqrt",
"tensorflow.RunOptions",
"pandas.DataFrame",
"tensorflow.concat",
"tensorflow.placeholder",
"tensorflow.exp",
"numpy.random.binomial",
"tensorflow.global_variables_initializer",
"tensorflow.reduce_mean",
"tensorflow.Session",
"tensorflow.random_normal",
"tensorflow.log",
"numpy.random.permutation",
"tensorflow.random_uniform",
"tensorflow.zeros",
"tensorflow.train.AdamOptimizer",
"logging.getLogger",
"numpy.sqrt"
] |
[((394, 413), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (411, 413), False, 'import logging\n'), ((2404, 2451), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, x_dim]'}), '(tf.float32, shape=[None, x_dim])\n', (2418, 2451), True, 'import tensorflow as tf\n'), ((2496, 2543), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, x_dim]'}), '(tf.float32, shape=[None, x_dim])\n', (2510, 2543), True, 'import tensorflow as tf\n'), ((2586, 2633), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, z_dim]'}), '(tf.float32, shape=[None, z_dim])\n', (2600, 2633), True, 'import tensorflow as tf\n'), ((2658, 2705), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, x_dim]'}), '(tf.float32, shape=[None, x_dim])\n', (2672, 2705), True, 'import tensorflow as tf\n'), ((2733, 2780), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, x_dim]'}), '(tf.float32, shape=[None, x_dim])\n', (2747, 2780), True, 'import tensorflow as tf\n'), ((6162, 6217), 'tensorflow.random_uniform', 'tf.random_uniform', (['[mb_size, 1]'], {'minval': '(0.0)', 'maxval': '(1.0)'}), '([mb_size, 1], minval=0.0, maxval=1.0)\n', (6179, 6217), True, 'import tensorflow as tf\n'), ((10092, 10116), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (10114, 10116), True, 'import tensorflow as tf\n'), ((10170, 10195), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10193, 10195), False, 'import argparse\n'), ((10713, 10732), 'pandas.read_csv', 'pd.read_csv', (['args.i'], {}), '(args.i)\n', (10724, 10732), True, 'import pandas as pd\n'), ((10953, 10993), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': 'range_scaler'}), '(feature_range=range_scaler)\n', (10965, 10993), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((11228, 11263), 'pandas.DataFrame', 'pd.DataFrame', (['x_k'], 
{'columns': 'features'}), '(x_k, columns=features)\n', (11240, 11263), True, 'import pandas as pd\n'), ((561, 615), 'tensorflow.RunOptions', 'tf.RunOptions', ([], {'report_tensor_allocations_upon_oom': '(True)'}), '(report_tensor_allocations_upon_oom=True)\n', (574, 615), True, 'import tensorflow as tf\n'), ((635, 651), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (649, 651), True, 'import tensorflow as tf\n'), ((1366, 1416), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': 'size', 'stddev': 'xavier_stddev'}), '(shape=size, stddev=xavier_stddev)\n', (1382, 1416), True, 'import tensorflow as tf\n'), ((2104, 2128), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (2125, 2128), True, 'import numpy as np\n'), ((2994, 3017), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[h_dim]'}), '(shape=[h_dim])\n', (3002, 3017), True, 'import tensorflow as tf\n'), ((3098, 3121), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[x_dim]'}), '(shape=[x_dim])\n', (3106, 3121), True, 'import tensorflow as tf\n'), ((3304, 3327), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[h_dim]'}), '(shape=[h_dim])\n', (3312, 3327), True, 'import tensorflow as tf\n'), ((3406, 3425), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[1]'}), '(shape=[1])\n', (3414, 3425), True, 'import tensorflow as tf\n'), ((3606, 3629), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[h_dim]'}), '(shape=[h_dim])\n', (3614, 3629), True, 'import tensorflow as tf\n'), ((3710, 3733), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[x_dim]'}), '(shape=[x_dim])\n', (3718, 3733), True, 'import tensorflow as tf\n'), ((3962, 3985), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[x_dim]'}), '(shape=[x_dim])\n', (3970, 3985), True, 'import tensorflow as tf\n'), ((4125, 4148), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[x_dim]'}), '(shape=[x_dim])\n', (4133, 4148), True, 'import tensorflow as tf\n'), ((4237, 4260), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': 
'[x_dim]'}), '(shape=[x_dim])\n', (4245, 4260), True, 'import tensorflow as tf\n'), ((4426, 4458), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(1)', 'values': '[x, z]'}), '(axis=1, values=[x, z])\n', (4435, 4458), True, 'import tensorflow as tf\n'), ((4679, 4716), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(1)', 'values': '[sA, sB, h]'}), '(axis=1, values=[sA, sB, h])\n', (4688, 4716), True, 'import tensorflow as tf\n'), ((5155, 5199), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['(M_W1A * x + M_W1B * x_hat + M_b1)'], {}), '(M_W1A * x + M_W1B * x_hat + M_b1)\n', (5165, 5199), True, 'import tensorflow as tf\n'), ((5215, 5259), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['(M_W2A * x + M_W2B * x_hat + M_b2)'], {}), '(M_W2A * x + M_W2B * x_hat + M_b2)\n', (5225, 5259), True, 'import tensorflow as tf\n'), ((5335, 5348), 'tensorflow.exp', 'tf.exp', (['M_out'], {}), '(M_out)\n', (5341, 5348), True, 'import tensorflow as tf\n'), ((6391, 6431), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(grad ** 2 + 1e-08)'], {'axis': '(1)'}), '(grad ** 2 + 1e-08, axis=1)\n', (6404, 6431), True, 'import tensorflow as tf\n'), ((6455, 6491), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((grad_norm - 1) ** 2)'], {}), '((grad_norm - 1) ** 2)\n', (6469, 6491), True, 'import tensorflow as tf\n'), ((7519, 7544), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (7529, 7544), True, 'import tensorflow as tf\n'), ((7640, 7652), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7650, 7652), True, 'import tensorflow as tf\n'), ((1330, 1351), 'tensorflow.sqrt', 'tf.sqrt', (['(in_dim / 2.0)'], {}), '(in_dim / 2.0)\n', (1337, 1351), True, 'import tensorflow as tf\n'), ((4536, 4557), 'tensorflow.matmul', 'tf.matmul', (['G_h1', 'G_W2'], {}), '(G_h1, G_W2)\n', (4545, 4557), True, 'import tensorflow as tf\n'), ((5013, 5036), 'tensorflow.matmul', 'tf.matmul', (['WD_h1', 'WD_W2'], {}), '(WD_h1, WD_W2)\n', (5022, 5036), True, 'import tensorflow as tf\n'), ((6550, 6573), 
'tensorflow.reduce_mean', 'tf.reduce_mean', (['WD_fake'], {}), '(WD_fake)\n', (6564, 6573), True, 'import tensorflow as tf\n'), ((6576, 6599), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['WD_real'], {}), '(WD_real)\n', (6590, 6599), True, 'import tensorflow as tf\n'), ((6805, 6834), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['M_out'], {'axis': '(0)'}), '(M_out, axis=0)\n', (6819, 6834), True, 'import tensorflow as tf\n'), ((7021, 7072), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'lr', 'beta1': '(0.5)'}), '(learning_rate=lr, beta1=0.5)\n', (7043, 7072), True, 'import tensorflow as tf\n'), ((7133, 7184), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'lr', 'beta1': '(0.5)'}), '(learning_rate=lr, beta1=0.5)\n', (7155, 7184), True, 'import tensorflow as tf\n'), ((7243, 7294), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'lr', 'beta1': '(0.5)'}), '(learning_rate=lr, beta1=0.5)\n', (7265, 7294), True, 'import tensorflow as tf\n'), ((7353, 7404), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'lr', 'beta1': '(0.5)'}), '(learning_rate=lr, beta1=0.5)\n', (7375, 7404), True, 'import tensorflow as tf\n'), ((7562, 7595), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7593, 7595), True, 'import tensorflow as tf\n'), ((7670, 7703), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7701, 7703), True, 'import tensorflow as tf\n'), ((2294, 2326), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'p', '[m, n]'], {}), '(1, p, [m, n])\n', (2312, 2326), True, 'import numpy as np\n'), ((4487, 4510), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'G_W1'], {}), '(inputs, G_W1)\n', (4496, 4510), True, 'import tensorflow as tf\n'), ((4745, 4768), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'D_W1'], {}), '(inputs, D_W1)\n', (4754, 4768), 
True, 'import tensorflow as tf\n'), ((4807, 4828), 'tensorflow.matmul', 'tf.matmul', (['D_h1', 'D_W2'], {}), '(D_h1, D_W2)\n', (4816, 4828), True, 'import tensorflow as tf\n'), ((4966, 4985), 'tensorflow.matmul', 'tf.matmul', (['x', 'WD_W1'], {}), '(x, WD_W1)\n', (4975, 4985), True, 'import tensorflow as tf\n'), ((6846, 6879), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['Exp_M_out'], {'axis': '(0)'}), '(Exp_M_out, axis=0)\n', (6860, 6879), True, 'import tensorflow as tf\n'), ((1950, 1974), 'numpy.random.permutation', 'np.random.permutation', (['m'], {}), '(m)\n', (1971, 1974), True, 'import numpy as np\n'), ((6685, 6706), 'tensorflow.log', 'tf.log', (['(D_out + 1e-08)'], {}), '(D_out + 1e-08)\n', (6691, 6706), True, 'import tensorflow as tf\n'), ((6724, 6749), 'tensorflow.log', 'tf.log', (['(1 - D_out + 1e-08)'], {}), '(1 - D_out + 1e-08)\n', (6730, 6749), True, 'import tensorflow as tf\n'), ((6945, 6968), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['WD_fake'], {}), '(WD_fake)\n', (6959, 6968), True, 'import tensorflow as tf\n'), ((1640, 1659), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3000)'], {}), '(1.0 / 3000)\n', (1647, 1659), True, 'import numpy as np\n'), ((1785, 1804), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3000)'], {}), '(1.0 / 3000)\n', (1792, 1804), True, 'import numpy as np\n'), ((1804, 1823), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3000)'], {}), '(1.0 / 3000)\n', (1811, 1823), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 19 13:39:52 2019
@author: <NAME>
"""
import tensorflow as tf
import os
from model.inference_model import InferenceModel
tf.app.flags.DEFINE_string('checkpoints_path', os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'checkpoints/')),
'Path for the test data.')
tf.app.flags.DEFINE_string('export_path_base', os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'model-export/')),
'Directory where to export the model.')
tf.app.flags.DEFINE_integer('model_version', 1, 'Version number of the model.')
tf.app.flags.DEFINE_integer('num_v', 3952,
'Number of visible neurons (Number of movies the users rated.)')
FLAGS = tf.app.flags.FLAGS
def run_inference():
inference_graph=tf.Graph()
with inference_graph.as_default():
model=InferenceModel(FLAGS)
input_data=tf.placeholder(tf.float32, shape=[None, 3952])
ratings=model.inference(input_data)
saver = tf.train.Saver()
with tf.Session(graph=inference_graph) as sess:
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoints_path)
saver.restore(sess, ckpt.model_checkpoint_path)
# Save the model
export_path = os.path.join(tf.compat.as_bytes(FLAGS.export_path_base),
tf.compat.as_bytes('model_v_%s'%str(FLAGS.model_version)))
print('Exporting trained model to %s'%export_path)
builder = tf.saved_model.builder.SavedModelBuilder(export_path)
# create tensors info
predict_tensor_inputs_info = tf.saved_model.utils.build_tensor_info(input_data)
predict_tensor_scores_info = tf.saved_model.utils.build_tensor_info(ratings)
# build prediction signature
prediction_signature = (
tf.saved_model.signature_def_utils.build_signature_def(
inputs={'inputs': predict_tensor_inputs_info},
outputs={'ratings': predict_tensor_scores_info},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
)
)
# save the model
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={
'predict_ratings': prediction_signature
})
builder.save()
if __name__ == "__main__":
run_inference()
|
[
"tensorflow.saved_model.signature_def_utils.build_signature_def",
"tensorflow.train.Saver",
"tensorflow.saved_model.utils.build_tensor_info",
"os.path.dirname",
"model.inference_model.InferenceModel",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.Graph",
"tensorflow.saved_model.builder.SavedModelBuilder",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.compat.as_bytes",
"tensorflow.train.get_checkpoint_state"
] |
[((555, 634), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""model_version"""', '(1)', '"""Version number of the model."""'], {}), "('model_version', 1, 'Version number of the model.')\n", (582, 634), True, 'import tensorflow as tf\n'), ((636, 747), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_v"""', '(3952)', '"""Number of visible neurons (Number of movies the users rated.)"""'], {}), "('num_v', 3952,\n 'Number of visible neurons (Number of movies the users rated.)')\n", (663, 747), True, 'import tensorflow as tf\n'), ((847, 857), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (855, 857), True, 'import tensorflow as tf\n'), ((925, 946), 'model.inference_model.InferenceModel', 'InferenceModel', (['FLAGS'], {}), '(FLAGS)\n', (939, 946), False, 'from model.inference_model import InferenceModel\n'), ((967, 1013), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 3952]'}), '(tf.float32, shape=[None, 3952])\n', (981, 1013), True, 'import tensorflow as tf\n'), ((1085, 1101), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1099, 1101), True, 'import tensorflow as tf\n'), ((1117, 1150), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'inference_graph'}), '(graph=inference_graph)\n', (1127, 1150), True, 'import tensorflow as tf\n'), ((1184, 1237), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['FLAGS.checkpoints_path'], {}), '(FLAGS.checkpoints_path)\n', (1213, 1237), True, 'import tensorflow as tf\n'), ((1615, 1668), 'tensorflow.saved_model.builder.SavedModelBuilder', 'tf.saved_model.builder.SavedModelBuilder', (['export_path'], {}), '(export_path)\n', (1655, 1668), True, 'import tensorflow as tf\n'), ((1754, 1804), 'tensorflow.saved_model.utils.build_tensor_info', 'tf.saved_model.utils.build_tensor_info', (['input_data'], {}), '(input_data)\n', (1792, 1804), True, 'import tensorflow as tf\n'), ((1842, 1889), 
'tensorflow.saved_model.utils.build_tensor_info', 'tf.saved_model.utils.build_tensor_info', (['ratings'], {}), '(ratings)\n', (1880, 1889), True, 'import tensorflow as tf\n'), ((1985, 2216), 'tensorflow.saved_model.signature_def_utils.build_signature_def', 'tf.saved_model.signature_def_utils.build_signature_def', ([], {'inputs': "{'inputs': predict_tensor_inputs_info}", 'outputs': "{'ratings': predict_tensor_scores_info}", 'method_name': 'tf.saved_model.signature_constants.PREDICT_METHOD_NAME'}), "(inputs={'inputs':\n predict_tensor_inputs_info}, outputs={'ratings':\n predict_tensor_scores_info}, method_name=tf.saved_model.\n signature_constants.PREDICT_METHOD_NAME)\n", (2039, 2216), True, 'import tensorflow as tf\n'), ((247, 272), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (262, 272), False, 'import os\n'), ((432, 457), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (447, 457), False, 'import os\n'), ((1364, 1406), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['FLAGS.export_path_base'], {}), '(FLAGS.export_path_base)\n', (1382, 1406), True, 'import tensorflow as tf\n')]
|
'''Author- <NAME>, Email- <EMAIL>, Year- 2022'''
import os,sys
import logging
from os import get_exec_path
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
from tqdm import tqdm
import torch
from self_supervised.core import ssl_loss
from self_supervised.apply import config
sys.path.append(os.path.dirname(__file__))
def pretrain_epoch_MPCS(gpu, current_epoch, epochs, batch_size, train_loader,
model, optimizer, criterion):
model.train()
total_loss = 0
epoch_response_dir = {}
with tqdm(total=batch_size * len(train_loader),
desc=f'Epoch {current_epoch}/{epochs}',
unit='img') as (pbar):
for idx, batch in enumerate(train_loader):
view1, view2 = batch[0], batch[1]
b, c, h, w = view1.size()
#for pytorch tranform
view1 = view1.cuda(gpu, non_blocking=True)
view2 = view2.cuda(gpu, non_blocking=True)
output_view1 = model(view1)
output_view2 = model(view2)
output = torch.cat(
[output_view1.unsqueeze(1),
output_view2.unsqueeze(1)], dim=1)
loss = criterion(output)
curr_loss = loss.item()
total_loss += curr_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
'''logging'''
#logging.info('minibatch: {idx} simCLR running_loss: {loss.item()}')
(pbar.set_postfix)(**{'loss (batch)': loss.item()})
pbar.update(view1.shape[0])
# Prepare epoch reponse and return
epoch_response_dir['model'] = model
epoch_response_dir['loss'] = total_loss/(batch_size*len(train_loader))
epoch_response_dir['image_pair'] = [view1, view2]
return epoch_response_dir
|
[
"os.path.dirname",
"logging.basicConfig"
] |
[((108, 184), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(levelname)s: %(message)s"""'}), "(level=logging.INFO, format='%(levelname)s: %(message)s')\n", (127, 184), False, 'import logging\n'), ((320, 345), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (335, 345), False, 'import os, sys\n')]
|
#!/usr/bin/python
import numpy as np
import sys
import argparse
from assembler import symbolTable, singleInstr, doubleInstr, tripleInstr
from transcoder import key_note_length, offsetArr
from music21 import midi, note, chord
#The stack size and the program size are both 256 for easy addressing
STACK_SIZE = 256
PROG_SIZE = 256
#Create a `vm themed' error
class VMError(Exception):
def __init__(self, value):
self.message = value
def __str__(self):
return self.message
class BalladVM:
#Initialize the stack, stack pointer, et. al. as zero
def __init__(self):
self.stack = np.zeros(STACK_SIZE, dtype=np.uint8)
self.sp = np.uint8(0)
self.pc = np.uint8(0)
self.s_regs = np.zeros(8, dtype=np.uint8)
self.m_regs = np.zeros(8, dtype=np.uint8)
self.progmem = np.zeros(PROG_SIZE, dtype=np.uint8)
#This loads an object from the paired code and static
# memory files
def load_obj(self, obj_fname, statmem_fname):
obj_file = open(obj_fname, 'rb')
statmem_file = open(statmem_fname, 'rb')
statmem = statmem_file.read()
obj = obj_file.read()
for i in range(len(obj)):
self.progmem[i] = ord(obj[i])
self.statmem = []
for i in range(len(statmem)):
self.statmem.append(ord(statmem[i]))
#This loads an object from the proper midi file using music21
def load_midi(self, midi_fname):
midi_file = midi.base.MidiFile()
midi_file.open(midi_fname, 'rb')
midi_file.read()
if len(midi_file.tracks) != 2:
raise VMError(
'Error: Incorrect number of tracks in Ballad program: %d',
len(midi_file.tracks))
else:
#Stream 0 has the static data section
static_stream = midi.translate.midiTrackToStream(
midi_file.tracks[0])
#Stream 1 has the program code section
code_stream = midi.translate.midiTrackToStream(
midi_file.tracks[1])
tmp_note = note.Note()
tmp_chord = chord.Chord()
static_string = ''
code_string = ''
midi_key = -1
#Transcode the static data
for curr_note in static_stream:
midi_num = -1
note_type = type(curr_note)
if note_type == type(tmp_note):
midi_num = curr_note.midi
elif note_type == type(tmp_chord):
midi_num = curr_note[0].midi
if note_type == type(tmp_note) or \
note_type == type(tmp_chord):
if curr_note.duration.quarterLength == key_note_length and midi_key < 0:
midi_key = midi_num
elif midi_key >= 0:
curr_num = offsetArr[:midi_num - midi_key]
static_string += '%1x' % curr_num
midi_key = -1
#Transcode the program code
for curr_note in code_stream:
midi_num = -1
note_type = type(curr_note)
if note_type == type(tmp_note):
midi_num = curr_note.midi
elif note_type == type(tmp_chord):
midi_num = curr_note[0].midi
if note_type == type(tmp_note) or \
note_type == type(tmp_chord):
if curr_note.duration.quarterLength == key_note_length and midi_key < 0:
midi_key = midi_num
else:
curr_num = offsetArr[:midi_num - midi_key]
code_string += '%1x' % curr_num
self.statmem = []
for i in range(0, len(static_string), 2):
self.statmem.append(int(static_string[i: i+2], 16))
for i in range(0, len(code_string), 2):
self.progmem[i/2] = (int(code_string[i: i+2], 16))
#This will run one step of execution of the
# program
def exec_timestep(self):
#Fetch instruction at pc:
curr_opcode = self.progmem[self.pc]
if curr_opcode == 0:
pass
else:
curr_instr = symbolTable[:curr_opcode]
#Deal with the fact that or, and and print are
# all reserved words
if curr_instr == 'or' or \
curr_instr == 'and' \
or curr_instr == 'print':
curr_instr += '_'
func = getattr(self, curr_instr)
curr_instr = curr_instr.replace('_','')
#Determine how many arguments we need
# then run the appropriate instruction
if curr_instr in singleInstr:
self.pc += 1
func(self.progmem[self.pc])
elif curr_instr in doubleInstr:
self.pc += 1
arg1 = self.progmem[self.pc]
self.pc += 1
arg2 = self.progmem[self.pc]
func(arg1, arg2)
elif curr_instr in tripleInstr:
self.pc += 1
arg1 = self.progmem[self.pc]
self.pc += 1
arg2 = self.progmem[self.pc]
self.pc += 1
arg3 = self.progmem[self.pc]
func(arg1, arg2, arg3)
else:
raise VMError('Error: Instruction %s not found near offset %d'
% (curr_instr, self.pc))
self.pc += 1
#Mathematical functions:
def add(self, m0, m1):
self.m_regs[m0] = self.m_regs[m0] + self.m_regs[m1]
def sub(self, m0, m1):
self.m_regs[m0] = self.m_regs[m0] - self.m_regs[m1]
def mul(self, m0, m1):
self.m_regs[m0] = self.m_regs[m0] * self.m_regs[m1]
def div(self, m0, m1):
self.m_regs[m0] = self.m_regs[m0] / self.m_regs[m1]
def xor(self, m0, m1):
self.m_regs[m0] = self.m_regs[m0] ^ self.m_regs[m1]
def or_(self, m0, m1):
self.m_regs[m0] = self.m_regs[m0] | self.m_regs[m1]
def and_(self, m0, m1):
self.m_regs[m0] = self.m_regs[m0] & self.m_regs[m1]
def inv(self, m0):
self.m_regs[m0] = ~self.m_regs[m0]
def inc(self, m0):
self.m_regs[m0] += 1
def dec(self, m0):
self.m_regs[m0] -= 1
#Jump instructions:
#Subtraction of 1 is so that when the PC gets incremented,
# it ends up at in the correct places
def jmp(self, off):
self.pc = off - 1
def jeq(self, m0, m1, off):
if self.m_regs[m0] == self.m_regs[m1]:
self.pc = off - 1
def jne(self, m0, m1, off):
if self.m_regs[m0] != self.m_regs[m1]:
self.pc = off - 1
def jlt(self, m0, m1, off):
if self.m_regs[m0] < self.m_regs[m1]:
self.pc = off - 1
def jgt(self, m0, m1, off):
if self.m_regs[m0] > self.m_regs[m1]:
self.pc = off - 1
def jlte(self, m0, m1, off):
if self.m_regs[m0] <= self.m_regs[m1]:
self.pc = off - 1
def jgte(self, m0, m1, off):
if self.m_regs[m0] >= self.m_regs[m1]:
self.pc = off - 1
def ret(self, m0):
self.pc = self.m_regs[m0] - 1
#Memory instructions:
def push(self, s0):
self.stack[self.sp] = self.s_regs[s0]
self.sp += 1
def pop(self, s0):
self.s_regs[s0] = self.stack[self.sp]
self.sp -= 1
def lstat(self, s0, m0):
self.s_regs[s0] = self.statmem[
self.m_regs[m0]]
def stget(self, s0, m0):
self.s_regs[s0] = self.stack[
self.m_regs[m0]]
def stput(self, m0, s0):
self.stack[
self.m_regs[m0]] = self.s_regs[s0]
# - Move instructions
def movim(self, m0, byte):
self.m_regs[m0] = byte
def movis(self, s0, byte):
self.s_regs[s0] = byte
def movrm(self, m0, s0):
self.m_regs[m0] = self.s_regs[s0]
def movrs(self, s0, m0):
self.s_regs[s0] = self.m_regs[m0]
#Utility instructions
#This prints out a message
def print_(self, s0, s1):
message = (self.stack[
self.s_regs[s0]:
self.s_regs[s0] +
self.s_regs[s1]])
message = ''.join(['%c' % char for char in message])
stackString = ''.join([chr(char) for char in self.stack])
sys.stdout.write(message)
#This reads in to the stack
def read(self, m0, m1):
message = raw_input()
message = message[0:self.m_regs[m1]]
oLen = len(message)
mDiff = self.m_regs[m1] - oLen
message += mDiff * '\x00'
message = [ord(char) for char in message]
self.stack[self.m_regs[m0]:
self.m_regs[m0] +
self.m_regs[m1]] = message
self.m_regs[m1] = oLen
def main():
parser = argparse.ArgumentParser(
description='This is a VM to run Ballad (byte)code')
parser.add_argument('-bc', action='store_true',
help='Run Ballad through bytecode')
parser.add_argument('name', help='The name of the Ballad file(s)')
args = parser.parse_args()
#If we're running in bytecode mode, dispatch to the bytecode loader
if args.bc:
sm_name = args.name + '.smb'
obj_name = args.name + '.ob'
vm = BalladVM()
vm.load_obj(obj_name, sm_name)
else: #Otherwise, use the midi loader
vm = BalladVM()
vm.load_midi(args.name)
#Initialize the PC to the current one - this is how we track exit
# conditions
curr_pc = vm.pc
prev_pc = -1
while curr_pc != prev_pc:
#Loop until the program is done (the PC doesn't move anymore)
vm.exec_timestep()
prev_pc = curr_pc
curr_pc = vm.pc
if __name__ == '__main__':
main()
|
[
"sys.stdout.write",
"numpy.uint8",
"argparse.ArgumentParser",
"numpy.zeros",
"music21.midi.translate.midiTrackToStream",
"music21.midi.base.MidiFile",
"music21.chord.Chord",
"music21.note.Note"
] |
[((7199, 7275), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""This is a VM to run Ballad (byte)code"""'}), "(description='This is a VM to run Ballad (byte)code')\n", (7222, 7275), False, 'import argparse\n'), ((586, 622), 'numpy.zeros', 'np.zeros', (['STACK_SIZE'], {'dtype': 'np.uint8'}), '(STACK_SIZE, dtype=np.uint8)\n', (594, 622), True, 'import numpy as np\n'), ((635, 646), 'numpy.uint8', 'np.uint8', (['(0)'], {}), '(0)\n', (643, 646), True, 'import numpy as np\n'), ((659, 670), 'numpy.uint8', 'np.uint8', (['(0)'], {}), '(0)\n', (667, 670), True, 'import numpy as np\n'), ((687, 714), 'numpy.zeros', 'np.zeros', (['(8)'], {'dtype': 'np.uint8'}), '(8, dtype=np.uint8)\n', (695, 714), True, 'import numpy as np\n'), ((731, 758), 'numpy.zeros', 'np.zeros', (['(8)'], {'dtype': 'np.uint8'}), '(8, dtype=np.uint8)\n', (739, 758), True, 'import numpy as np\n'), ((776, 811), 'numpy.zeros', 'np.zeros', (['PROG_SIZE'], {'dtype': 'np.uint8'}), '(PROG_SIZE, dtype=np.uint8)\n', (784, 811), True, 'import numpy as np\n'), ((1331, 1351), 'music21.midi.base.MidiFile', 'midi.base.MidiFile', ([], {}), '()\n', (1349, 1351), False, 'from music21 import midi, note, chord\n'), ((6798, 6823), 'sys.stdout.write', 'sys.stdout.write', (['message'], {}), '(message)\n', (6814, 6823), False, 'import sys\n'), ((1615, 1668), 'music21.midi.translate.midiTrackToStream', 'midi.translate.midiTrackToStream', (['midi_file.tracks[0]'], {}), '(midi_file.tracks[0])\n', (1647, 1668), False, 'from music21 import midi, note, chord\n'), ((1733, 1786), 'music21.midi.translate.midiTrackToStream', 'midi.translate.midiTrackToStream', (['midi_file.tracks[1]'], {}), '(midi_file.tracks[1])\n', (1765, 1786), False, 'from music21 import midi, note, chord\n'), ((1806, 1817), 'music21.note.Note', 'note.Note', ([], {}), '()\n', (1815, 1817), False, 'from music21 import midi, note, chord\n'), ((1833, 1846), 'music21.chord.Chord', 'chord.Chord', ([], {}), '()\n', (1844, 1846), False, 'from 
music21 import midi, note, chord\n')]
|
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib_venn import venn3, venn3_circles
from sympy import ( symbols, solve )
x = symbols( 'x' )
U = 54
N = 2
C = U - N
A = 6
TGx = 10
SGx = 18
TSx = x + A
Tx = 22
Gx = 29
Sx = 39
TS = solve( ( Tx + Gx + Sx - SGx - TGx - TSx + A ) - C, x )[ 0 ]
TG = TGx - A
SG = SGx - A
T = Tx - TS - TG - A
G = Gx - SG - TG - A
S = Sx - SG - TS - A
v = venn3( subsets=( T, G, TG, S, TS, SG, A ) )
v.get_label_by_id('A').set_text('Tall')
v.get_label_by_id('B').set_text('Green Peas')
v.get_label_by_id('C').set_text('Smooth Peas')
print( 'Plants that are tall and smooth peas {0}'.format( TS + A ) )
print( 'Plants that are not smooth or green {0}'.format( T ) )
print( 'Plants that are not tall but have smooth AND green peas {0}'.format( SG ) )
plt.title( "Pea Plants" )
plt.show()
|
[
"matplotlib.pyplot.title",
"sympy.symbols",
"sympy.solve",
"matplotlib.pyplot.show",
"matplotlib_venn.venn3"
] |
[((172, 184), 'sympy.symbols', 'symbols', (['"""x"""'], {}), "('x')\n", (179, 184), False, 'from sympy import symbols, solve\n'), ((436, 475), 'matplotlib_venn.venn3', 'venn3', ([], {'subsets': '(T, G, TG, S, TS, SG, A)'}), '(subsets=(T, G, TG, S, TS, SG, A))\n', (441, 475), False, 'from matplotlib_venn import venn3, venn3_circles\n'), ((832, 855), 'matplotlib.pyplot.title', 'plt.title', (['"""Pea Plants"""'], {}), "('Pea Plants')\n", (841, 855), True, 'from matplotlib import pyplot as plt\n'), ((858, 868), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (866, 868), True, 'from matplotlib import pyplot as plt\n'), ((281, 329), 'sympy.solve', 'solve', (['(Tx + Gx + Sx - SGx - TGx - TSx + A - C)', 'x'], {}), '(Tx + Gx + Sx - SGx - TGx - TSx + A - C, x)\n', (286, 329), False, 'from sympy import symbols, solve\n')]
|
# -*- coding: utf-8 -*-
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: <NAME> / Coopérative ARTEFACTS <<EMAIL>>
"""
Item Thematic tests
"""
import factory
import pytest
import sys
from django.core.management import call_command
from django.urls import reverse
from parameterized import parameterized
from rest_framework import status
from rest_framework.test import APITestCase
from .factories.item_thematic import ItemThematicFactory
from .fake_data.fake_sound import CleanMediaMixin
from ..models.item_thematic import ItemThematic
# Models related
from ..models.thematic import Thematic
from ..models.item import Item
from .keycloak import get_token
# Expected structure for Item_thematic objects
ITEMTHEMATIC_STRUCTURE = [
('id', int),
('item', dict),
('thematic', dict),
]
# Expected keys for MODEL objects
ITEMTHEMATIC_FIELDS = sorted(
[item[0] for item in ITEMTHEMATIC_STRUCTURE])
@pytest.mark.django_db
class TestItemThematicList(CleanMediaMixin, APITestCase):
"""
This class manage all ItemThematic tests
"""
def setUp(self):
"""
Run needed commands to have a fully working project
"""
get_token(self)
# Create a set of sample data
ItemThematicFactory.create_batch(6)
def test_can_get_item_thematic_list(self):
"""
Ensure ItemThematic objects exists
"""
# kwargs for the related tables
url = reverse('itemthematic-list', kwargs={
'item_pk': 1})
# ORM side
item_thematics = ItemThematic.objects.all()
self.assertEqual(len(item_thematics), 6)
# API side
response = self.client.get(url)
self.assertIsInstance(response.data, list)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
@parameterized.expand(ITEMTHEMATIC_STRUCTURE)
def test_has_valid_item_thematic_values(self, attribute, attribute_type):
"""
Ensure ItemThematic objects have valid values
"""
# kwargs for the related tables
url = reverse('itemthematic-list', kwargs={
'item_pk': 1})
response = self.client.get(url)
self.assertIsInstance(response.data, list)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for item_thematic in response.data:
# Check only expected attributes returned
self.assertEqual(
sorted(item_thematic.keys()), ITEMTHEMATIC_FIELDS)
# Ensure type of each attribute
if attribute_type == str:
self.assertIsInstance(item_thematic[attribute], str)
else:
self.assertIsInstance(item_thematic[attribute], attribute_type)
self.assertIsNot(item_thematic[attribute], '')
def test_get_an_item_thematic(self):
"""
Ensure we can get an ItemThematic objects
using an existing id
"""
item = ItemThematic.objects.first()
# kwargs for the related tables
url = reverse('itemthematic-detail', kwargs={
'item_pk': item.item.id,
'pk': item.thematic.id})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIsInstance(response.data, dict)
def test_create_an_item_thematic(self):
"""
Ensure we can create an ItemThematic object
"""
data = factory.build(
dict,
FACTORY_CLASS=ItemThematicFactory)
# Convert the related entity in dictionnary.
# Then they will be easily converted in JSON format.
data['item'] = 1
data['thematic'] = 2
url = reverse('itemthematic-list', kwargs={
'item_pk': data['item']})
response = self.client.post(url, data, format='json')
# Check only expected attributes returned
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertIsInstance(response.data, dict)
self.assertEqual(
sorted(response.data.keys()),
ITEMTHEMATIC_FIELDS)
# kwargs for the related tables
url = reverse(
'itemthematic-detail',
kwargs={'item_pk': response.data['item']['id'],
'pk': response.data['id']}
)
response_get = self.client.get(url)
self.assertEqual(response_get.status_code, status.HTTP_200_OK)
self.assertIsInstance(response_get.data, dict)
def test_delete_an_item_thematic(self):
"""
Ensure we can delete an ItemThematic object
"""
item = ItemThematic.objects.first()
# Delete this object
# kwargs for the related tables
url = reverse(
'itemthematic-detail', kwargs={
'item_pk': item.item.id,
'pk': item.thematic.id}
)
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
# Ensure ItemThematic removed
# kwargs for the related tables
url_get = reverse(
'itemthematic-detail', kwargs={
'item_pk': item.item.id,
'pk': item.thematic.id}
)
response_get = self.client.get(url_get)
self.assertEqual(response_get.status_code, status.HTTP_404_NOT_FOUND)
|
[
"django.urls.reverse",
"parameterized.parameterized.expand",
"factory.build"
] |
[((1976, 2020), 'parameterized.parameterized.expand', 'parameterized.expand', (['ITEMTHEMATIC_STRUCTURE'], {}), '(ITEMTHEMATIC_STRUCTURE)\n', (1996, 2020), False, 'from parameterized import parameterized\n'), ((1557, 1608), 'django.urls.reverse', 'reverse', (['"""itemthematic-list"""'], {'kwargs': "{'item_pk': 1}"}), "('itemthematic-list', kwargs={'item_pk': 1})\n", (1564, 1608), False, 'from django.urls import reverse\n'), ((2232, 2283), 'django.urls.reverse', 'reverse', (['"""itemthematic-list"""'], {'kwargs': "{'item_pk': 1}"}), "('itemthematic-list', kwargs={'item_pk': 1})\n", (2239, 2283), False, 'from django.urls import reverse\n'), ((3205, 3298), 'django.urls.reverse', 'reverse', (['"""itemthematic-detail"""'], {'kwargs': "{'item_pk': item.item.id, 'pk': item.thematic.id}"}), "('itemthematic-detail', kwargs={'item_pk': item.item.id, 'pk': item.\n thematic.id})\n", (3212, 3298), False, 'from django.urls import reverse\n'), ((3635, 3689), 'factory.build', 'factory.build', (['dict'], {'FACTORY_CLASS': 'ItemThematicFactory'}), '(dict, FACTORY_CLASS=ItemThematicFactory)\n', (3648, 3689), False, 'import factory\n'), ((3900, 3962), 'django.urls.reverse', 'reverse', (['"""itemthematic-list"""'], {'kwargs': "{'item_pk': data['item']}"}), "('itemthematic-list', kwargs={'item_pk': data['item']})\n", (3907, 3962), False, 'from django.urls import reverse\n'), ((4368, 4479), 'django.urls.reverse', 'reverse', (['"""itemthematic-detail"""'], {'kwargs': "{'item_pk': response.data['item']['id'], 'pk': response.data['id']}"}), "('itemthematic-detail', kwargs={'item_pk': response.data['item'][\n 'id'], 'pk': response.data['id']})\n", (4375, 4479), False, 'from django.urls import reverse\n'), ((4950, 5043), 'django.urls.reverse', 'reverse', (['"""itemthematic-detail"""'], {'kwargs': "{'item_pk': item.item.id, 'pk': item.thematic.id}"}), "('itemthematic-detail', kwargs={'item_pk': item.item.id, 'pk': item.\n thematic.id})\n", (4957, 5043), False, 'from django.urls import 
reverse\n'), ((5310, 5403), 'django.urls.reverse', 'reverse', (['"""itemthematic-detail"""'], {'kwargs': "{'item_pk': item.item.id, 'pk': item.thematic.id}"}), "('itemthematic-detail', kwargs={'item_pk': item.item.id, 'pk': item.\n thematic.id})\n", (5317, 5403), False, 'from django.urls import reverse\n')]
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent import futures
import os
import signal
import tempfile
import time
import unittest
import persistent_queue
class TestQueueSingleProcess(unittest.TestCase):
def test_empty(self):
with persistent_queue.Queue(":memory:") as q:
self.assertEqual(list(q.get()), [])
def test_put_single(self):
with persistent_queue.Queue(":memory:") as q:
q.put(42)
self.assertEqual(list(q.get()), [42])
def test_put_none(self):
with persistent_queue.Queue(":memory:") as q:
q.put(None)
self.assertEqual(list(q.get()), [None])
def test_put_with_get(self):
with persistent_queue.Queue(":memory:") as q:
q.put(42)
q.put(43)
self.assertEqual(list(q.get()), [42, 43])
q.put(12)
q.put(13)
self.assertEqual(list(q.get()), [12, 13])
def test_interrupted_get(self):
with persistent_queue.Queue(":memory:") as q:
for x in [42, 43, 44]:
q.put(x)
for x in q.get():
if x == 42:
continue
if x == 43:
break
self.fail("Unexpected value: {}".format(x))
self.assertEqual(list(q.get()), [43, 44])
def test_get_blocking_wakes_up(self):
with persistent_queue.Queue(":memory:") as q:
with futures.ThreadPoolExecutor(max_workers=2) as executor:
def consumer():
self.assertEqual(next(q.get_blocking(tick=5)), "foo")
c = executor.submit(consumer)
# Give it enough time to create the database and wait.
time.sleep(0.4)
def producer():
q.put("foo")
p = executor.submit(producer)
p.result()
# The consumer now should finish immediately.
# Let's give it a short grace period.
start = time.time()
c.result()
self.assertLess(time.time() - start,
1,
msg="Consumer wasn't triggered soon enough")
def test_concurrent_load(self):
count = 20
with persistent_queue.Queue(":memory:") as q:
def consumer(client):
collected = []
while len(collected) < count * count:
# TODO
collected.extend(q.get(client=client))
#time.sleep(1)
collected.sort(key=lambda pair: pair[0])
self.assertEqual(collected,
[(id, i) for id in range(count) for i in range(count)])
def producer(tag):
for i in range(count):
q.put((tag, i))
with futures.ThreadPoolExecutor(max_workers=2 * count) as executor:
consumers = []
for client in range(count):
c = executor.submit(consumer, client)
consumers.append(c)
producers = []
for tag in range(count):
p = executor.submit(producer, tag)
producers.append(p)
# Wait for all to finish successfully.
for p in producers:
p.result()
for c in consumers:
c.result()
class TestQueueMultiProcess(unittest.TestCase):
@staticmethod
def _kill_self():
"""Kill the current process as hard as possible."""
os.kill(os.getpid(), signal.SIGKILL)
@staticmethod
def _put_and_die(database, elements):
"""Put given elements into the database and die horribly."""
with persistent_queue.Queue(database) as q:
for item in elements:
q.put(item)
TestQueueMultiProcess._kill_self()
@staticmethod
def _get_and_die(database, last, tick=0.5):
"""Get elements until `last` is seen and die horribly."""
with persistent_queue.Queue(database) as q:
for item in q.get_blocking(tick=tick):
if item == last:
TestQueueMultiProcess._kill_self()
def test_killed_during_get(self):
with tempfile.TemporaryDirectory() as directory:
database = os.path.join(directory, "database")
with persistent_queue.Queue(database) as q:
for x in [42, 43, 44]:
q.put(x)
with futures.ProcessPoolExecutor(max_workers=1) as executor:
executor.submit(TestQueueMultiProcess._get_and_die, database,
43).exception()
with persistent_queue.Queue(database) as q:
self.assertEqual(list(q.get()), [43, 44])
def test_killed_after_put(self):
with tempfile.TemporaryDirectory() as directory:
database = os.path.join(directory, "database")
with futures.ProcessPoolExecutor(max_workers=1) as executor:
executor.submit(TestQueueMultiProcess._put_and_die, database,
[42]).exception()
with persistent_queue.Queue(database) as q:
self.assertEqual(list(q.get()), [42])
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"tempfile.TemporaryDirectory",
"os.getpid",
"concurrent.futures.ProcessPoolExecutor",
"time.sleep",
"time.time",
"persistent_queue.Queue",
"concurrent.futures.ThreadPoolExecutor",
"os.path.join"
] |
[((5239, 5254), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5252, 5254), False, 'import unittest\n'), ((807, 841), 'persistent_queue.Queue', 'persistent_queue.Queue', (['""":memory:"""'], {}), "(':memory:')\n", (829, 841), False, 'import persistent_queue\n'), ((929, 963), 'persistent_queue.Queue', 'persistent_queue.Queue', (['""":memory:"""'], {}), "(':memory:')\n", (951, 963), False, 'import persistent_queue\n'), ((1067, 1101), 'persistent_queue.Queue', 'persistent_queue.Queue', (['""":memory:"""'], {}), "(':memory:')\n", (1089, 1101), False, 'import persistent_queue\n'), ((1213, 1247), 'persistent_queue.Queue', 'persistent_queue.Queue', (['""":memory:"""'], {}), "(':memory:')\n", (1235, 1247), False, 'import persistent_queue\n'), ((1458, 1492), 'persistent_queue.Queue', 'persistent_queue.Queue', (['""":memory:"""'], {}), "(':memory:')\n", (1480, 1492), False, 'import persistent_queue\n'), ((1794, 1828), 'persistent_queue.Queue', 'persistent_queue.Queue', (['""":memory:"""'], {}), "(':memory:')\n", (1816, 1828), False, 'import persistent_queue\n'), ((2569, 2603), 'persistent_queue.Queue', 'persistent_queue.Queue', (['""":memory:"""'], {}), "(':memory:')\n", (2591, 2603), False, 'import persistent_queue\n'), ((3691, 3702), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3700, 3702), False, 'import os\n'), ((3851, 3883), 'persistent_queue.Queue', 'persistent_queue.Queue', (['database'], {}), '(database)\n', (3873, 3883), False, 'import persistent_queue\n'), ((4113, 4145), 'persistent_queue.Queue', 'persistent_queue.Queue', (['database'], {}), '(database)\n', (4135, 4145), False, 'import persistent_queue\n'), ((4313, 4342), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (4340, 4342), False, 'import tempfile\n'), ((4374, 4409), 'os.path.join', 'os.path.join', (['directory', '"""database"""'], {}), "(directory, 'database')\n", (4386, 4409), False, 'import os\n'), ((4834, 4863), 'tempfile.TemporaryDirectory', 
'tempfile.TemporaryDirectory', ([], {}), '()\n', (4861, 4863), False, 'import tempfile\n'), ((4895, 4930), 'os.path.join', 'os.path.join', (['directory', '"""database"""'], {}), "(directory, 'database')\n", (4907, 4930), False, 'import os\n'), ((1846, 1887), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': '(2)'}), '(max_workers=2)\n', (1872, 1887), False, 'from concurrent import futures\n'), ((2100, 2115), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (2110, 2115), False, 'import time\n'), ((2338, 2349), 'time.time', 'time.time', ([], {}), '()\n', (2347, 2349), False, 'import time\n'), ((3060, 3109), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': '(2 * count)'}), '(max_workers=2 * count)\n', (3086, 3109), False, 'from concurrent import futures\n'), ((4421, 4453), 'persistent_queue.Queue', 'persistent_queue.Queue', (['database'], {}), '(database)\n', (4443, 4453), False, 'import persistent_queue\n'), ((4522, 4564), 'concurrent.futures.ProcessPoolExecutor', 'futures.ProcessPoolExecutor', ([], {'max_workers': '(1)'}), '(max_workers=1)\n', (4549, 4564), False, 'from concurrent import futures\n'), ((4700, 4732), 'persistent_queue.Queue', 'persistent_queue.Queue', (['database'], {}), '(database)\n', (4722, 4732), False, 'import persistent_queue\n'), ((4943, 4985), 'concurrent.futures.ProcessPoolExecutor', 'futures.ProcessPoolExecutor', ([], {'max_workers': '(1)'}), '(max_workers=1)\n', (4970, 4985), False, 'from concurrent import futures\n'), ((5123, 5155), 'persistent_queue.Queue', 'persistent_queue.Queue', (['database'], {}), '(database)\n', (5145, 5155), False, 'import persistent_queue\n'), ((2393, 2404), 'time.time', 'time.time', ([], {}), '()\n', (2402, 2404), False, 'import time\n')]
|
from features.importers.venepaikka_harbors.importer import VenepaikkaHarborsClient
from features.models import ContactInfo, Feature
from features.tests.factories import ContactInfoFactory
HARBORS_URL = VenepaikkaHarborsClient.url
HARBOR_ID = "SGFyYm9yTm9kZTpiNzE0ODE1NC1kYmE5LTRlM2ItOWQ2ZS1jNTYzNmEyNWFhMzk="
def test_contact_information_is_imported(requests_mock, importer, harbors_response):
requests_mock.post(HARBORS_URL, json=harbors_response)
importer.import_features()
feature = Feature.objects.get(source_id=HARBOR_ID)
contact_info = feature.contact_info
assert contact_info.street_address == "Ramsaynranta 4"
assert contact_info.postal_code == "00330"
assert contact_info.municipality == "Helsinki"
assert contact_info.phone_number == "+358501234567"
assert contact_info.email == "<EMAIL>"
def test_contact_info_is_deleted(requests_mock, importer, harbors_response):
"""When address data is not provided, existing ContactInfo will get deleted."""
ContactInfoFactory(
feature__source_type=importer.get_source_type(), feature__source_id=HARBOR_ID
)
empty_fields = [
"name",
"streetAddress",
"zipCode",
"municipality",
"phone",
"email",
]
for harbor in harbors_response["data"]["harbors"]["edges"]:
for field in empty_fields:
harbor["node"]["properties"][field] = ""
requests_mock.post(HARBORS_URL, json=harbors_response)
importer.import_features()
assert ContactInfo.objects.count() == 0
|
[
"features.models.Feature.objects.get",
"features.models.ContactInfo.objects.count"
] |
[((503, 543), 'features.models.Feature.objects.get', 'Feature.objects.get', ([], {'source_id': 'HARBOR_ID'}), '(source_id=HARBOR_ID)\n', (522, 543), False, 'from features.models import ContactInfo, Feature\n'), ((1520, 1547), 'features.models.ContactInfo.objects.count', 'ContactInfo.objects.count', ([], {}), '()\n', (1545, 1547), False, 'from features.models import ContactInfo, Feature\n')]
|
# MIT License
# Copyright 2017,Code 4 Canada
# written by and for the bicycle parking project, a joint project of
# Civic Tech Toronto, Cycle Toronto, Code 4 Canada, and the
# City of Toronto
#
# Modified 2017 10 28
# Purpose add geocode to view
#
# Modified 2018 02 27
# Purpose added code to write to separate picture table
#
# Modified 2018 05 03
# Purpose added endpoint to handle beta user comment submission
#
# Modified 2018 06 01
# Purpose add location endpoint
#
# Modified 2018 07 12
# Purpose add dashboard endpoint
#
# Modified
# Purpose
#
from django.views.decorators.csrf import csrf_exempt
import json
import base64
from django.shortcuts import render
from django.http import JsonResponse
from django.http import HttpResponse
from django.core.files.base import ContentFile
from datetime import datetime
from rest_framework import generics
from rest_framework.parsers import FileUploadParser
from rest_framework.views import APIView
from rest_framework.renderers import JSONRenderer
import os.path
from rest_framework.response import Response
from rest_framework import status
from bicycleparking.serializers import SurveyAnswerSerializer
from bicycleparking.serializers import BetaCommentSerializer
from bicycleparking.models import SurveyAnswer
from bicycleparking.models import Picture
from bicycleparking.models import BetaComments
from bicycleparking.models import Approval
from bicycleparking.models import Event
from bicycleparking.photos.gcpuploader import GCPUploader
from bicycleparking.geocode import Geocode
from bicycleparking.LocationData import LocationData
from bicycleparking.CollectedData import CollectedData
from bicycleparking.Moderate import Moderate
# Create your views here.
def index(request):
return render(request, 'bicycleparking/index.html', {})
class SurveyAnswerList(generics.ListCreateAPIView):
"""Generates the main table entries from the user's survey input, generates
the geographical aggregation data (closest and closest major intersection),
and accesses the survey data to obtain the URI for a picture submitted and
stored separately."""
queryset = SurveyAnswer.objects.all()
serializer_class = SurveyAnswerSerializer
def perform_create(self, serializer):
"""Executes the HTTP POST request by creating four objects: the survey
answer using the serializer and the aggregate geographic data (Geocode)."""
answer = serializer.save()
geocode = Geocode(answer, ipAddress=self.request.META['REMOTE_ADDR'])
geocode.output()
class BetaCommentList(generics.ListCreateAPIView):
"""Generic comments section for the beta release of the application.
Users can submit any comments about the application.
"""
queryset = BetaComments.objects.all()
serializer_class = BetaCommentSerializer
def perform_create(self, serializer):
"""Executes the HTTP POST request by creating four objects: the survey
answer using the serializer, the aggregate geographic data (Geocode)
and event record using the geocode class, and the picture record."""
serializer.save()
class DashboardRequest (APIView) :
"""Wraps the location name object for retrieving data from the LocationData
object."""
def post (self, request) :
"""Takes a set of POST parameters containing the limits of the
map viewport and returns a JSON string containing the details
of all the approved pins in the selected rectangle.
The data returned by this call will depend on the settings in
the CollectedData object, but they will generally include the
names of the closest and the closest major intersection, the
time of the request and the span of time requested for parking,
the problem as defined by the user and a URI describing the
picture (if any) associated with the request."""
data = request.body.decode ('utf-8')
if data :
param = json.loads (data)
else :
param = {}
return self.access (param)
def get (self, request) :
"""
Attempts to get all of the data for Edmonton. Similar to the
POST method, but it doesn't include additional parameters."""
return self.access ({})
def access (self, param) :
"""Provides access to the database for both POST and GET requests."""
# print(param)
upLeft = lowRight = None
if 'upper_left' in param :
upLeft = param ['upper_left']
if 'lower_right' in param :
lowRight = param ['lower_right']
data = CollectedData (upLeft, lowRight)
result = { 'dashboard' : data.get () }
return JsonResponse (result)
class LocationNameRequest (APIView) :
"""Wraps the location name object for retrieving data from the LocationData
object."""
decorators = [ csrf_exempt ]
def post (self, request) :
"""Takes a set of GET or POST parameters containing the lat/long and returns a JSON
string containing the names of the closest and the closest major intersection;
note, if the closest intersection is a major intersection, these fields will
contain the same value."""
data = request.body.decode ('utf-8')
if data :
param = json.loads (data)
else :
param = {}
# print(param)
data = LocationData (param ['latitude'], param ['longitude'])
return JsonResponse (data.getIntersectionNames ())
class DownloadPicture(APIView):
uploader = GCPUploader()
def get(self, request, filename, format=None):
if filename:
try:
ctype = "image/" + os.path.splitext(filename)[1]
return HttpResponse(self.uploader.read (filename), content_type=ctype)
except:
return HttpResponse(status=500)
else:
return HttpResponse(status=500)
class UploadPicture(APIView):
renderer_classes = (JSONRenderer, )
uploader = GCPUploader()
def post(self, request, filename, format=None):
file_obj = self.request.data['picture']
ipAddress = request.META['REMOTE_ADDR']
format, imgstr = file_obj.split(';base64,')
ext = format.split('/')[-1]
file = ContentFile(base64.b64decode(imgstr), name='temp.' + ext)
print ("upload source address = {0}".format (ipAddress))
uri = 'test/picture'
try:
uri = self.uploader.write(filename, file)
except:
print('Picture upload failed. Are the credentials accurate?')
pic = Picture (photo_uri = uri)
pic.save ()
return Response({ 's3_name' : uri, 'id': pic.id})
def submissions_to_moderate(request):
if request.method == 'POST':
event_id = request.POST.get('event_id', None)
if event_id:
event = Event.objects.get(id=event_id)
Approval.objects.get_or_create(approved=event)
context = {}
approved_event_ids = Approval.objects.values_list('approved') # already approved events
unapproved_events = Event.objects.exclude(id__in=approved_event_ids) # only show unapproved events
context ['unapproved_events'] = Moderate ().getUnmoderated ()
return render(request, 'bicycleparking/moderation.html', context)
|
[
"bicycleparking.models.Approval.objects.values_list",
"bicycleparking.photos.gcpuploader.GCPUploader",
"bicycleparking.models.Picture",
"json.loads",
"django.http.HttpResponse",
"bicycleparking.models.BetaComments.objects.all",
"django.http.JsonResponse",
"base64.b64decode",
"bicycleparking.geocode.Geocode",
"bicycleparking.models.SurveyAnswer.objects.all",
"bicycleparking.CollectedData.CollectedData",
"rest_framework.response.Response",
"bicycleparking.models.Approval.objects.get_or_create",
"bicycleparking.Moderate.Moderate",
"django.shortcuts.render",
"bicycleparking.models.Event.objects.get",
"bicycleparking.models.Event.objects.exclude",
"bicycleparking.LocationData.LocationData"
] |
[((1751, 1799), 'django.shortcuts.render', 'render', (['request', '"""bicycleparking/index.html"""', '{}'], {}), "(request, 'bicycleparking/index.html', {})\n", (1757, 1799), False, 'from django.shortcuts import render\n'), ((2134, 2160), 'bicycleparking.models.SurveyAnswer.objects.all', 'SurveyAnswer.objects.all', ([], {}), '()\n', (2158, 2160), False, 'from bicycleparking.models import SurveyAnswer\n'), ((2765, 2791), 'bicycleparking.models.BetaComments.objects.all', 'BetaComments.objects.all', ([], {}), '()\n', (2789, 2791), False, 'from bicycleparking.models import BetaComments\n'), ((5630, 5643), 'bicycleparking.photos.gcpuploader.GCPUploader', 'GCPUploader', ([], {}), '()\n', (5641, 5643), False, 'from bicycleparking.photos.gcpuploader import GCPUploader\n'), ((6096, 6109), 'bicycleparking.photos.gcpuploader.GCPUploader', 'GCPUploader', ([], {}), '()\n', (6107, 6109), False, 'from bicycleparking.photos.gcpuploader import GCPUploader\n'), ((7101, 7141), 'bicycleparking.models.Approval.objects.values_list', 'Approval.objects.values_list', (['"""approved"""'], {}), "('approved')\n", (7129, 7141), False, 'from bicycleparking.models import Approval\n'), ((7192, 7240), 'bicycleparking.models.Event.objects.exclude', 'Event.objects.exclude', ([], {'id__in': 'approved_event_ids'}), '(id__in=approved_event_ids)\n', (7213, 7240), False, 'from bicycleparking.models import Event\n'), ((7349, 7407), 'django.shortcuts.render', 'render', (['request', '"""bicycleparking/moderation.html"""', 'context'], {}), "(request, 'bicycleparking/moderation.html', context)\n", (7355, 7407), False, 'from django.shortcuts import render\n'), ((2466, 2525), 'bicycleparking.geocode.Geocode', 'Geocode', (['answer'], {'ipAddress': "self.request.META['REMOTE_ADDR']"}), "(answer, ipAddress=self.request.META['REMOTE_ADDR'])\n", (2473, 2525), False, 'from bicycleparking.geocode import Geocode\n'), ((4660, 4691), 'bicycleparking.CollectedData.CollectedData', 'CollectedData', (['upLeft', 'lowRight'], 
{}), '(upLeft, lowRight)\n', (4673, 4691), False, 'from bicycleparking.CollectedData import CollectedData\n'), ((4755, 4775), 'django.http.JsonResponse', 'JsonResponse', (['result'], {}), '(result)\n', (4767, 4775), False, 'from django.http import JsonResponse\n'), ((5457, 5508), 'bicycleparking.LocationData.LocationData', 'LocationData', (["param['latitude']", "param['longitude']"], {}), "(param['latitude'], param['longitude'])\n", (5469, 5508), False, 'from bicycleparking.LocationData import LocationData\n'), ((6697, 6719), 'bicycleparking.models.Picture', 'Picture', ([], {'photo_uri': 'uri'}), '(photo_uri=uri)\n', (6704, 6719), False, 'from bicycleparking.models import Picture\n'), ((6759, 6799), 'rest_framework.response.Response', 'Response', (["{'s3_name': uri, 'id': pic.id}"], {}), "({'s3_name': uri, 'id': pic.id})\n", (6767, 6799), False, 'from rest_framework.response import Response\n'), ((4017, 4033), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (4027, 4033), False, 'import json\n'), ((5364, 5380), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (5374, 5380), False, 'import json\n'), ((5985, 6009), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': '(500)'}), '(status=500)\n', (5997, 6009), False, 'from django.http import HttpResponse\n'), ((6378, 6402), 'base64.b64decode', 'base64.b64decode', (['imgstr'], {}), '(imgstr)\n', (6394, 6402), False, 'import base64\n'), ((6970, 7000), 'bicycleparking.models.Event.objects.get', 'Event.objects.get', ([], {'id': 'event_id'}), '(id=event_id)\n', (6987, 7000), False, 'from bicycleparking.models import Event\n'), ((7012, 7058), 'bicycleparking.models.Approval.objects.get_or_create', 'Approval.objects.get_or_create', ([], {'approved': 'event'}), '(approved=event)\n', (7042, 7058), False, 'from bicycleparking.models import Approval\n'), ((7308, 7318), 'bicycleparking.Moderate.Moderate', 'Moderate', ([], {}), '()\n', (7316, 7318), False, 'from bicycleparking.Moderate import Moderate\n'), 
((5927, 5951), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': '(500)'}), '(status=500)\n', (5939, 5951), False, 'from django.http import HttpResponse\n')]
|
import sys
list1=['a','b','c','d','e','f','g','h','i','j','k','l','m']
list2=['n','o','p','q','r','s','t','u','v','w','x','y','z']
w=input("Enter a word ")
prepartner=prepartner1=[]
postpartner1=postpartner=[]
for i in w:
if(i in list1):
prepartner.append(i)
if(i in list2):
postpartner.append(i)
for j in prepartner:
list1index=list1.index(j)
if(list2[list1index] in postpartner):#testing if all prepartners has postpartners
pass
else:
print("YOU LOST")
sys.exit()
prepartner1=prepartner
postpartner1=postpartner
for k in prepartner:
x=prepartner.index(k)
y=postpartner.index(list2[list1.index(k)])
if(w.index(prepartner[x])<w.index(postpartner[y])):
if(w.index(postpartner[y])-w.index(prepartner[x])==1):#testing3a
prepartner1.pop(x)
postpartner1.pop(y)
else:
print("YOU LOST")
sys.exit()
postpartner1.reverse()
count=0
for l in prepartner1:
if(prepartner1.index(l)==postpartner1.index(list2[list1.index(l)])):#testing3b
count+=1
if(count==len(prepartner1)):
print("GAME WON")
else:
print("GAME LOST")
|
[
"sys.exit"
] |
[((532, 542), 'sys.exit', 'sys.exit', ([], {}), '()\n', (540, 542), False, 'import sys\n'), ((933, 943), 'sys.exit', 'sys.exit', ([], {}), '()\n', (941, 943), False, 'import sys\n')]
|
# coding: utf-8
from __future__ import absolute_import
from flask import json
from six import BytesIO
from swagger_server.test import BaseTestCase
class TestClassAttributeController(BaseTestCase):
"""ClassAttributeController integration test stubs"""
def test_get_class_attributes(self):
"""Test case for get_class_attributes
Lists attributes for the given class
"""
response = self.client.open(
'/pablokvitca/classdeck-api/1.0.0/class_attribute/{class_department}/{class_number}'.format(class_department='class_department_example', class_number=9999),
method='GET')
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
import unittest
unittest.main()
|
[
"unittest.main"
] |
[((802, 817), 'unittest.main', 'unittest.main', ([], {}), '()\n', (815, 817), False, 'import unittest\n')]
|
import json
from app import db, create_app
from tests.base import BaseTestHelper
class ReviewsTestCase(BaseTestHelper):
# reviews url
review_url = 'api/v2/businesses/{}/reviews'
"""class for testing reviews"""
def setUp(self):
"""initilize the app and
set test variables"""
self.app = create_app(config_name="testing")
self.client = self.app.test_client
self.review = {"opinion": "Good business with good food", "rating": 5}
self.wrong_review = {"rating": 5}
self.wrong_rating = {"opinion": "Good business with good food"}
self.business = {'name': 'crastycrab', 'description': 'Fastfood', 'contact': '0702848032', 'category': 'fastfood', 'location': 'atlantic'}
# bind app to the current context
with self.app.app_context():
# create db tables
db.create_all()
def add_business(self):
self.register_user("<EMAIL>", "123test", "123test")
result = self.login_user("<EMAIL>", "123test", "123test")
# get the access token
access_token = json.loads(result.data.decode())['access_token']
# add the access token to the header
response = self.client().post('/api/v2/businesses', headers=dict(Authorization="Bearer " + access_token), data=self.business)
return response
def test_review_creation(self):
# first add the business
self.add_business()
# add the review for the business
result = self.client().post(ReviewsTestCase.review_url.format('1'), data= self.review)
self.assertEqual(result.status_code, 201)
self.assertIn("succesfully added the review", str(result.data))
def test_review_get_all(self):
self.add_business()
self.client().post(ReviewsTestCase.review_url.format('1'), data=self.review)
# get reviews for the particular business
result = self.client().get(ReviewsTestCase.review_url.format('1'))
self.assertEqual(result.status_code, 200)
def test_review_not_exist(self):
self.add_business()
# get reviews without adding the reviews
result = self.client().get(ReviewsTestCase.review_url.format('1'))
self.assertEqual(result.status_code, 404)
def test_add_review_business_not_exist(self):
# post review for a business that does not exist
result = self.client().post(ReviewsTestCase.review_url.format('10'), data=self.review)
self.assertEqual(result.status_code, 404)
self.assertIn('cannot add review business that does not exist', str(result.data))
def test_get_review_business_not_exist(self):
self.add_business()
self.client().post(ReviewsTestCase.review_url.format('1'), data=self.review)
# get review for a business that does not exist
result = self.client().get(ReviewsTestCase.review_url.format('5'))
self.assertEqual(result.status_code, 404)
def test_add_empty_opinion(self):
# first add the business
self.add_business()
result = self.client().post(ReviewsTestCase.review_url.format('1'), data=self.wrong_review)
self.assertEqual(result.status_code, 400)
self.assertIn("make sure the opinion and rating are included", str(result.data))
def test_add_empty_rating(self):
# first add the business
self.add_business()
result = self.client().post(ReviewsTestCase.review_url.format('1'), data=self.wrong_review)
self.assertEqual(result.status_code, 400)
self.assertIn("make sure the opinion and rating are included", str(result.data))
def tearDown(self):
"""run after every test to ensure database is empty"""
with self.app.app_context():
db.session.remove()
db.drop_all()
|
[
"app.create_app",
"app.db.create_all",
"app.db.drop_all",
"app.db.session.remove"
] |
[((328, 361), 'app.create_app', 'create_app', ([], {'config_name': '"""testing"""'}), "(config_name='testing')\n", (338, 361), False, 'from app import db, create_app\n'), ((867, 882), 'app.db.create_all', 'db.create_all', ([], {}), '()\n', (880, 882), False, 'from app import db, create_app\n'), ((3793, 3812), 'app.db.session.remove', 'db.session.remove', ([], {}), '()\n', (3810, 3812), False, 'from app import db, create_app\n'), ((3825, 3838), 'app.db.drop_all', 'db.drop_all', ([], {}), '()\n', (3836, 3838), False, 'from app import db, create_app\n')]
|
import argparse
import os
import subprocess
import sys
import ssl
import urllib.request
import tarfile
parser = argparse.ArgumentParser()
parser.add_argument(
'--name',
required=True,
)
parser.add_argument(
'--url',
required=True,
)
parser.add_argument(
'--token',
required=True,
)
args = parser.parse_args()
name = args.name
url = args.url
token = args.token
actions_runner_path = f"./actions-runner-{name}"
def actions_runner_install(actions_runner_path):
os.mkdir(actions_runner_path)
tf = f"./{actions_runner_path}/actions-runner-linux-x64-2.273.5.tar.gz"
ssl._create_default_https_context = ssl._create_unverified_context
urllib.request.urlretrieve(
"https://github.com/actions/runner/releases/download/v2.273.5/actions-runner-linux-x64-2.273.5.tar.gz",
tf
)
with tarfile.open(tf) as f:
f.extractall(actions_runner_path)
def actions_runner_config(actions_runner_path, url, token):
return subprocess.call([
f"{actions_runner_path}/config.sh",
"--url", url,
"--token", token
])
if not os.path.isdir(actions_runner_path):
actions_runner_install(actions_runner_path)
if os.path.isdir(actions_runner_path + ".runner"):
raise FileExistsError(
"`.runner` config file already present, if you want to start a runner"
"then use the `run` app."
)
exit_code = actions_runner_config(actions_runner_path, url, token)
sys.exit(exit_code)
|
[
"os.mkdir",
"argparse.ArgumentParser",
"os.path.isdir",
"subprocess.call",
"tarfile.open",
"sys.exit"
] |
[((113, 138), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (136, 138), False, 'import argparse\n'), ((1191, 1237), 'os.path.isdir', 'os.path.isdir', (["(actions_runner_path + '.runner')"], {}), "(actions_runner_path + '.runner')\n", (1204, 1237), False, 'import os\n'), ((1454, 1473), 'sys.exit', 'sys.exit', (['exit_code'], {}), '(exit_code)\n', (1462, 1473), False, 'import sys\n'), ((494, 523), 'os.mkdir', 'os.mkdir', (['actions_runner_path'], {}), '(actions_runner_path)\n', (502, 523), False, 'import os\n'), ((979, 1068), 'subprocess.call', 'subprocess.call', (["[f'{actions_runner_path}/config.sh', '--url', url, '--token', token]"], {}), "([f'{actions_runner_path}/config.sh', '--url', url,\n '--token', token])\n", (994, 1068), False, 'import subprocess\n'), ((1103, 1137), 'os.path.isdir', 'os.path.isdir', (['actions_runner_path'], {}), '(actions_runner_path)\n', (1116, 1137), False, 'import os\n'), ((842, 858), 'tarfile.open', 'tarfile.open', (['tf'], {}), '(tf)\n', (854, 858), False, 'import tarfile\n')]
|
from setuptools import setup
setup(
name="fuzz",
version="0.1.1",
description="Contains tools for modelling values with associated uncertainty.",
url="https://fuzz.samireland.com",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering :: Chemistry",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.0",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
keywords="statistics measurements unertainty propagation",
packages=["fuzz"],
)
|
[
"setuptools.setup"
] |
[((30, 882), 'setuptools.setup', 'setup', ([], {'name': '"""fuzz"""', 'version': '"""0.1.1"""', 'description': '"""Contains tools for modelling values with associated uncertainty."""', 'url': '"""https://fuzz.samireland.com"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'classifiers': "['Development Status :: 4 - Beta', 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Scientific/Engineering :: Chemistry',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.0',\n 'Programming Language :: Python :: 3.1',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6']", 'keywords': '"""statistics measurements unertainty propagation"""', 'packages': "['fuzz']"}), "(name='fuzz', version='0.1.1', description=\n 'Contains tools for modelling values with associated uncertainty.', url\n ='https://fuzz.samireland.com', author='<NAME>', author_email='<EMAIL>',\n license='MIT', classifiers=['Development Status :: 4 - Beta',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Scientific/Engineering :: Chemistry',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.0',\n 'Programming Language :: Python :: 3.1',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'], keywords=\n 'statistics measurements unertainty propagation', packages=['fuzz'])\n", (35, 882), False, 'from setuptools import setup\n')]
|
# Build the speaker and phone networks.
# In this framework, they are both TDNN with different settings.
# The speaker network is a hard-coded TDNN and the phone network is specified by the parameters.
# Of course, the speaker network can be modified (e.g. to a larger network). Meanwhile, the parameters for the
# phone network should be modified as well so that the architecure is consistent with the speaker network.
# TODO: we can make the speaker network also controlled by config file which is not too difficult.
import tensorflow as tf
from model.multitask_v1.pooling import statistics_pooling_v2
from model.common import l2_scaling, shape_list, prelu
def build_speaker_encoder(features, phone_labels, feature_length, params, endpoints, reuse_variables, is_training=False):
"""Build encoder for speaker latent variable.
Use the same tdnn network with x-vector.
Args:
features: the input features.
phone_labels: the phone labels (i.e. alignment). will be used in the future.
feature_length: the length of each feature.
params: the parameters.
endpoints: will be updated during building.
reuse_variables: if true, reuse the existing variables.
is_training: used in batchnorm
:return: sampled_zs, mu_zs, logvar_zs
"""
relu = tf.nn.relu
if "network_relu_type" in params.dict:
if params.network_relu_type == "prelu":
relu = prelu
if params.network_relu_type == "lrelu":
relu = tf.nn.leaky_relu
with tf.variable_scope("encoder", reuse=reuse_variables):
# Layer 1: [-2,-1,0,1,2] --> [b, 1, l-4, 512]
# conv2d + batchnorm + relu
features = tf.layers.conv2d(features,
512,
(1, 5),
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name='conv1')
endpoints["conv1"] = features
features = tf.layers.batch_normalization(features,
momentum=params.batchnorm_momentum,
training=is_training,
name="bn1")
endpoints["bn1"] = features
features = relu(features, name='relu1')
endpoints["relu1"] = features
# Layer 2: [-2, -1, 0, 1, 2] --> [b ,1, l-4, 512]
# conv2d + batchnorm + relu
# This is slightly different with Kaldi which use dilation convolution
features = tf.layers.conv2d(features,
512,
(1, 5),
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name='conv2')
endpoints["conv2"] = features
features = tf.layers.batch_normalization(features,
momentum=params.batchnorm_momentum,
training=is_training,
name="bn2")
endpoints["bn2"] = features
features = relu(features, name='relu2')
endpoints["relu2"] = features
# Layer 3: [-3, -2, -1, 0, 1, 2, 3] --> [b, 1, l-6, 512]
# conv2d + batchnorm + relu
# Still, use a non-dilation one
features = tf.layers.conv2d(features,
512,
(1, 7),
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name='conv3')
endpoints["conv3"] = features
features = tf.layers.batch_normalization(features,
momentum=params.batchnorm_momentum,
training=is_training,
name="bn3")
endpoints["bn3"] = features
features = relu(features, name='relu3')
endpoints["relu3"] = features
# Convert to [b, l, 512]
features = tf.squeeze(features, axis=1)
# The output of the 3-rd layer can simply be rank 3.
endpoints["relu3"] = features
# Layer 4: [b, l, 512] --> [b, l, 512]
features = tf.layers.dense(features,
512,
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name="dense4")
endpoints["dense4"] = features
features = tf.layers.batch_normalization(features,
momentum=params.batchnorm_momentum,
training=is_training,
name="bn4")
endpoints["bn4"] = features
features = relu(features, name='relu4')
endpoints["relu4"] = features
# Layer 5: [b, l, x]
if "num_nodes_pooling_layer" not in params.dict:
# The default number of nodes before pooling
params.dict["num_nodes_pooling_layer"] = 1500
features = tf.layers.dense(features,
params.num_nodes_pooling_layer,
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name="dense5")
endpoints["dense5"] = features
features = tf.layers.batch_normalization(features,
momentum=params.batchnorm_momentum,
training=is_training,
name="bn5")
endpoints["bn5"] = features
features = relu(features, name='relu5')
endpoints["relu5"] = features
# Here, we need to slice the feature since the original feature is expanded by the larger context between
# the speaker and phone context. I make a hypothesis that the phone context will be larger.
# So the speaker network need to slicing.
if (params.speaker_left_context < params.phone_left_context and
params.speaker_right_context < params.phone_right_context):
features = features[:, params.phone_left_context - params.speaker_left_context:
params.speaker_right_context - params.phone_right_context, :]
else:
raise NotImplementedError("The speake and phone context is not supported now.")
# Make sure we've got the right feature
with tf.control_dependencies([tf.assert_equal(shape_list(features)[1], shape_list(phone_labels)[1])]):
# Pooling layer
# The length of utterances may be different.
# The original pooling use all the frames which is not appropriate for this case.
# So we create a new function (I don't want to change the original one).
if params.pooling_type == "statistics_pooling":
features = statistics_pooling_v2(features, feature_length, endpoints, params, is_training)
else:
raise NotImplementedError("Not implement %s pooling" % params.pooling_type)
endpoints['pooling'] = features
# Utterance-level network
# Layer 6: [b, 512]
features = tf.layers.dense(features,
512,
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name='dense6')
endpoints['dense6'] = features
features = tf.layers.batch_normalization(features,
momentum=params.batchnorm_momentum,
training=is_training,
name="bn6")
endpoints["bn6"] = features
features = relu(features, name='relu6')
endpoints["relu6"] = features
# Layer 7: [b, x]
if "speaker_dim" not in params.dict:
# The default number of nodes in the last layer
params.dict["speaker_dim"] = 512
# We need mean and logvar.
mu = tf.layers.dense(features,
params.speaker_dim,
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer),
name="zs_dense")
endpoints['zs_mu_dense'] = mu
if "spk_last_layer_no_bn" not in params.dict:
params.spk_last_layer_no_bn = False
if not params.spk_last_layer_no_bn:
mu = tf.layers.batch_normalization(mu,
momentum=params.batchnorm_momentum,
training=is_training,
name="zs_bn")
endpoints['zs_mu_bn'] = mu
if "spk_last_layer_linear" not in params.dict:
params.spk_last_layer_linear = False
if not params.spk_last_layer_linear:
mu = relu(mu, name="zs_mu_relu")
endpoints['zs_mu_relu'] = mu
# We do not compute logvar in this version.
# Set logvar=0 ==> var=1
logvar = 0
# epsilon = tf.random_normal(tf.shape(mu), name='zs_epsilon')
# sample = mu + tf.exp(0.5 * logvar) * epsilon
sample = mu
return sample, mu, logvar
def build_phone_encoder(features, speaker_labels, feature_length, params, endpoints, reuse_variables, is_training=False):
"""Build encoder for phone latent variable.
Use the tdnn and share the same structure in the lower layers.
Args:
features: the input features.
speaker_labels: the speaker labels (i.e. the speaker index). may be used in the future.
feature_length: the length of each feature.
params: the parameters.
endpoints: will be updated during building.
reuse_variables: if true, reuse the existing variables
is_training: used in batchnorm.
:return: sampled_zs, mu_zs, logvar_zs
"""
relu = tf.nn.relu
if "network_relu_type" in params.dict:
if params.network_relu_type == "prelu":
relu = prelu
if params.network_relu_type == "lrelu":
relu = tf.nn.leaky_relu
# # This is moved to the model config file.
# # Acoustic network params:
# # Most share 4 layers with x-vector network.
# # [-2,2], [-2,2], [-3,3], [0], [-4,0,4]
# # The last fully-connected layer is appended as the phonetic embedding
# layer_size = [512, 512, 512, 512, 512]
# kernel_size = [5, 5, 7, 1, 3]
# dilation_size = [1, 1, 1, 1, 4]
num_layers = len(params.phone_kernel_size)
layer_index = 0
if params.num_shared_layers > 0:
# We may share the lower layers of the two tasks.
# Go through the shared layers between the speaker and phone networks.
assert params.num_shared_layers < num_layers
with tf.variable_scope("encoder", reuse=True):
for i in range(params.num_shared_layers):
if params.phone_kernel_size[layer_index] > 1:
if len(shape_list(features)) == 3:
# Add a dummy dim to support 2d conv
features = tf.expand_dims(features, axis=1)
features = tf.layers.conv2d(features,
params.phone_layer_size[layer_index],
(1, params.phone_kernel_size[layer_index]),
activation=None,
dilation_rate=(1, params.phone_dilation_size[layer_index]),
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name='conv%d' % (layer_index + 1))
elif params.phone_kernel_size[layer_index] == 1:
if len(shape_list(features)) == 4:
# Remove a dummy dim to do dense layer
features = tf.squeeze(features, axis=1)
features = tf.layers.dense(features,
params.phone_layer_size[layer_index],
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name="dense%d" % (layer_index + 1))
features = tf.layers.batch_normalization(features,
momentum=params.batchnorm_momentum,
training=is_training,
name="bn%d" % (layer_index + 1))
features = relu(features, name='relu%d' % (layer_index + 1))
layer_index += 1
with tf.variable_scope("encoder_phone", reuse=reuse_variables):
# In the unshared part, the endpoints should be updated.
while layer_index < num_layers:
if params.phone_kernel_size[layer_index] > 1:
if len(shape_list(features)) == 3:
features = tf.expand_dims(features, axis=1)
features = tf.layers.conv2d(features,
params.phone_layer_size[layer_index],
(1, params.phone_kernel_size[layer_index]),
activation=None,
dilation_rate=(1, params.phone_dilation_size[layer_index]),
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name='phn_conv%d' % (layer_index + 1))
endpoints["phn_conv%d" % (layer_index + 1)] = features
elif params.phone_kernel_size[layer_index] == 1:
if len(shape_list(features)) == 4:
features = tf.squeeze(features, axis=1)
features = tf.layers.dense(features,
params.phone_layer_size[layer_index],
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name="phn_dense%d" % (layer_index + 1))
endpoints["phn_dense%d" % (layer_index + 1)] = features
features = tf.layers.batch_normalization(features,
momentum=params.batchnorm_momentum,
training=is_training,
name="phn_bn%d" % (layer_index + 1))
endpoints["phn_bn%d" % (layer_index + 1)] = features
features = relu(features, name='phn_relu%d' % (layer_index + 1))
endpoints["phn_relu%d" % (layer_index + 1)] = features
layer_index += 1
# The last layer
if len(shape_list(features)) == 4:
features = tf.squeeze(features, axis=1)
# Similar with the speaker network, we may need to slice the feature due to the different context between
# the speaker and phone network. At this moment, I just make a hypothesis that the phone context will be
# larger which means there is no need to slice for the phone network
if (params.speaker_left_context > params.phone_left_context and
params.speaker_right_context > params.phone_right_context):
raise NotImplementedError("The speake and phone context is not supported now.")
# features = features[:, params.speaker_left_context - params.phone_left_context:
# params.phone_right_context - params.speaker_right_context, :]
# # We do not validate the length because this will introduce the alignment -- phn_labels, which
# # is unnecessary when doing the phone inference.
# with tf.control_dependencies([tf.assert_equal(shape_list(features)[1], shape_list(self.phn_labels)[1])]):
# features = tf.identity(features)
if "phone_dim" not in params.dict:
params.dict["phone_dim"] = 512
mu = tf.layers.dense(features,
params.phone_dim,
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer),
name="zp_dense")
endpoints['zp_mu_dense'] = mu
mu = tf.layers.batch_normalization(mu,
momentum=params.batchnorm_momentum,
training=is_training,
name="zp_bn")
endpoints['zp_mu_bn'] = mu
mu = relu(mu, name='zp_mu_relu')
endpoints['zp_mu_relu'] = mu
logvar = 0
# epsilon = tf.random_normal(tf.shape(mu), name='zp_epsilon')
# sample = mu + tf.exp(0.5 * logvar) * epsilon
sample = mu
return sample, mu, logvar
|
[
"tensorflow.contrib.layers.l2_regularizer",
"model.multitask_v1.pooling.statistics_pooling_v2",
"tensorflow.variable_scope",
"tensorflow.squeeze",
"model.common.shape_list",
"tensorflow.layers.batch_normalization",
"tensorflow.expand_dims"
] |
[((1534, 1585), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder"""'], {'reuse': 'reuse_variables'}), "('encoder', reuse=reuse_variables)\n", (1551, 1585), True, 'import tensorflow as tf\n'), ((2128, 2241), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['features'], {'momentum': 'params.batchnorm_momentum', 'training': 'is_training', 'name': '"""bn1"""'}), "(features, momentum=params.batchnorm_momentum,\n training=is_training, name='bn1')\n", (2157, 2241), True, 'import tensorflow as tf\n'), ((3132, 3245), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['features'], {'momentum': 'params.batchnorm_momentum', 'training': 'is_training', 'name': '"""bn2"""'}), "(features, momentum=params.batchnorm_momentum,\n training=is_training, name='bn2')\n", (3161, 3245), True, 'import tensorflow as tf\n'), ((4104, 4217), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['features'], {'momentum': 'params.batchnorm_momentum', 'training': 'is_training', 'name': '"""bn3"""'}), "(features, momentum=params.batchnorm_momentum,\n training=is_training, name='bn3')\n", (4133, 4217), True, 'import tensorflow as tf\n'), ((4536, 4564), 'tensorflow.squeeze', 'tf.squeeze', (['features'], {'axis': '(1)'}), '(features, axis=1)\n', (4546, 4564), True, 'import tensorflow as tf\n'), ((5115, 5228), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['features'], {'momentum': 'params.batchnorm_momentum', 'training': 'is_training', 'name': '"""bn4"""'}), "(features, momentum=params.batchnorm_momentum,\n training=is_training, name='bn4')\n", (5144, 5228), True, 'import tensorflow as tf\n'), ((6127, 6240), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['features'], {'momentum': 'params.batchnorm_momentum', 'training': 'is_training', 'name': '"""bn5"""'}), "(features, momentum=params.batchnorm_momentum,\n training=is_training, name='bn5')\n", (6156, 6240), 
True, 'import tensorflow as tf\n'), ((8425, 8538), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['features'], {'momentum': 'params.batchnorm_momentum', 'training': 'is_training', 'name': '"""bn6"""'}), "(features, momentum=params.batchnorm_momentum,\n training=is_training, name='bn6')\n", (8454, 8538), True, 'import tensorflow as tf\n'), ((14081, 14138), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder_phone"""'], {'reuse': 'reuse_variables'}), "('encoder_phone', reuse=reuse_variables)\n", (14098, 14138), True, 'import tensorflow as tf\n'), ((18003, 18112), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['mu'], {'momentum': 'params.batchnorm_momentum', 'training': 'is_training', 'name': '"""zp_bn"""'}), "(mu, momentum=params.batchnorm_momentum,\n training=is_training, name='zp_bn')\n", (18032, 18112), True, 'import tensorflow as tf\n'), ((9512, 9621), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['mu'], {'momentum': 'params.batchnorm_momentum', 'training': 'is_training', 'name': '"""zs_bn"""'}), "(mu, momentum=params.batchnorm_momentum,\n training=is_training, name='zs_bn')\n", (9541, 9621), True, 'import tensorflow as tf\n'), ((11895, 11935), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder"""'], {'reuse': '(True)'}), "('encoder', reuse=True)\n", (11912, 11935), True, 'import tensorflow as tf\n'), ((15856, 15994), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['features'], {'momentum': 'params.batchnorm_momentum', 'training': 'is_training', 'name': "('phn_bn%d' % (layer_index + 1))"}), "(features, momentum=params.batchnorm_momentum,\n training=is_training, name='phn_bn%d' % (layer_index + 1))\n", (15885, 15994), True, 'import tensorflow as tf\n'), ((16480, 16508), 'tensorflow.squeeze', 'tf.squeeze', (['features'], {'axis': '(1)'}), '(features, axis=1)\n', (16490, 16508), True, 'import tensorflow as tf\n'), ((1916, 
1978), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['params.weight_l2_regularizer'], {}), '(params.weight_l2_regularizer)\n', (1948, 1978), True, 'import tensorflow as tf\n'), ((2920, 2982), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['params.weight_l2_regularizer'], {}), '(params.weight_l2_regularizer)\n', (2952, 2982), True, 'import tensorflow as tf\n'), ((3892, 3954), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['params.weight_l2_regularizer'], {}), '(params.weight_l2_regularizer)\n', (3924, 3954), True, 'import tensorflow as tf\n'), ((4903, 4965), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['params.weight_l2_regularizer'], {}), '(params.weight_l2_regularizer)\n', (4935, 4965), True, 'import tensorflow as tf\n'), ((5915, 5977), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['params.weight_l2_regularizer'], {}), '(params.weight_l2_regularizer)\n', (5947, 5977), True, 'import tensorflow as tf\n'), ((7725, 7804), 'model.multitask_v1.pooling.statistics_pooling_v2', 'statistics_pooling_v2', (['features', 'feature_length', 'endpoints', 'params', 'is_training'], {}), '(features, feature_length, endpoints, params, is_training)\n', (7746, 7804), False, 'from model.multitask_v1.pooling import statistics_pooling_v2\n'), ((8213, 8275), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['params.weight_l2_regularizer'], {}), '(params.weight_l2_regularizer)\n', (8245, 8275), True, 'import tensorflow as tf\n'), ((9199, 9261), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['params.weight_l2_regularizer'], {}), '(params.weight_l2_regularizer)\n', (9231, 9261), True, 'import tensorflow as tf\n'), ((13659, 13793), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['features'], {'momentum': 
'params.batchnorm_momentum', 'training': 'is_training', 'name': "('bn%d' % (layer_index + 1))"}), "(features, momentum=params.batchnorm_momentum,\n training=is_training, name='bn%d' % (layer_index + 1))\n", (13688, 13793), True, 'import tensorflow as tf\n'), ((16429, 16449), 'model.common.shape_list', 'shape_list', (['features'], {}), '(features)\n', (16439, 16449), False, 'from model.common import l2_scaling, shape_list, prelu\n'), ((17842, 17904), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['params.weight_l2_regularizer'], {}), '(params.weight_l2_regularizer)\n', (17874, 17904), True, 'import tensorflow as tf\n'), ((14385, 14417), 'tensorflow.expand_dims', 'tf.expand_dims', (['features'], {'axis': '(1)'}), '(features, axis=1)\n', (14399, 14417), True, 'import tensorflow as tf\n'), ((12204, 12236), 'tensorflow.expand_dims', 'tf.expand_dims', (['features'], {'axis': '(1)'}), '(features, axis=1)\n', (12218, 12236), True, 'import tensorflow as tf\n'), ((14326, 14346), 'model.common.shape_list', 'shape_list', (['features'], {}), '(features)\n', (14336, 14346), False, 'from model.common import l2_scaling, shape_list, prelu\n'), ((14870, 14932), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['params.weight_l2_regularizer'], {}), '(params.weight_l2_regularizer)\n', (14902, 14932), True, 'import tensorflow as tf\n'), ((15280, 15308), 'tensorflow.squeeze', 'tf.squeeze', (['features'], {'axis': '(1)'}), '(features, axis=1)\n', (15290, 15308), True, 'import tensorflow as tf\n'), ((7317, 7337), 'model.common.shape_list', 'shape_list', (['features'], {}), '(features)\n', (7327, 7337), False, 'from model.common import l2_scaling, shape_list, prelu\n'), ((7342, 7366), 'model.common.shape_list', 'shape_list', (['phone_labels'], {}), '(phone_labels)\n', (7352, 7366), False, 'from model.common import l2_scaling, shape_list, prelu\n'), ((12080, 12100), 'model.common.shape_list', 'shape_list', (['features'], 
{}), '(features)\n', (12090, 12100), False, 'from model.common import l2_scaling, shape_list, prelu\n'), ((12713, 12775), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['params.weight_l2_regularizer'], {}), '(params.weight_l2_regularizer)\n', (12745, 12775), True, 'import tensorflow as tf\n'), ((13131, 13159), 'tensorflow.squeeze', 'tf.squeeze', (['features'], {'axis': '(1)'}), '(features, axis=1)\n', (13141, 13159), True, 'import tensorflow as tf\n'), ((15221, 15241), 'model.common.shape_list', 'shape_list', (['features'], {}), '(features)\n', (15231, 15241), False, 'from model.common import l2_scaling, shape_list, prelu\n'), ((15565, 15627), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['params.weight_l2_regularizer'], {}), '(params.weight_l2_regularizer)\n', (15597, 15627), True, 'import tensorflow as tf\n'), ((13005, 13025), 'model.common.shape_list', 'shape_list', (['features'], {}), '(features)\n', (13015, 13025), False, 'from model.common import l2_scaling, shape_list, prelu\n'), ((13432, 13494), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['params.weight_l2_regularizer'], {}), '(params.weight_l2_regularizer)\n', (13464, 13494), True, 'import tensorflow as tf\n')]
|
import unittest
from katas.kyu_8.volume_of_a_cuboid import get_volume_of_cuboid
class GetVolumeOfCuboidTestCase(unittest.TestCase):
    """Unit tests for the get_volume_of_cuboid kata solution."""

    def test_equal_1(self):
        # A 1 x 2 x 2 cuboid encloses 4 cubic units.
        expected = 4
        self.assertEqual(expected, get_volume_of_cuboid(1, 2, 2))

    def test_equal_2(self):
        # A 6.3 x 2 x 5 cuboid encloses 63 cubic units.
        expected = 63
        self.assertEqual(expected, get_volume_of_cuboid(6.3, 2, 5))
|
[
"katas.kyu_8.volume_of_a_cuboid.get_volume_of_cuboid"
] |
[((188, 217), 'katas.kyu_8.volume_of_a_cuboid.get_volume_of_cuboid', 'get_volume_of_cuboid', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (208, 217), False, 'from katas.kyu_8.volume_of_a_cuboid import get_volume_of_cuboid\n'), ((276, 307), 'katas.kyu_8.volume_of_a_cuboid.get_volume_of_cuboid', 'get_volume_of_cuboid', (['(6.3)', '(2)', '(5)'], {}), '(6.3, 2, 5)\n', (296, 307), False, 'from katas.kyu_8.volume_of_a_cuboid import get_volume_of_cuboid\n')]
|
"""
Copyright contributors to the Application Gateway project
"""
import logging
import yaml
import os
import sys
from functools import reduce
logger = logging.getLogger(__name__)
class Environment(object):
    """
    Manages access to the configuration data for the test run.

    Values are looked up first in the process environment and then in the
    YAML configuration file(s), which are parsed once and cached at class
    level.
    """

    # Static cache of the parsed configuration: loading happens only once
    # per process, no matter how many Environment objects are created.
    config_ = None

    iag_user = 5001
    iag_group = 1000

    def __init__(self):
        super(Environment, self).__init__()
        Environment.loadConfig()

    @staticmethod
    def get(path):
        """
        Retrieve the configuration value at the specified JSON path, where
        a period separates path components (for example:
        'container.image_name').

        An identically named environment variable always wins over the
        configuration files. Raises an Exception when the resolved value
        is None.
        """
        # Environment variables take precedence over the YAML files.
        if path in os.environ:
            return os.environ[path]

        # Otherwise consult the cached configuration, loading it on
        # first use.
        Environment.loadConfig()

        node = Environment.config_
        for component in path.split("."):
            node = node[component]

        if node is None:
            message = "The {0} configuration value is missing!".format(path)
            logger.error(message)
            raise Exception(message)

        return node

    @staticmethod
    def loadConfig():
        """
        Parse the YAML configuration into the static cache. This is a
        no-op once the cache has been populated.
        """
        if Environment.config_ is not None:
            return

        # The default YAML file always gets loaded; an extra file named by
        # the CONFIG_FILE environment variable may over-ride entries from
        # the default one.
        config_files = [
            os.path.abspath(os.path.dirname(__file__) +
                            "/../../../../../autotest/etc/config.yaml")
        ]
        if "CONFIG_FILE" in os.environ:
            config_files.append(os.environ['CONFIG_FILE'])

        Environment.config_ = {}

        for config_file in config_files:
            if not os.path.isfile(config_file):
                continue
            with open(config_file, 'r') as stream:
                try:
                    Environment.config_.update(yaml.safe_load(stream))
                except yaml.YAMLError as exc:
                    logger.critical(exc)
                    sys.exit(1)

        logger.debug("Configuration: {0}".format(Environment.config_))
def is_container_context():
    """Return True when the IS_CONTAINER environment flag is set to 'true'."""
    flag = os.environ.get('IS_CONTAINER', "false")
    return flag == "true"
|
[
"os.path.dirname",
"os.environ.get",
"os.path.isfile",
"yaml.safe_load",
"sys.exit",
"logging.getLogger"
] |
[((156, 183), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (173, 183), False, 'import logging\n'), ((3006, 3045), 'os.environ.get', 'os.environ.get', (['"""IS_CONTAINER"""', '"""false"""'], {}), "('IS_CONTAINER', 'false')\n", (3020, 3045), False, 'import os\n'), ((2542, 2569), 'os.path.isfile', 'os.path.isfile', (['config_file'], {}), '(config_file)\n', (2556, 2569), False, 'import os\n'), ((2215, 2240), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2230, 2240), False, 'import os\n'), ((2714, 2736), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (2728, 2736), False, 'import yaml\n'), ((2870, 2881), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2878, 2881), False, 'import sys\n')]
|
import shutil
import os
import zipfile
# Bundle the finance database into a zip archive, then copy the archive
# to the external backup location.
src = 'Backup.zip'
dst = 'E:/backup'

with zipfile.ZipFile(src, 'w') as backup_archive:
    backup_archive.write('Finance.db')

shutil.copy(src=src, dst=dst)
|
[
"zipfile.ZipFile",
"shutil.copy"
] |
[((167, 196), 'shutil.copy', 'shutil.copy', ([], {'src': 'src', 'dst': 'dst'}), '(src=src, dst=dst)\n', (178, 196), False, 'import shutil\n'), ((49, 83), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""Backup.zip"""', '"""w"""'], {}), "('Backup.zip', 'w')\n", (64, 83), False, 'import zipfile\n')]
|
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.NaampadObject import NaampadObject
from OTLMOW.OTLModel.Datatypes.KlPadNetwerkprotectie import KlPadNetwerkprotectie
from OTLMOW.GeometrieArtefact.GeenGeometrie import GeenGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class Pad(NaampadObject, GeenGeometrie):
    """A contiguous series of links that together realise a connection over
    the network, using one and the same technology (e.g. SDH, OTN, ...)."""

    # Canonical OTL type URI for this generated class.
    typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/installatie#Pad'
    """De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""

    def __init__(self):
        # Initialise both parent classes explicitly.
        NaampadObject.__init__(self)
        GeenGeometrie.__init__(self)

        # OTL attribute definition: reference(s) to the path that provides
        # redundancy for this path (cardinality 0..*).
        self._netwerkprotectie = OTLAttribuut(field=KlPadNetwerkprotectie,
                                              naam='netwerkprotectie',
                                              label='netwerkprotectie',
                                              objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/installatie#Pad.netwerkprotectie',
                                              kardinaliteit_max='*',
                                              definition='Referentie van het pad dat redundantie levert aan dit pad.',
                                              owner=self)

    @property
    def netwerkprotectie(self):
        """Reference of the path that delivers redundancy to this path."""
        return self._netwerkprotectie.get_waarde()

    @netwerkprotectie.setter
    def netwerkprotectie(self, value):
        # Delegates validation and bookkeeping to the OTLAttribuut instance.
        self._netwerkprotectie.set_waarde(value, owner=self)
|
[
"OTLMOW.OTLModel.BaseClasses.OTLAttribuut.OTLAttribuut",
"OTLMOW.OTLModel.Classes.NaampadObject.NaampadObject.__init__",
"OTLMOW.GeometrieArtefact.GeenGeometrie.GeenGeometrie.__init__"
] |
[((750, 778), 'OTLMOW.OTLModel.Classes.NaampadObject.NaampadObject.__init__', 'NaampadObject.__init__', (['self'], {}), '(self)\n', (772, 778), False, 'from OTLMOW.OTLModel.Classes.NaampadObject import NaampadObject\n'), ((787, 815), 'OTLMOW.GeometrieArtefact.GeenGeometrie.GeenGeometrie.__init__', 'GeenGeometrie.__init__', (['self'], {}), '(self)\n', (809, 815), False, 'from OTLMOW.GeometrieArtefact.GeenGeometrie import GeenGeometrie\n'), ((850, 1161), 'OTLMOW.OTLModel.BaseClasses.OTLAttribuut.OTLAttribuut', 'OTLAttribuut', ([], {'field': 'KlPadNetwerkprotectie', 'naam': '"""netwerkprotectie"""', 'label': '"""netwerkprotectie"""', 'objectUri': '"""https://wegenenverkeer.data.vlaanderen.be/ns/installatie#Pad.netwerkprotectie"""', 'kardinaliteit_max': '"""*"""', 'definition': '"""Referentie van het pad dat redundantie levert aan dit pad."""', 'owner': 'self'}), "(field=KlPadNetwerkprotectie, naam='netwerkprotectie', label=\n 'netwerkprotectie', objectUri=\n 'https://wegenenverkeer.data.vlaanderen.be/ns/installatie#Pad.netwerkprotectie'\n , kardinaliteit_max='*', definition=\n 'Referentie van het pad dat redundantie levert aan dit pad.', owner=self)\n", (862, 1161), False, 'from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut\n')]
|
from app import db_main, app
from app.helper.sqlalchemy import ObjectIDField
from sqlalchemy.ext.declarative import declared_attr
from datetime import datetime
from re import sub as re_sub
from bson.objectid import ObjectId
from bson.errors import InvalidId
from werkzeug.routing import BaseConverter, ValidationError
from functools import wraps
@app.after_request
def after_request(response):
    """Commit the main DB session unless the response is a server error."""
    # NOTE(review): this reads the private _status_code attribute; the
    # public response.status_code would be the conventional accessor.
    status = int(response._status_code or 0)

    if status == 500:
        # The request failed part-way through: discard its DB changes.
        db_main.session.rollback()
    else:
        db_main.session.commit()

    # Hand the response back unchanged.
    return response
def auto_commit():
    """
    Decorator factory: flush pending DB work with a commit before running
    the wrapped callable, rolling the session back (and re-raising) when
    either step fails.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                # Persist whatever is pending, then run the callable.
                db_main.session.commit()
                return func(*args, **kwargs)
            except Exception as error:
                # Undo the broken transaction and propagate the failure.
                db_main.session.rollback()
                raise error
        return wrapper
    return decorator
def objectid(value):
    """Coerce *value* into a bson ObjectId, returning None when invalid."""
    try:
        parsed = ObjectId(value)
    except (InvalidId, ValueError, TypeError):
        return None
    return parsed
def transform_table_name(text):
    """
    Convert a CamelCase model class name into a snake_case table name,
    removing any 'Model' substring first (e.g. 'PlaylistItemModel' ->
    'playlist_item').
    """
    stripped = text.replace("Model", "")
    # First pass: split before an upper-case letter followed by lower-case.
    partially_split = re_sub('(.)([A-Z][a-z]+)', r'\1_\2', stripped)
    # Second pass: split between a lower-case/digit and an upper-case letter.
    return re_sub('([a-z0-9])([A-Z])', r'\1_\2', partially_split).lower()
class ObjectIDConverter(BaseConverter):
    """Werkzeug URL converter mapping path segments to ObjectId values."""

    def to_python(self, value):
        """Convert the raw URL segment into an ObjectId, or fail to match."""
        text = str(value)
        try:
            return ObjectId(text)
        except (InvalidId, ValueError, TypeError):
            # Anything that is not a well-formed ObjectId rejects the route.
            raise ValidationError()

    def to_url(self, value):
        """Render an ObjectId back into its URL string representation."""
        return str(value)
class CopiedData():
    """
    Plain detached snapshot of a model row: copied fields are available as
    attributes, and any attribute that was never copied resolves to None.
    """

    def __init__(self, data, tablename):
        # Copy every field from the mapping onto the instance itself.
        for key, value in data.items():
            setattr(self, key, value)
        self.__tablename__ = tablename

    def __getattr__(self, name):
        # Invoked only for attributes that were never copied.
        return None
class Database(db_main.Model):
    """
    Abstract base model: ObjectId primary key, created/modified timestamps,
    a soft-delete flag and common session helpers.
    """

    __abstract__ = True

    @declared_attr
    def __tablename__(cls):
        # Derive the table name from the class name (snake_case, 'Model'
        # stripped).
        return transform_table_name(cls.__name__)

    _utc_now = datetime.utcnow
    _object_id = ObjectId

    id_ = db_main.Column(ObjectIDField(32), default=_object_id, index=True, primary_key=True)
    created = db_main.Column(db_main.DateTime(), default=_utc_now)
    modified = db_main.Column(db_main.DateTime(), default=_utc_now, onupdate=_utc_now)
    deleted = db_main.Column(db_main.Integer, index=True, default=0)  # 1-Soft delete

    def add(self):
        """Stage this record in the current transaction."""
        db_main.session.add(self)
        return self

    def save(self):
        """Stage this record and flush pending statements to the database."""
        db_main.session.add(self)
        db_main.session.flush()
        return self

    def remove(self):
        """Hard-delete this record and flush the deletion."""
        db_main.session.delete(self)
        db_main.session.flush()
        return True

    def delete(self):
        """Soft-delete this record by flagging it as deleted."""
        self.deleted = 1
        db_main.session.flush()
        return True

    def detach_copy(self):
        """Return a plain CopiedData snapshot of the public attributes."""
        data = {attr: val
                for attr, val in self.__dict__.items()
                if not attr.startswith("_")}
        return CopiedData(data, self.__tablename__)
|
[
"app.db_main.session.flush",
"app.db_main.session.add",
"bson.objectid.ObjectId",
"app.db_main.session.rollback",
"app.db_main.session.delete",
"app.db_main.session.commit",
"app.db_main.DateTime",
"app.helper.sqlalchemy.ObjectIDField",
"werkzeug.routing.ValidationError",
"functools.wraps",
"re.sub",
"app.db_main.Column"
] |
[((1207, 1248), 're.sub', 're_sub', (['"""(.)([A-Z][a-z]+)"""', '"""\\\\1_\\\\2"""', 's0'], {}), "('(.)([A-Z][a-z]+)', '\\\\1_\\\\2', s0)\n", (1213, 1248), True, 'from re import sub as re_sub\n'), ((2280, 2334), 'app.db_main.Column', 'db_main.Column', (['db_main.Integer'], {'index': '(True)', 'default': '(0)'}), '(db_main.Integer, index=True, default=0)\n', (2294, 2334), False, 'from app import db_main, app\n'), ((474, 498), 'app.db_main.session.commit', 'db_main.session.commit', ([], {}), '()\n', (496, 498), False, 'from app import db_main, app\n'), ((517, 543), 'app.db_main.session.rollback', 'db_main.session.rollback', ([], {}), '()\n', (541, 543), False, 'from app import db_main, app\n'), ((638, 646), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (643, 646), False, 'from functools import wraps\n'), ((1046, 1061), 'bson.objectid.ObjectId', 'ObjectId', (['value'], {}), '(value)\n', (1054, 1061), False, 'from bson.objectid import ObjectId\n'), ((2043, 2060), 'app.helper.sqlalchemy.ObjectIDField', 'ObjectIDField', (['(32)'], {}), '(32)\n', (2056, 2060), False, 'from app.helper.sqlalchemy import ObjectIDField\n'), ((2141, 2159), 'app.db_main.DateTime', 'db_main.DateTime', ([], {}), '()\n', (2157, 2159), False, 'from app import db_main, app\n'), ((2209, 2227), 'app.db_main.DateTime', 'db_main.DateTime', ([], {}), '()\n', (2225, 2227), False, 'from app import db_main, app\n'), ((2409, 2434), 'app.db_main.session.add', 'db_main.session.add', (['self'], {}), '(self)\n', (2428, 2434), False, 'from app import db_main, app\n'), ((2527, 2552), 'app.db_main.session.add', 'db_main.session.add', (['self'], {}), '(self)\n', (2546, 2552), False, 'from app import db_main, app\n'), ((2561, 2584), 'app.db_main.session.flush', 'db_main.session.flush', ([], {}), '()\n', (2582, 2584), False, 'from app import db_main, app\n'), ((2674, 2702), 'app.db_main.session.delete', 'db_main.session.delete', (['self'], {}), '(self)\n', (2696, 2702), False, 'from app import db_main, app\n'), 
((2711, 2734), 'app.db_main.session.flush', 'db_main.session.flush', ([], {}), '()\n', (2732, 2734), False, 'from app import db_main, app\n'), ((2856, 2879), 'app.db_main.session.flush', 'db_main.session.flush', ([], {}), '()\n', (2877, 2879), False, 'from app import db_main, app\n'), ((1257, 1299), 're.sub', 're_sub', (['"""([a-z0-9])([A-Z])"""', '"""\\\\1_\\\\2"""', 's1'], {}), "('([a-z0-9])([A-Z])', '\\\\1_\\\\2', s1)\n", (1263, 1299), True, 'from re import sub as re_sub\n'), ((745, 769), 'app.db_main.session.commit', 'db_main.session.commit', ([], {}), '()\n', (767, 769), False, 'from app import db_main, app\n'), ((1517, 1534), 'werkzeug.routing.ValidationError', 'ValidationError', ([], {}), '()\n', (1532, 1534), False, 'from werkzeug.routing import BaseConverter, ValidationError\n'), ((902, 928), 'app.db_main.session.rollback', 'db_main.session.rollback', ([], {}), '()\n', (926, 928), False, 'from app import db_main, app\n')]
|
'''
This code defines the object relational model classes that sqlalchemy uses.
Each class maps to a table in the database.
'''
# Create the sqlite database and map python objects
# Configuration: import all modules needed
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean
# Configuration: Create instance of declarative base (class code will inherit this)
Base = declarative_base()
# Representation of the user database table as a python class.
class User(Base):
    """An application user identified by e-mail and password."""
    __tablename__ = 'user'
    # id is auto-incremented.
    id = Column(Integer, primary_key=True)
    email = Column(String(150), nullable=False)
    # NOTE(review): stored as a plain String column -- confirm it holds a
    # hash rather than a clear-text password.
    password = Column(String(150), nullable=False)
# Representation of the playlist table and relationship as a python class.
class Playlist(Base):
    """A named playlist owned by a single user."""
    __tablename__ = 'playlist'
    id = Column(Integer, primary_key=True)
    name = Column(String(50), nullable=False)
    # Owning user (foreign key into the user table).
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    user = relationship(User)
# Representation of the artist table as a python class.
class Artist(Base):
    """A musical artist record (fields mirror a Spotify-style catalogue)."""
    __tablename__ = 'artist'
    id = Column(Integer, primary_key=True)
    # External catalogue URI identifying the artist.
    uri = Column(String(50), nullable=False)
    name = Column(String(150), nullable=False)
    # Popularity score and follower count -- presumably snapshots from the
    # upstream catalogue; confirm the expected ranges with the importer.
    popularity = Column(Integer, nullable=False)
    followers = Column(Integer, nullable=False)
# Representation of the album table and relationship as a python class.
class Album(Base):
    """An album, linked to the artist who released it."""
    __tablename__ = 'album'
    id = Column(Integer, primary_key=True)
    uri = Column(String(50), nullable=False)
    name = Column(String(150), nullable=False)
    release_date = Column(Date, nullable=False)
    artist_id = Column(Integer, ForeignKey('artist.id'), nullable=False)
    artist = relationship(Artist, backref="artist")

    # JSON objects in a serializable format
    @property
    def serialize(self):
        """Return a dict view of this album for JSON responses.

        NOTE(review): release_date is a date object, which json.dumps
        cannot encode without a default= handler -- verify the caller.
        """
        return {
            'id': self.id,
            'name': self.name,
            'uri': self.uri,
            'release_date': self.release_date,
            'artist_id': self.artist_id,
        }
# Representation of the playlist item table and relationship as a python class.
class PlaylistItem(Base):
    """Association row linking one song to one playlist."""
    __tablename__ = 'playlist_item'
    id = Column(Integer, primary_key=True)
    playlist_id = Column(Integer, ForeignKey('playlist.id'), nullable=False)
    song_id = Column(Integer, ForeignKey('song.id'), nullable=False)
    playlist = relationship(Playlist, backref="playlist")
# Representation of the featuring item table and relationship as a python class.
class FeaturingItem(Base):
    """Association row linking a featured artist to a song."""
    __tablename__ = 'featuring_item'
    id = Column(Integer, primary_key=True)
    artist_id = Column(Integer, ForeignKey('artist.id'), nullable=False)
    song_id = Column(Integer, ForeignKey('song.id'), nullable=False)
    featuringArtist = relationship(Artist, backref="featuringArtist")
# Representation of the song item table and relationship as a python class.
class Song(Base):
    """A single track together with its audio-feature metrics."""
    __tablename__ = 'song'
    id = Column(Integer, primary_key=True)
    uri = Column(String(50), nullable=False)
    track_number = Column(Integer, nullable=True)
    name = Column(String(250), nullable=False)
    popularity = Column(Integer, nullable=True)
    # Duration in integer units -- presumably milliseconds; confirm upstream.
    duration = Column(Integer, nullable=False)
    # Audio-feature columns are nullable: analysis data may be absent.
    danceability = Column(Float, nullable=True)
    explicit = Column(Boolean, nullable=True)
    tempo = Column(Float, nullable=True)
    energy = Column(Float, nullable=True)
    instrumentalness = Column(Float, nullable=True)
    time_signature = Column(Integer, nullable=True)
    valence = Column(Float, nullable=True)
    album_id = Column(Integer, ForeignKey('album.id'), nullable=False)
    album = relationship(Album, backref="album")
|
[
"sqlalchemy.ForeignKey",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.orm.relationship",
"sqlalchemy.Column",
"sqlalchemy.String"
] |
[((495, 513), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (511, 513), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((664, 697), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (670, 697), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((937, 970), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (943, 970), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((1098, 1116), 'sqlalchemy.orm.relationship', 'relationship', (['User'], {}), '(User)\n', (1110, 1116), False, 'from sqlalchemy.orm import relationship\n'), ((1234, 1267), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (1240, 1267), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((1377, 1408), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (1383, 1408), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((1425, 1456), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (1431, 1456), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((1588, 1621), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (1594, 1621), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((1733, 1761), 'sqlalchemy.Column', 'Column', (['Date'], {'nullable': '(False)'}), '(Date, nullable=False)\n', (1739, 1761), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((1849, 1887), 'sqlalchemy.orm.relationship', 'relationship', (['Artist'], 
{'backref': '"""artist"""'}), "(Artist, backref='artist')\n", (1861, 1887), False, 'from sqlalchemy.orm import relationship\n'), ((2328, 2361), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (2334, 2361), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((2524, 2566), 'sqlalchemy.orm.relationship', 'relationship', (['Playlist'], {'backref': '"""playlist"""'}), "(Playlist, backref='playlist')\n", (2536, 2566), False, 'from sqlalchemy.orm import relationship\n'), ((2724, 2757), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (2730, 2757), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((2923, 2970), 'sqlalchemy.orm.relationship', 'relationship', (['Artist'], {'backref': '"""featuringArtist"""'}), "(Artist, backref='featuringArtist')\n", (2935, 2970), False, 'from sqlalchemy.orm import relationship\n'), ((3104, 3137), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (3110, 3137), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((3202, 3232), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(True)'}), '(Integer, nullable=True)\n', (3208, 3232), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((3297, 3327), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(True)'}), '(Integer, nullable=True)\n', (3303, 3327), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((3343, 3374), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (3349, 3374), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((3394, 3422), 'sqlalchemy.Column', 'Column', (['Float'], {'nullable': 
'(True)'}), '(Float, nullable=True)\n', (3400, 3422), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((3438, 3468), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(True)'}), '(Boolean, nullable=True)\n', (3444, 3468), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((3481, 3509), 'sqlalchemy.Column', 'Column', (['Float'], {'nullable': '(True)'}), '(Float, nullable=True)\n', (3487, 3509), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((3523, 3551), 'sqlalchemy.Column', 'Column', (['Float'], {'nullable': '(True)'}), '(Float, nullable=True)\n', (3529, 3551), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((3575, 3603), 'sqlalchemy.Column', 'Column', (['Float'], {'nullable': '(True)'}), '(Float, nullable=True)\n', (3581, 3603), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((3625, 3655), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(True)'}), '(Integer, nullable=True)\n', (3631, 3655), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((3670, 3698), 'sqlalchemy.Column', 'Column', (['Float'], {'nullable': '(True)'}), '(Float, nullable=True)\n', (3676, 3698), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((3783, 3819), 'sqlalchemy.orm.relationship', 'relationship', (['Album'], {'backref': '"""album"""'}), "(Album, backref='album')\n", (3795, 3819), False, 'from sqlalchemy.orm import relationship\n'), ((717, 728), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (723, 728), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((768, 779), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (774, 779), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, 
Date, Float, Boolean\n'), ((989, 999), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (995, 999), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((1047, 1068), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (1057, 1068), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((1285, 1295), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (1291, 1295), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((1331, 1342), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (1337, 1342), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((1639, 1649), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (1645, 1649), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((1685, 1696), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (1691, 1696), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((1794, 1817), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""artist.id"""'], {}), "('artist.id')\n", (1804, 1817), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((2396, 2421), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""playlist.id"""'], {}), "('playlist.id')\n", (2406, 2421), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((2469, 2490), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""song.id"""'], {}), "('song.id')\n", (2479, 2490), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((2790, 2813), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""artist.id"""'], {}), "('artist.id')\n", (2800, 2813), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((2861, 2882), 
'sqlalchemy.ForeignKey', 'ForeignKey', (['"""song.id"""'], {}), "('song.id')\n", (2871, 2882), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((3155, 3165), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (3161, 3165), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((3251, 3262), 'sqlalchemy.String', 'String', (['(250)'], {}), '(250)\n', (3257, 3262), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n'), ((3730, 3752), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""album.id"""'], {}), "('album.id')\n", (3740, 3752), False, 'from sqlalchemy import Column, ForeignKey, Integer, String, Date, Float, Boolean\n')]
|
"""
Django settings for professorSheetCheap project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
try:
from professorSheetCheap import local_settings
except ImportError:
from professorSheetCheap import local_settings_sample as local_settings
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
'analytics'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'professorSheetCheap.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages'
],
},
},
]
WSGI_APPLICATION = 'professorSheetCheap.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'redis://127.0.0.1:6379/',
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient'
}
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# LOG
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
},
}
# API SIGAA
BASE_URL ='http://localhost:8000'
API_SIGAA_BASE_URL = 'https://api.info.ufrn.br'
API_SIGAA = {
'BASE_URL': API_SIGAA_BASE_URL,
'AUTH_URL': 'https://autenticacao.info.ufrn.br/authz-server/oauth',
'REDIRECT_URI': BASE_URL + "/authenticate",
'CREDENTIALS': local_settings.API_SIGAA_CREDENTIALS,
'ENDPOINTS': {
'USUARIO': API_SIGAA_BASE_URL + '/usuario/v0.1/usuarios',
'VINCULO': API_SIGAA_BASE_URL + '/vinculo/v0.1/vinculos',
'DISCENTE': API_SIGAA_BASE_URL + '/discente/v0.1/discentes',
'CURSO': API_SIGAA_BASE_URL + '/curso/v0.1',
'MATRICULA': API_SIGAA_BASE_URL + '/matricula/v0.1/matriculas-componentes'
}
}
|
[
"os.path.abspath",
"os.path.join",
"os.getenv"
] |
[((599, 624), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (614, 624), False, 'import os\n'), ((2424, 2460), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""db.sqlite3"""'], {}), "(BASE_DIR, 'db.sqlite3')\n", (2436, 2460), False, 'import os\n'), ((1781, 1816), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""templates"""'], {}), "(BASE_DIR, 'templates')\n", (1793, 1816), False, 'import os\n'), ((3803, 3840), 'os.getenv', 'os.getenv', (['"""DJANGO_LOG_LEVEL"""', '"""INFO"""'], {}), "('DJANGO_LOG_LEVEL', 'INFO')\n", (3812, 3840), False, 'import os\n')]
|
import logging
import os
import colorama
from . import locked
from ..exceptions import RecursiveAddingWhileUsingFilename
from ..output.base import OutputDoesNotExistError
from ..progress import Tqdm
from ..repo.scm_context import scm_context
from ..stage import Stage
from ..utils import LARGE_DIR_SIZE
logger = logging.getLogger(__name__)
@locked
@scm_context
def add(repo, targets, recursive=False, no_commit=False, fname=None):
    """Track the given targets with DVC.

    :param repo: the project repository.
    :param targets: a path, or list of paths, to add.
    :param recursive: expand a directory target into its individual files.
    :param no_commit: skip committing outputs to the cache.
    :param fname: explicit DVC-file name; incompatible with ``recursive``
        because one file name cannot cover many generated stages.
    :returns: list of created stages.
    :raises RecursiveAddingWhileUsingFilename: if both ``recursive`` and
        ``fname`` are given.
    """
    if recursive and fname:
        raise RecursiveAddingWhileUsingFilename()
    if isinstance(targets, str):
        targets = [targets]
    stages_list = []
    num_targets = len(targets)
    with Tqdm(total=num_targets, desc="Add", unit="file", leave=True) as pbar:
        if num_targets == 1:
            # clear unneeded top-level progress bar for single target
            pbar.bar_format = "Adding..."
            pbar.refresh()
        for target in targets:
            sub_targets = _find_all_targets(repo, target, recursive)
            # one bar slot was reserved for this target; grow the bar by
            # however many extra files the expansion produced
            pbar.total += len(sub_targets) - 1
            if os.path.isdir(target) and len(sub_targets) > LARGE_DIR_SIZE:
                logger.warning(
                    "You are adding a large directory '{target}' recursively,"
                    " consider tracking it as a whole instead.\n"
                    "{purple}HINT:{nc} Remove the generated DVC-file and then"
                    " run `{cyan}dvc add {target}{nc}`".format(
                        purple=colorama.Fore.MAGENTA,
                        cyan=colorama.Fore.CYAN,
                        nc=colorama.Style.RESET_ALL,
                        target=target,
                    )
                )
            stages = _create_stages(repo, sub_targets, fname, pbar=pbar)
            # fail before saving anything if the new stages would break
            # the dependency graph
            repo.check_modified_graph(stages)
            with Tqdm(
                total=len(stages),
                desc="Processing",
                unit="file",
                disable=True if len(stages) == 1 else None,
            ) as pbar_stages:
                for stage in stages:
                    try:
                        stage.save()
                    except OutputDoesNotExistError:
                        # undo the progress tick for the missing output
                        # before propagating the error
                        pbar.n -= 1
                        raise
                    if not no_commit:
                        stage.commit()
                    stage.dump()
                    pbar_stages.update()
            stages_list += stages
        if num_targets == 1:  # restore bar format for stats
            pbar.bar_format = pbar.BAR_FMT_DEFAULT
    return stages_list
def _find_all_targets(repo, target, recursive):
    """Expand *target* into the list of files to add.

    A directory combined with ``recursive=True`` yields every file under
    it that is not DVC-internal, not a stage file, and not known to the
    SCM; anything else is returned as a single-element list.
    """
    if not (recursive and os.path.isdir(target)):
        return [target]

    walker = Tqdm(
        repo.tree.walk_files(target),
        desc="Searching " + target,
        bar_format=Tqdm.BAR_FMT_NOTOTAL,
        unit="file",
    )
    matches = []
    for fname in walker:
        if repo.is_dvc_internal(fname):
            continue
        if Stage.is_stage_file(fname):
            continue
        if repo.scm.belongs_to_scm(fname):
            continue
        if repo.scm.is_tracked(fname):
            continue
        matches.append(fname)
    return matches
def _create_stages(repo, targets, fname, pbar=None):
    """Build a ``Stage`` for every output path in *targets*.

    Outputs for which ``Stage.create`` returns a falsy value are skipped
    and removed from *pbar*'s total; successfully created stages update
    the bar's description instead.
    """
    progress = Tqdm(
        targets,
        desc="Creating DVC-files",
        disable=True if len(targets) < LARGE_DIR_SIZE else None,
        unit="file",
    )
    created = []
    for out in progress:
        stage = Stage.create(repo, outs=[out], add=True, fname=fname)
        if not stage:
            if pbar is not None:
                pbar.total -= 1
            continue
        created.append(stage)
        if pbar is not None:
            pbar.update_desc(out)
    return created
|
[
"os.path.isdir",
"logging.getLogger"
] |
[((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((2588, 2609), 'os.path.isdir', 'os.path.isdir', (['target'], {}), '(target)\n', (2601, 2609), False, 'import os\n'), ((1039, 1060), 'os.path.isdir', 'os.path.isdir', (['target'], {}), '(target)\n', (1052, 1060), False, 'import os\n')]
|
from Utils.Array import input_array
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data):
        self.data = data  # payload stored in this node
        self.next = None  # following node, or None at the tail
def input_linked_list() -> Node:
    """Read values via ``input_array`` and build a linked list from them.

    Unlike ``input_linked_list_with_end_value``, no sentinel value is
    needed to stop: every value read becomes a node.
    """
    values = input_array()
    dummy = Node(-9999999)  # dummy head removes the None-check inside the loop
    cursor = dummy
    for value in values:
        cursor.next = Node(value)
        cursor = cursor.next
    return dummy.next  # skip over the dummy head
def input_linked_list_with_end_value(end_of_linked_list=-1) -> Node:
    """Read values via ``input_array`` and build a linked list.

    Reading stops as soon as ``end_of_linked_list`` (default ``-1``) is
    encountered; the sentinel itself is not stored.
    """
    values = input_array("LL :")
    dummy = Node(-9999999)  # dummy head removes the None-check inside the loop
    cursor = dummy
    for value in values:
        if value == end_of_linked_list:
            break
        cursor.next = Node(value)
        cursor = cursor.next
    return dummy.next  # skip over the dummy head
def print_linked_list(head) -> None:
    """Print the list on one line in ``a -> b -> c`` form."""
    parts = []
    node = head
    while node is not None:
        parts.append(str(node.data))
        node = node.next
    print(" -> ".join(parts))
def get_linked_list_size(head) -> int:
    """Return the number of nodes reachable from *head* (0 if empty)."""
    size = 0
    current = head
    while current is not None:
        size += 1
        current = current.next
    return size
if __name__ == '__main__':
    # Manual smoke test: read node values from stdin, echo the list,
    # then print its length.
    head_pointer = input_linked_list()
    print_linked_list(head_pointer)
    linked_list_length = get_linked_list_size(head_pointer)
    print(linked_list_length)
# Sample inputs for the smoke test above:
"""
3 4 5 2 6 1 9
10 76 39 -3 2 9 -23 9
"""
|
[
"Utils.Array.input_array"
] |
[((260, 273), 'Utils.Array.input_array', 'input_array', ([], {}), '()\n', (271, 273), False, 'from Utils.Array import input_array\n'), ((851, 870), 'Utils.Array.input_array', 'input_array', (['"""LL :"""'], {}), "('LL :')\n", (862, 870), False, 'from Utils.Array import input_array\n')]
|
from numpy.random import choice
from statistics import mode
KEY = 0    # index of the jackpot key in the (key, row) tuple from spin()
VALUE = 1  # index of the display row in the (key, row) tuple from spin()
TWICE = 2   # two matching symbols
THRICE = 3  # three matching symbols; also the number of reels per spin
# Drawing probability for each symbol in SLOT (same order; sums to 1.0).
PROBABILITY = [0.30, 0.265, 0.179, 0.129, 0.073, 0.035, 0.019]
# Reel symbols as Discord-style emoji short codes.
NOTHING = ":x:"
CHERRY = ":cherries:"
BLUEBERRY = ":blueberries:"
COIN = ":coin:"
CARD = ":credit_card:"
GEM = ":gem:"
EIGHTBALL = ":8ball:"
SLOT = [NOTHING, CHERRY, BLUEBERRY, COIN, CARD, GEM, EIGHTBALL]
# Winning combinations (concatenated symbol strings) mapped to the
# factor applied to the wagered cash in SlotMachine.pot.
JACKPOT = {NOTHING * THRICE: 0,
           CHERRY * THRICE: 50,
           CHERRY * TWICE: 0.5,
           BLUEBERRY * THRICE: 150,
           BLUEBERRY * TWICE: 0.7,
           COIN * THRICE: 500,
           COIN * TWICE: 1.5,
           CARD * THRICE: 1500,
           GEM * THRICE: 2500,
           EIGHTBALL * THRICE: 25000
           }
average = []  # NOTE(review): not referenced anywhere in this module — confirm before removing
class SlotMachine:
    """Three-reel slot machine; winnings are multiples of the wagered cash."""

    def __init__(self, cash):
        # Amount wagered; JACKPOT factors are applied to this in pot().
        self.cash = cash

    @staticmethod
    def spin():
        """Spin the reels and return a ``(jackpot_key, row_display)`` pair.

        When exactly two cherries, blueberries or coins appear, the key
        becomes that symbol repeated twice so it matches the
        two-of-a-kind entries in JACKPOT.
        """
        reels = choice(SLOT, size=THRICE, p=PROBABILITY)
        display = "".join(reels)
        key = display
        if any(display.count(symbol) == TWICE
               for symbol in (CHERRY, BLUEBERRY, COIN)):
            key = mode(reels) * TWICE
        return key, display

    def pot(self):
        """Play one spin against this machine's cash.

        Returns ``(winnings, row_display)`` when the key is a known
        jackpot combination, otherwise just the row display string.
        """
        key, display = self.spin()
        if key in JACKPOT:
            return JACKPOT[key] * self.cash, display
        return display
|
[
"statistics.mode",
"numpy.random.choice"
] |
[((849, 889), 'numpy.random.choice', 'choice', (['SLOT'], {'size': 'THRICE', 'p': 'PROBABILITY'}), '(SLOT, size=THRICE, p=PROBABILITY)\n', (855, 889), False, 'from numpy.random import choice\n'), ((1038, 1047), 'statistics.mode', 'mode', (['row'], {}), '(row)\n', (1042, 1047), False, 'from statistics import mode\n')]
|
import warnings
from django.conf import settings
# Avoid shadowing the login() and logout() views below.
from django.contrib import messages
from django.contrib.auth import (
get_user_model, update_session_auth_hash
)
from django.contrib.auth.forms import (
PasswordResetForm, SetPasswordForm, PasswordChangeForm
)
from django.core.mail import EmailMessage
from django.shortcuts import redirect
from django.template import Context
from django.template.loader import get_template
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.contrib.auth.models import User
from django.contrib.auth import logout, login
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import resolve_url, render, redirect
from django.utils.encoding import force_text
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views import View
from django.template.response import TemplateResponse
from django.template.loader import render_to_string
from .tokens import account_activation_token
from .forms import SignUpForm, ProfileForm, ContactForm
def contact_unsigned(request):
    """Anonymous contact page: render the form and e-mail submissions.

    On a valid POST the message is rendered from ``contact_template.txt``
    and sent to support; the user is then redirected back to the contact
    page. Otherwise the (possibly bound, with errors) form is re-rendered.
    """
    form = ContactForm()
    if request.method == 'POST':
        # BUG FIX: the original instantiated ContactForm and then *called*
        # the instance with the POST data; Django Form instances are not
        # callable, so every POST raised TypeError. Bind the data to a new
        # form instead, and re-render it so validation errors are shown.
        form = ContactForm(data=request.POST)
        if form.is_valid():
            contact_name = request.POST.get(
                'contact_name', '')
            contact_email = request.POST.get(
                'contact_email', '')
            form_content = request.POST.get('content', '')
            # Email the profile with the contact information.
            template = get_template('contact_template.txt')
            context = Context({
                'contact_name': contact_name,
                'contact_email': contact_email,
                'form_content': form_content,
            })
            content = template.render(context)
            email = EmailMessage(
                "ERC Customer Support",
                content,
                "Your website" + '',
                ['<EMAIL>'],
                headers={'Reply-To': contact_email}
            )
            email.send()
            return redirect('session:contact')
    return render(request, 'session/contact_u.html', {
        'form': form,
    })
@login_required
def contact(request):
    """Signed-in contact page: render the form and e-mail submissions.

    Same flow as ``contact_unsigned`` but includes the requesting user's
    id in the e-mail template context.
    """
    form = ContactForm()
    if request.method == 'POST':
        # BUG FIX: the original instantiated ContactForm and then *called*
        # the instance with the POST data; Django Form instances are not
        # callable, so every POST raised TypeError. Bind the data to a new
        # form instead, and re-render it so validation errors are shown.
        form = ContactForm(data=request.POST)
        if form.is_valid():
            contact_name = request.POST.get(
                'contact_name', '')
            contact_email = request.POST.get(
                'contact_email', '')
            form_content = request.POST.get('content', '')
            # Email the profile with the contact information.
            template = get_template('contact_template.txt')
            context = Context({
                'contact_name': contact_name,
                'contact_email': contact_email,
                'form_content': form_content,
                "id": request.user.id
            })
            content = template.render(context)
            email = EmailMessage(
                "ERC Customer Support",
                content,
                "Your website" + '',
                ['<EMAIL>'],
                headers={'Reply-To': contact_email}
            )
            email.send()
            return redirect('session:contact')
    return render(request, 'session/contact.html', {
        'form': form,
    })
class AccountUpdate(LoginRequiredMixin, View):
    """Account-details page: update profile data or change the password.

    POST dispatches on a hidden ``_method`` field so that one endpoint
    serves both the profile form (``update``) and the password-change
    form (``password``).
    """

    def post(self, request):
        # Debug print() calls removed; use logging if tracing is needed.
        _method = request.POST.get("_method", None)
        if _method == "update":
            account_form = ProfileForm(
                instance=request.user, data=request.POST)
            if account_form.is_valid():
                user = account_form.save()
                # Keep the session valid if credentials were touched.
                update_session_auth_hash(request, user)  # Important!
                messages.success(
                    request, 'Your account was successfully updated!')
                return redirect('session:account-details')
            # BUG FIX: PasswordChangeForm takes the user positionally;
            # the original passed ``instance=request.user``, which raises
            # TypeError (unexpected keyword argument).
            form = PasswordChangeForm(request.user)
            messages.error(request, 'Please correct the error below.')
            return render(request, 'session/account_details.html',
                          {"form": form, "account_form": account_form})
        elif _method == "password":
            form = PasswordChangeForm(request.user, data=request.POST)
            if form.is_valid():
                user = form.save()
                update_session_auth_hash(request, user)  # Important!
                messages.success(
                    request, 'Your password was successfully updated!')
                return redirect('session:account-details')
            account_form = ProfileForm(instance=request.user)
            messages.error(request, 'Please correct the error below.')
            return render(request, 'session/account_details.html',
                          {"form": form, "account_form": account_form})

    def get(self, request):
        """Render the page with an unbound password form and profile form."""
        form = PasswordChangeForm(request.user)
        account_form = ProfileForm(instance=request.user)
        return render(request, 'session/account_details.html', {
            'form': form,
            'account_form': account_form
        })
def logoutUser(request):
    """Log the current user out (if any) and redirect appropriately."""
    user = request.user
    if user and user.is_authenticated():
        logout(request)
        return redirect("session:home")
    return redirect("session:login")
def account_activation_sent(request):
    """Show the 'activation e-mail sent' confirmation page."""
    template_name = 'session/account_activation_sent.html'
    return render(request, template_name)
def activate(request, uidb64, token):
    """Handle an account-activation link.

    Decodes the user id, verifies the token and, on success, activates
    the account, marks the e-mail confirmed, logs the user in and sends
    them to the records index. Any failure renders the invalid-link page.
    """
    try:
        user_pk = force_text(urlsafe_base64_decode(uidb64))
        user = User.objects.get(pk=user_pk)
    except (TypeError, ValueError, OverflowError, User.DoesNotExist):
        user = None
    if user is None or not account_activation_token.check_token(user, token):
        return render(request, 'session/account_activation_invalid.html')
    user.is_active = True
    user.profile.email_confirmed = True
    user.save()
    login(request, user)
    return redirect('records:index')
class AccountCreation(View):
    """Sign-up view: creates an inactive user and e-mails an activation link."""

    def get(self, request):
        return render(request, 'session/signup.html', {'form': SignUpForm()})

    def post(self, request):
        form = SignUpForm(request.POST)
        if not form.is_valid():
            return render(request, 'session/signup.html', {'form': form})
        user = form.save(commit=False)
        user.is_active = False  # must confirm the e-mail before logging in
        user.save()
        site = get_current_site(request)
        subject = 'Activate Your MySite Account'
        message = render_to_string(
            'session/account_activation_email.html',
            {
                'user': user,
                'domain': site.domain,
                'uid': urlsafe_base64_encode(force_bytes(user.pk)),
                'token': account_activation_token.make_token(user),
            })
        user.email_user(subject, message)
        return redirect('session:account_activation_sent')
def about(request):
    """Render the static 'about' page."""
    template_name = 'session/about.html'
    return render(request, template_name)
class HomeView(View):
    """Landing page; authenticated users are sent straight to their records."""

    def get(self, request):
        user = request.user
        if user and user.is_authenticated():
            return redirect('records:index')
        return render(request, "session/home.html")
@csrf_protect
def password_reset(request, is_admin_site=False,
                   template_name='registration/password_reset_form.html',
                   email_template_name='registration/password_reset_email.html',
                   subject_template_name='registration/password_reset_subject.txt',
                   password_reset_form=PasswordResetForm,
                   token_generator=default_token_generator,
                   post_reset_redirect=None,
                   from_email=None,
                   current_app=None,
                   extra_context=None,
                   html_email_template_name=None):
    """Ask for the user's e-mail and send the password-reset message.

    Appears vendored from ``django.contrib.auth.views.password_reset``,
    with the default redirect pointed at this project's
    ``session:password_reset_done`` URL.
    """
    if post_reset_redirect is None:
        post_reset_redirect = reverse('session:password_reset_done')
    else:
        post_reset_redirect = resolve_url(post_reset_redirect)
    if request.method == "POST":
        form = password_reset_form(request.POST)
        if form.is_valid():
            # Options forwarded to PasswordResetForm.save(), which does the
            # actual user lookup and e-mail sending.
            opts = {
                'use_https': request.is_secure(),
                'token_generator': token_generator,
                'from_email': from_email,
                'email_template_name': email_template_name,
                'subject_template_name': subject_template_name,
                'request': request,
                'html_email_template_name': html_email_template_name,
            }
            if is_admin_site:
                warnings.warn(
                    "The is_admin_site argument to "
                    "django.contrib.auth.views.password_reset() is deprecated "
                    "and will be removed in Django 1.10."
                )
                opts = dict(opts, domain_override=request.get_host())
            form.save(**opts)
            return HttpResponseRedirect(post_reset_redirect)
    else:
        form = password_reset_form()
    context = {
        'form': form,
        'title': _('Password reset'),
    }
    if extra_context is not None:
        context.update(extra_context)
    if current_app is not None:
        request.current_app = current_app
    return TemplateResponse(request, template_name, context)
@sensitive_post_parameters()
@never_cache
def password_reset_confirm(request, uidb64=None, token=None,
                           template_name='registration/password_reset_confirm.html',
                           token_generator=default_token_generator,
                           set_password_form=SetPasswordForm,
                           post_reset_redirect=None,
                           current_app=None, extra_context=None):
    """
    View that checks the hash in a password reset link and presents a
    form for entering a new password.

    Appears vendored from Django's ``password_reset_confirm``, with the
    default redirect pointed at ``session:password_reset_complete``.
    """
    UserModel = get_user_model()
    assert uidb64 is not None and token is not None  # checked by URLconf
    if post_reset_redirect is None:
        post_reset_redirect = reverse('session:password_reset_complete')
    else:
        post_reset_redirect = resolve_url(post_reset_redirect)
    try:
        # urlsafe_base64_decode() decodes to bytestring on Python 3
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = UserModel._default_manager.get(pk=uid)
    except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
        # A malformed uid or a deleted user leaves the link invalid.
        user = None
    if user is not None and token_generator.check_token(user, token):
        validlink = True
        title = _('Enter new password')
        if request.method == 'POST':
            form = set_password_form(user, request.POST)
            if form.is_valid():
                form.save()
                return HttpResponseRedirect(post_reset_redirect)
        else:
            form = set_password_form(user)
    else:
        # Invalid or expired token: render the page without a form.
        validlink = False
        form = None
        title = _('Password reset unsuccessful')
    context = {
        'form': form,
        'title': title,
        'validlink': validlink,
    }
    if extra_context is not None:
        context.update(extra_context)
    if current_app is not None:
        request.current_app = current_app
    return TemplateResponse(request, template_name, context)
def password_reset_complete(request,
                            template_name='registration/password_reset_complete.html',
                            current_app=None, extra_context=None):
    """Final page of the password-reset flow, linking back to login."""
    context = {
        'login_url': resolve_url(settings.LOGIN_URL),
        'title': _('Password reset complete'),
    }
    if extra_context is not None:
        context.update(extra_context)
    if current_app is not None:
        request.current_app = current_app
    return TemplateResponse(request, template_name, context)
def password_reset_done(request,
                        template_name='registration/password_reset_done.html',
                        current_app=None, extra_context=None):
    """Page shown right after the reset e-mail form was submitted."""
    context = {'title': _('Password reset sent')}
    if extra_context is not None:
        context.update(extra_context)
    if current_app is not None:
        request.current_app = current_app
    return TemplateResponse(request, template_name, context)
|
[
"django.core.urlresolvers.reverse",
"django.contrib.messages.error",
"django.http.HttpResponseRedirect",
"django.contrib.auth.login",
"django.contrib.auth.models.User.objects.get",
"django.contrib.auth.logout",
"django.contrib.auth.forms.PasswordChangeForm",
"django.shortcuts.render",
"django.utils.translation.ugettext",
"django.template.loader.get_template",
"django.core.mail.EmailMessage",
"django.shortcuts.resolve_url",
"django.utils.http.urlsafe_base64_decode",
"django.views.decorators.debug.sensitive_post_parameters",
"django.contrib.messages.success",
"django.utils.encoding.force_bytes",
"django.shortcuts.redirect",
"django.contrib.auth.get_user_model",
"django.template.Context",
"django.template.response.TemplateResponse",
"django.contrib.sites.shortcuts.get_current_site",
"warnings.warn",
"django.contrib.auth.update_session_auth_hash"
] |
[((10141, 10168), 'django.views.decorators.debug.sensitive_post_parameters', 'sensitive_post_parameters', ([], {}), '()\n', (10166, 10168), False, 'from django.views.decorators.debug import sensitive_post_parameters\n'), ((2675, 2738), 'django.shortcuts.render', 'render', (['request', '"""session/contact_u.html"""', "{'form': form_class}"], {}), "(request, 'session/contact_u.html', {'form': form_class})\n", (2681, 2738), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((3878, 3939), 'django.shortcuts.render', 'render', (['request', '"""session/contact.html"""', "{'form': form_class}"], {}), "(request, 'session/contact.html', {'form': form_class})\n", (3884, 3939), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((6042, 6067), 'django.shortcuts.redirect', 'redirect', (['"""session:login"""'], {}), "('session:login')\n", (6050, 6067), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((6119, 6174), 'django.shortcuts.render', 'render', (['request', '"""session/account_activation_sent.html"""'], {}), "(request, 'session/account_activation_sent.html')\n", (6125, 6174), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((7777, 7814), 'django.shortcuts.render', 'render', (['request', '"""session/about.html"""'], {}), "(request, 'session/about.html')\n", (7783, 7814), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((10088, 10137), 'django.template.response.TemplateResponse', 'TemplateResponse', (['request', 'template_name', 'context'], {}), '(request, template_name, context)\n', (10104, 10137), False, 'from django.template.response import TemplateResponse\n'), ((10717, 10733), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (10731, 10733), False, 'from django.contrib.auth import get_user_model, update_session_auth_hash\n'), ((12048, 12097), 'django.template.response.TemplateResponse', 'TemplateResponse', (['request', 
'template_name', 'context'], {}), '(request, template_name, context)\n', (12064, 12097), False, 'from django.template.response import TemplateResponse\n'), ((12573, 12622), 'django.template.response.TemplateResponse', 'TemplateResponse', (['request', 'template_name', 'context'], {}), '(request, template_name, context)\n', (12589, 12622), False, 'from django.template.response import TemplateResponse\n'), ((13024, 13073), 'django.template.response.TemplateResponse', 'TemplateResponse', (['request', 'template_name', 'context'], {}), '(request, template_name, context)\n', (13040, 13073), False, 'from django.template.response import TemplateResponse\n'), ((5649, 5681), 'django.contrib.auth.forms.PasswordChangeForm', 'PasswordChangeForm', (['request.user'], {}), '(request.user)\n', (5667, 5681), False, 'from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm, PasswordChangeForm\n'), ((5755, 5852), 'django.shortcuts.render', 'render', (['request', '"""session/account_details.html"""', "{'form': form, 'account_form': account_form}"], {}), "(request, 'session/account_details.html', {'form': form,\n 'account_form': account_form})\n", (5761, 5852), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((5975, 5990), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (5981, 5990), False, 'from django.contrib.auth import logout, login\n'), ((6006, 6030), 'django.shortcuts.redirect', 'redirect', (['"""session:home"""'], {}), "('session:home')\n", (6014, 6030), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((6295, 6319), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'pk': 'uid'}), '(pk=uid)\n', (6311, 6319), False, 'from django.contrib.auth.models import User\n'), ((6592, 6612), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (6597, 6612), False, 'from django.contrib.auth import logout, login\n'), ((6628, 6653), 
'django.shortcuts.redirect', 'redirect', (['"""records:index"""'], {}), "('records:index')\n", (6636, 6653), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((6679, 6737), 'django.shortcuts.render', 'render', (['request', '"""session/account_activation_invalid.html"""'], {}), "(request, 'session/account_activation_invalid.html')\n", (6685, 6737), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((6841, 6895), 'django.shortcuts.render', 'render', (['request', '"""session/signup.html"""', "{'form': form}"], {}), "(request, 'session/signup.html', {'form': form})\n", (6847, 6895), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((7689, 7743), 'django.shortcuts.render', 'render', (['request', '"""session/signup.html"""', "{'form': form}"], {}), "(request, 'session/signup.html', {'form': form})\n", (7695, 7743), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((8005, 8041), 'django.shortcuts.render', 'render', (['request', '"""session/home.html"""'], {}), "(request, 'session/home.html')\n", (8011, 8041), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((8738, 8776), 'django.core.urlresolvers.reverse', 'reverse', (['"""session:password_reset_done"""'], {}), "('session:password_reset_done')\n", (8745, 8776), False, 'from django.core.urlresolvers import reverse\n'), ((8817, 8849), 'django.shortcuts.resolve_url', 'resolve_url', (['post_reset_redirect'], {}), '(post_reset_redirect)\n', (8828, 8849), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((9902, 9921), 'django.utils.translation.ugettext', '_', (['"""Password reset"""'], {}), "('Password reset')\n", (9903, 9921), True, 'from django.utils.translation import ugettext as _\n'), ((10874, 10916), 'django.core.urlresolvers.reverse', 'reverse', (['"""session:password_reset_complete"""'], {}), "('session:password_reset_complete')\n", (10881, 10916), False, 'from 
django.core.urlresolvers import reverse\n'), ((10957, 10989), 'django.shortcuts.resolve_url', 'resolve_url', (['post_reset_redirect'], {}), '(post_reset_redirect)\n', (10968, 10989), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((11384, 11407), 'django.utils.translation.ugettext', '_', (['"""Enter new password"""'], {}), "('Enter new password')\n", (11385, 11407), True, 'from django.utils.translation import ugettext as _\n'), ((11756, 11788), 'django.utils.translation.ugettext', '_', (['"""Password reset unsuccessful"""'], {}), "('Password reset unsuccessful')\n", (11757, 11788), True, 'from django.utils.translation import ugettext as _\n'), ((12328, 12359), 'django.shortcuts.resolve_url', 'resolve_url', (['settings.LOGIN_URL'], {}), '(settings.LOGIN_URL)\n', (12339, 12359), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((12378, 12406), 'django.utils.translation.ugettext', '_', (['"""Password reset complete"""'], {}), "('Password reset complete')\n", (12379, 12406), True, 'from django.utils.translation import ugettext as _\n'), ((12833, 12857), 'django.utils.translation.ugettext', '_', (['"""Password reset sent"""'], {}), "('Password reset sent')\n", (12834, 12857), True, 'from django.utils.translation import ugettext as _\n'), ((2089, 2125), 'django.template.loader.get_template', 'get_template', (['"""contact_template.txt"""'], {}), "('contact_template.txt')\n", (2101, 2125), False, 'from django.template.loader import get_template\n'), ((2148, 2253), 'django.template.Context', 'Context', (["{'contact_name': contact_name, 'contact_email': contact_email,\n 'form_content': form_content}"], {}), "({'contact_name': contact_name, 'contact_email': contact_email,\n 'form_content': form_content})\n", (2155, 2253), False, 'from django.template import Context\n'), ((2381, 2502), 'django.core.mail.EmailMessage', 'EmailMessage', (['"""ERC Customer Support"""', 'content', "('Your website' + '')", "['<EMAIL>']"], {'headers': 
"{'Reply-To': contact_email}"}), "('ERC Customer Support', content, 'Your website' + '', [\n '<EMAIL>'], headers={'Reply-To': contact_email})\n", (2393, 2502), False, 'from django.core.mail import EmailMessage\n'), ((2636, 2663), 'django.shortcuts.redirect', 'redirect', (['"""session:contact"""'], {}), "('session:contact')\n", (2644, 2663), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((3253, 3289), 'django.template.loader.get_template', 'get_template', (['"""contact_template.txt"""'], {}), "('contact_template.txt')\n", (3265, 3289), False, 'from django.template.loader import get_template\n'), ((3312, 3440), 'django.template.Context', 'Context', (["{'contact_name': contact_name, 'contact_email': contact_email,\n 'form_content': form_content, 'id': request.user.id}"], {}), "({'contact_name': contact_name, 'contact_email': contact_email,\n 'form_content': form_content, 'id': request.user.id})\n", (3319, 3440), False, 'from django.template import Context\n'), ((3583, 3704), 'django.core.mail.EmailMessage', 'EmailMessage', (['"""ERC Customer Support"""', 'content', "('Your website' + '')", "['<EMAIL>']"], {'headers': "{'Reply-To': contact_email}"}), "('ERC Customer Support', content, 'Your website' + '', [\n '<EMAIL>'], headers={'Reply-To': contact_email})\n", (3595, 3704), False, 'from django.core.mail import EmailMessage\n'), ((3838, 3865), 'django.shortcuts.redirect', 'redirect', (['"""session:contact"""'], {}), "('session:contact')\n", (3846, 3865), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((6249, 6278), 'django.utils.http.urlsafe_base64_decode', 'urlsafe_base64_decode', (['uidb64'], {}), '(uidb64)\n', (6270, 6278), False, 'from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode\n'), ((7123, 7148), 'django.contrib.sites.shortcuts.get_current_site', 'get_current_site', (['request'], {}), '(request)\n', (7139, 7148), False, 'from django.contrib.sites.shortcuts import get_current_site\n'), 
((7630, 7673), 'django.shortcuts.redirect', 'redirect', (['"""session:account_activation_sent"""'], {}), "('session:account_activation_sent')\n", (7638, 7673), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((9758, 9799), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['post_reset_redirect'], {}), '(post_reset_redirect)\n', (9778, 9799), False, 'from django.http import HttpResponseRedirect\n'), ((11092, 11121), 'django.utils.http.urlsafe_base64_decode', 'urlsafe_base64_decode', (['uidb64'], {}), '(uidb64)\n', (11113, 11121), False, 'from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode\n'), ((4366, 4405), 'django.contrib.auth.update_session_auth_hash', 'update_session_auth_hash', (['request', 'user'], {}), '(request, user)\n', (4390, 4405), False, 'from django.contrib.auth import get_user_model, update_session_auth_hash\n'), ((4436, 4503), 'django.contrib.messages.success', 'messages.success', (['request', '"""Your account was successfully updated!"""'], {}), "(request, 'Your account was successfully updated!')\n", (4452, 4503), False, 'from django.contrib import messages\n'), ((4548, 4583), 'django.shortcuts.redirect', 'redirect', (['"""session:account-details"""'], {}), "('session:account-details')\n", (4556, 4583), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((4625, 4666), 'django.contrib.auth.forms.PasswordChangeForm', 'PasswordChangeForm', ([], {'instance': 'request.user'}), '(instance=request.user)\n', (4643, 4666), False, 'from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm, PasswordChangeForm\n'), ((4683, 4741), 'django.contrib.messages.error', 'messages.error', (['request', '"""Please correct the error below."""'], {}), "(request, 'Please correct the error below.')\n", (4697, 4741), False, 'from django.contrib import messages\n'), ((4765, 4862), 'django.shortcuts.render', 'render', (['request', '"""session/account_details.html"""', "{'form': form, 
'account_form': account_form}"], {}), "(request, 'session/account_details.html', {'form': form,\n 'account_form': account_form})\n", (4771, 4862), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((4944, 4995), 'django.contrib.auth.forms.PasswordChangeForm', 'PasswordChangeForm', (['request.user'], {'data': 'request.POST'}), '(request.user, data=request.POST)\n', (4962, 4995), False, 'from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm, PasswordChangeForm\n'), ((7964, 7989), 'django.shortcuts.redirect', 'redirect', (['"""records:index"""'], {}), "('records:index')\n", (7972, 7989), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((9415, 9564), 'warnings.warn', 'warnings.warn', (['"""The is_admin_site argument to django.contrib.auth.views.password_reset() is deprecated and will be removed in Django 1.10."""'], {}), "(\n 'The is_admin_site argument to django.contrib.auth.views.password_reset() is deprecated and will be removed in Django 1.10.'\n )\n", (9428, 9564), False, 'import warnings\n'), ((11585, 11626), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['post_reset_redirect'], {}), '(post_reset_redirect)\n', (11605, 11626), False, 'from django.http import HttpResponseRedirect\n'), ((5079, 5118), 'django.contrib.auth.update_session_auth_hash', 'update_session_auth_hash', (['request', 'user'], {}), '(request, user)\n', (5103, 5118), False, 'from django.contrib.auth import get_user_model, update_session_auth_hash\n'), ((5149, 5217), 'django.contrib.messages.success', 'messages.success', (['request', '"""Your password was successfully updated!"""'], {}), "(request, 'Your password was successfully updated!')\n", (5165, 5217), False, 'from django.contrib import messages\n'), ((5262, 5297), 'django.shortcuts.redirect', 'redirect', (['"""session:account-details"""'], {}), "('session:account-details')\n", (5270, 5297), False, 'from django.shortcuts import resolve_url, render, redirect\n'), 
((5399, 5457), 'django.contrib.messages.error', 'messages.error', (['request', '"""Please correct the error below."""'], {}), "(request, 'Please correct the error below.')\n", (5413, 5457), False, 'from django.contrib import messages\n'), ((5481, 5578), 'django.shortcuts.render', 'render', (['request', '"""session/account_details.html"""', "{'form': form, 'account_form': account_form}"], {}), "(request, 'session/account_details.html', {'form': form,\n 'account_form': account_form})\n", (5487, 5578), False, 'from django.shortcuts import resolve_url, render, redirect\n'), ((7451, 7471), 'django.utils.encoding.force_bytes', 'force_bytes', (['user.pk'], {}), '(user.pk)\n', (7462, 7471), False, 'from django.utils.encoding import force_bytes\n')]
|
# coding: utf-8
# # Exploratory data analysis of TCGA mutation data
# In[1]:
import os
import numpy
import pandas
import seaborn
get_ipython().run_line_magic('matplotlib', 'inline')
# ## Read TCGA datasets
# In[2]:
path = os.path.join('data', 'mutation-matrix.tsv.bz2')
mutation_df = pandas.read_table(path, index_col=0)
mutation_df.columns.name = 'entrez_gene_id'
mutation_df.shape
# In[3]:
path = os.path.join('data', 'samples.tsv')
sample_df = pandas.read_table(path)
sample_df.head(2)
# ## Distribution of mutations counts for genes
# In[4]:
gene_df = mutation_df.sum(axis='rows').rename('n_mutations').reset_index()
gene_df['n_mutations_log1p'] = numpy.log1p(gene_df.n_mutations)
gene_df.head(2)
# In[5]:
ax = seaborn.distplot(gene_df.n_mutations_log1p)
xticks = ax.get_xticks()
xticklabels = numpy.expm1(xticks).round().astype(int)
axis_texts = ax.set_xticklabels(xticklabels)
# In[6]:
sum(gene_df.n_mutations == 0)
# ## Distribution of mutations counts for samples
# In[7]:
sample_df = sample_df.merge(
mutation_df.sum(axis='columns').rename('n_mutations').reset_index()
)
sample_df['n_mutations_log1p'] = numpy.log1p(sample_df.n_mutations)
sample_df.head(2)
# In[8]:
# Mutations per sample
ax = seaborn.distplot(sample_df.n_mutations_log1p)
xticks = ax.get_xticks()
xticklabels = numpy.expm1(xticks).round().astype(int)
axis_texts = ax.set_xticklabels(xticklabels)
# ## Diagnosis age versus mutation count for samples
# In[9]:
grid = seaborn.jointplot('n_mutations_log1p', 'age_diagnosed', data=sample_df, kind='hex')
xticks = grid.ax_marg_x.get_xticks()
xticklabels = numpy.expm1(xticks).round().astype(int)
axis_texts = grid.ax_marg_x.set_xticklabels(xticklabels)
# ## Mutation frequency by disease
# In[10]:
genes = mutation_df.columns.tolist()
verbose_mutation_df = sample_df.merge(mutation_df.reset_index())
mutation_freq_df = verbose_mutation_df.groupby('disease').apply(lambda df: df[genes].mean(axis='rows')).assign(
n_mutations = verbose_mutation_df.groupby('disease').apply(len)
)
mutation_freq_df.iloc[:3, :3]
# In[11]:
verbose_mutation_df.head()
# In[12]:
gene_subset = {
'7157': 'TP53', # tumor protein p53
'7428': 'VHL', # von Hippel-Lindau tumor suppressor
'29126': 'CD274', # CD274 molecule
'672': 'BRCA1', # BRCA1, DNA repair associated
'675': 'BRCA2', # BRCA2, DNA repair associated
'238': 'ALK', # anaplastic lymphoma receptor tyrosine kinase
'4221': 'MEN1', # menin 1
'5979': 'RET', # ret proto-oncogene
}
plot_df = (mutation_freq_df
.query("n_mutations > 100")
[list(gene_subset)]
.rename(columns=gene_subset)
)
# Convert to percent of max mutation rate for gene
plot_df = 100 * plot_df.divide(plot_df.max())
ax = seaborn.heatmap(plot_df)
|
[
"seaborn.heatmap",
"numpy.expm1",
"seaborn.distplot",
"seaborn.jointplot",
"pandas.read_table",
"os.path.join",
"numpy.log1p"
] |
[((234, 281), 'os.path.join', 'os.path.join', (['"""data"""', '"""mutation-matrix.tsv.bz2"""'], {}), "('data', 'mutation-matrix.tsv.bz2')\n", (246, 281), False, 'import os\n'), ((296, 332), 'pandas.read_table', 'pandas.read_table', (['path'], {'index_col': '(0)'}), '(path, index_col=0)\n', (313, 332), False, 'import pandas\n'), ((415, 450), 'os.path.join', 'os.path.join', (['"""data"""', '"""samples.tsv"""'], {}), "('data', 'samples.tsv')\n", (427, 450), False, 'import os\n'), ((463, 486), 'pandas.read_table', 'pandas.read_table', (['path'], {}), '(path)\n', (480, 486), False, 'import pandas\n'), ((673, 705), 'numpy.log1p', 'numpy.log1p', (['gene_df.n_mutations'], {}), '(gene_df.n_mutations)\n', (684, 705), False, 'import numpy\n'), ((740, 783), 'seaborn.distplot', 'seaborn.distplot', (['gene_df.n_mutations_log1p'], {}), '(gene_df.n_mutations_log1p)\n', (756, 783), False, 'import seaborn\n'), ((1151, 1185), 'numpy.log1p', 'numpy.log1p', (['sample_df.n_mutations'], {}), '(sample_df.n_mutations)\n', (1162, 1185), False, 'import numpy\n'), ((1245, 1290), 'seaborn.distplot', 'seaborn.distplot', (['sample_df.n_mutations_log1p'], {}), '(sample_df.n_mutations_log1p)\n', (1261, 1290), False, 'import seaborn\n'), ((1489, 1576), 'seaborn.jointplot', 'seaborn.jointplot', (['"""n_mutations_log1p"""', '"""age_diagnosed"""'], {'data': 'sample_df', 'kind': '"""hex"""'}), "('n_mutations_log1p', 'age_diagnosed', data=sample_df,\n kind='hex')\n", (1506, 1576), False, 'import seaborn\n'), ((2753, 2777), 'seaborn.heatmap', 'seaborn.heatmap', (['plot_df'], {}), '(plot_df)\n', (2768, 2777), False, 'import seaborn\n'), ((823, 842), 'numpy.expm1', 'numpy.expm1', (['xticks'], {}), '(xticks)\n', (834, 842), False, 'import numpy\n'), ((1330, 1349), 'numpy.expm1', 'numpy.expm1', (['xticks'], {}), '(xticks)\n', (1341, 1349), False, 'import numpy\n'), ((1624, 1643), 'numpy.expm1', 'numpy.expm1', (['xticks'], {}), '(xticks)\n', (1635, 1643), False, 'import numpy\n')]
|
import requests
from bs4 import BeautifulSoup
import json
import csv
class ProxyScraper:
    """Scrape the free-proxy-list.net proxy table and export it to CSV."""

    def __init__(self):
        # BUG FIX: `results` used to be a class-level list, silently shared
        # across every instance; make it per-instance state.
        self.results = []

    def fetch(self, url):
        """Fetch data from url"""
        return requests.get(url)

    def parse(self, html):
        """Parse the result of fetch"""
        soup = BeautifulSoup(html, 'html.parser')
        table = soup.find('table')
        rows = table.find_all('tr')
        # BUG FIX: iterate the header *cells* explicitly; iterating the raw
        # row also yields whitespace text nodes between tags.
        headers = [header.text for header in rows[0].find_all(['th', 'td'])]
        self.results.append(headers)
        for row in rows:
            cells = row.find_all('td')
            if cells:
                # Only real data cells, not stray whitespace children.
                self.results.append([td.text for td in cells])

    def to_csv(self):
        """Export parsed data to csv"""
        # newline='' is required by the csv module to avoid blank rows on Windows.
        with open('proxy_list.csv', 'w', newline='') as output:
            writer = csv.writer(output)
            writer.writerows(self.results)

    def run(self):
        """Run the scraper"""
        res = self.fetch('https://www.free-proxy-list.net/')
        self.parse(res.text)
        self.to_csv()
if __name__ == '__main__':
    # Script entry point: scrape once and write proxy_list.csv.
    scraper = ProxyScraper()
    scraper.run()
|
[
"bs4.BeautifulSoup",
"csv.writer",
"requests.get"
] |
[((184, 201), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (196, 201), False, 'import requests\n'), ((285, 319), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (298, 319), False, 'from bs4 import BeautifulSoup\n'), ((780, 798), 'csv.writer', 'csv.writer', (['output'], {}), '(output)\n', (790, 798), False, 'import csv\n')]
|
"""
Ipc Transmitter Module
"""
import win32api, win32pipe, win32file, pywintypes
import json
class IpcOutHandler:
    """Transmitter side of a named-pipe IPC channel sending JSON messages."""

    # Class-level defaults (kept for backward compatibility with code that
    # reads these attributes before/without calling __init__).
    __pipe_out__ = None        # output pipe handle (internal)
    __is_initialize__ = False  # whether the pipe has been initialized
    __is_finalize__ = False    # whether the pipe has been finalized
    __seqno_counter__ = 0      # auto-increment counter for seqNo

    ### --- constructor --- ###
    def __init__(self):
        """Constructor."""
        # BUG FIX: the original assigned to *local* variables here instead of
        # instance attributes, so the constructor had no effect.
        self.__is_initialize__ = False
        self.__is_finalize__ = False

    ### --- member accessors --- ###
    def IsInitialized(self):
        """
        Return True if the output pipe has been initialized.
        :param self:
        """
        return self.__is_initialize__

    def IsFinalized(self):
        """
        Return True if the output pipe has been finalized.
        :param self:
        """
        return self.__is_finalize__

    ### --- operations --- ###
    def Initialize(self):
        """
        Create the named pipe and wait for a client connection.
        Returns True on success, False otherwise.
        :param self:
        """
        try:
            if not self.__is_initialize__ and not self.__is_finalize__:
                # Create the pipe
                self.__pipe_out__ = win32pipe.CreateNamedPipe(r'\\.\pipe\biassys_pipein', win32pipe.PIPE_ACCESS_OUTBOUND, win32pipe.PIPE_WAIT | win32pipe.PIPE_TYPE_BYTE, 1, 65536, 0, 10000, None)
                if self.__pipe_out__ is not None:
                    if win32pipe.ConnectNamedPipe(self.__pipe_out__) == 0:  # confirm the pipe connection
                        self.__is_initialize__ = True
                        return True
        except pywintypes.error:
            return False
        return False

    def Finalize(self):
        """
        Disconnect and close the output pipe.
        Returns True on success, False otherwise.
        :param self:
        """
        try:
            if self.__is_initialize__ and not self.__is_finalize__:
                if self.__pipe_out__ is not None:
                    # Disconnect and close the pipe
                    win32pipe.DisconnectNamedPipe(self.__pipe_out__)
                    self.__pipe_out__.close()
                    self.__is_finalize__ = True
                    return True
        except pywintypes.error:
            return False
        return False

    def SendByBlocking(self, commname, seqno, commtype, commparam):
        """
        Serialize a command as JSON and transmit it through the pipe (blocking).

        Arguments:
            commname {[type]} -- command name
            seqno -- < 0 for auto-increment, >= 0 for a constant value
            commtype {[type]} -- command type
            commparam {[type]} -- command parameters

        Returns:
            int -- 0 on success, -1 on invalid (None) arguments,
            -2 on a pipe write error.
        """
        # Error check: None arguments abort with an error code
        if commname is None or commtype is None or commparam is None:
            return -1
        # Build the JSON message
        if seqno >= 0:
            jsonmsg = json.dumps({'command': commname, 'seqNo': seqno, 'type': commtype, 'param': commparam}, ensure_ascii=False)
        else:
            jsonmsg = json.dumps({'command': commname, 'seqNo': self.__seqno_counter__, 'type': commtype, 'param': commparam}, ensure_ascii=False)
            self.__seqno_counter__ = self.__seqno_counter__ + 1
        # Write the data to the pipe
        try:
            win32file.WriteFile(self.__pipe_out__, jsonmsg.encode('utf-8'))
        except pywintypes.error:
            return -2
        return 0
|
[
"win32pipe.CreateNamedPipe",
"win32pipe.DisconnectNamedPipe",
"win32pipe.ConnectNamedPipe",
"json.dumps"
] |
[((2808, 2919), 'json.dumps', 'json.dumps', (["{'command': commname, 'seqNo': seqno, 'type': commtype, 'param': commparam}"], {'ensure_ascii': '(False)'}), "({'command': commname, 'seqNo': seqno, 'type': commtype, 'param':\n commparam}, ensure_ascii=False)\n", (2818, 2919), False, 'import json\n'), ((2952, 3080), 'json.dumps', 'json.dumps', (["{'command': commname, 'seqNo': self.__seqno_counter__, 'type': commtype,\n 'param': commparam}"], {'ensure_ascii': '(False)'}), "({'command': commname, 'seqNo': self.__seqno_counter__, 'type':\n commtype, 'param': commparam}, ensure_ascii=False)\n", (2962, 3080), False, 'import json\n'), ((1112, 1283), 'win32pipe.CreateNamedPipe', 'win32pipe.CreateNamedPipe', (['"""\\\\\\\\.\\\\pipe\\\\biassys_pipein"""', 'win32pipe.PIPE_ACCESS_OUTBOUND', '(win32pipe.PIPE_WAIT | win32pipe.PIPE_TYPE_BYTE)', '(1)', '(65536)', '(0)', '(10000)', 'None'], {}), "('\\\\\\\\.\\\\pipe\\\\biassys_pipein', win32pipe.\n PIPE_ACCESS_OUTBOUND, win32pipe.PIPE_WAIT | win32pipe.PIPE_TYPE_BYTE, 1,\n 65536, 0, 10000, None)\n", (1137, 1283), False, 'import win32api, win32pipe, win32file, pywintypes\n'), ((1874, 1922), 'win32pipe.DisconnectNamedPipe', 'win32pipe.DisconnectNamedPipe', (['self.__pipe_out__'], {}), '(self.__pipe_out__)\n', (1903, 1922), False, 'import win32api, win32pipe, win32file, pywintypes\n'), ((1358, 1403), 'win32pipe.ConnectNamedPipe', 'win32pipe.ConnectNamedPipe', (['self.__pipe_out__'], {}), '(self.__pipe_out__)\n', (1384, 1403), False, 'import win32api, win32pipe, win32file, pywintypes\n')]
|
import factory
from .models import Group
class GroupFactory(factory.DjangoModelFactory):
    """Factory producing ``Group`` model instances for tests.

    NOTE(review): ``factory.DjangoModelFactory`` is deprecated in newer
    factory_boy releases in favour of ``factory.django.DjangoModelFactory``;
    confirm the pinned factory_boy version before changing.
    """
    # Monotonically increasing surrogate for the remote system's id.
    remote_id = factory.Sequence(lambda n: n)
    class Meta:
        model = Group
|
[
"factory.Sequence"
] |
[((109, 138), 'factory.Sequence', 'factory.Sequence', (['(lambda n: n)'], {}), '(lambda n: n)\n', (125, 138), False, 'import factory\n')]
|
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')

# New record to append to the census data
new_record = [[50, 9, 4, 1, 0, 0, 40, 0]]

# Reading file
# NOTE(review): `path` is assumed to be injected by the execution environment
# before this script runs -- it is not defined in this file.
data = np.genfromtxt(path, delimiter=",", skip_header=1)
new = np.concatenate((data, new_record), axis=0)

# Age statistics over the full data set, including the new record.
# (Column 0 = age, column 2 = race code 0-4, column 6 = weekly working hours,
# column 7 = income flag -- inferred from the usage below; confirm with schema.)
age = new[:, 0]
max_age = np.max(age)
min_age = np.min(age)
age_mean = np.mean(age)
age_std = np.std(age)

# Subset the original data by race code (column 2)
zero = data[data[:, 2] == 0]
race_0 = zero[:, 2]
one = data[data[:, 2] == 1]
race_1 = one[:, 2]
two = data[data[:, 2] == 2]
race_2 = two[:, 2]
three = data[data[:, 2] == 3]
race_3 = three[:, 2]
four = data[data[:, 2] == 4]
race_4 = four[:, 2]

# BUG FIX: ndarray.itemsize is the size in BYTES of one element (identical
# for every race array), not the number of records; use len() for the counts.
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)

# Code starts here
value = np.array([len_0, len_1, len_2, len_3, len_4])
# BUG FIX: np.min(value) returned the smallest COUNT; the minority race is
# the race CODE with the fewest records, i.e. the index of the minimum.
minority_race = np.argmin(value)

# Senior citizens: records with age > 60
senior_citizens = new[new[:, 0] > 60]
working_hours = senior_citizens[:, 6]
working_hours_sum = np.sum(working_hours)
# BUG FIX: itemsize -> actual number of senior-citizen records
senior_citizens_len = len(senior_citizens)
avg_working_hours = np.mean(working_hours)

# Average income (column 7) split by education level (column 1)
high = new[new[:, 1] > 10]
low = new[new[:, 1] <= 10]
avg_pay_low = np.mean(low[:, 7])
avg_pay_high = np.mean(high[:, 7])
|
[
"numpy.sum",
"warnings.filterwarnings",
"numpy.std",
"numpy.genfromtxt",
"numpy.max",
"numpy.min",
"numpy.mean",
"numpy.array",
"numpy.concatenate"
] |
[((82, 115), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (105, 115), False, 'import warnings\n'), ((203, 252), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(path, delimiter=',', skip_header=1)\n", (216, 252), True, 'import numpy as np\n'), ((258, 300), 'numpy.concatenate', 'np.concatenate', (['(data, new_record)'], {'axis': '(0)'}), '((data, new_record), axis=0)\n', (272, 300), True, 'import numpy as np\n'), ((322, 333), 'numpy.max', 'np.max', (['age'], {}), '(age)\n', (328, 333), True, 'import numpy as np\n'), ((343, 354), 'numpy.min', 'np.min', (['age'], {}), '(age)\n', (349, 354), True, 'import numpy as np\n'), ((365, 377), 'numpy.mean', 'np.mean', (['age'], {}), '(age)\n', (372, 377), True, 'import numpy as np\n'), ((387, 398), 'numpy.std', 'np.std', (['age'], {}), '(age)\n', (393, 398), True, 'import numpy as np\n'), ((753, 798), 'numpy.array', 'np.array', (['[len_0, len_1, len_2, len_3, len_4]'], {}), '([len_0, len_1, len_2, len_3, len_4])\n', (761, 798), True, 'import numpy as np\n'), ((810, 823), 'numpy.min', 'np.min', (['value'], {}), '(value)\n', (816, 823), True, 'import numpy as np\n'), ((913, 934), 'numpy.sum', 'np.sum', (['working_hours'], {}), '(working_hours)\n', (919, 934), True, 'import numpy as np\n'), ((1000, 1022), 'numpy.mean', 'np.mean', (['working_hours'], {}), '(working_hours)\n', (1007, 1022), True, 'import numpy as np\n'), ((1082, 1100), 'numpy.mean', 'np.mean', (['low[:, 7]'], {}), '(low[:, 7])\n', (1089, 1100), True, 'import numpy as np\n'), ((1114, 1133), 'numpy.mean', 'np.mean', (['high[:, 7]'], {}), '(high[:, 7])\n', (1121, 1133), True, 'import numpy as np\n')]
|
from django.contrib import admin
from core.models import Universe, Year
@admin.register(Universe)
class UniverseAdmin(admin.ModelAdmin):
    """Admin configuration for Universe objects."""
    list_display = ('name',)


@admin.register(Year)
class YearAdmin(admin.ModelAdmin):
    """Admin configuration for Year objects."""
    list_display = ('year', 'current_year', 'universe')
|
[
"django.contrib.admin.site.register"
] |
[((239, 283), 'django.contrib.admin.site.register', 'admin.site.register', (['Universe', 'UniverseAdmin'], {}), '(Universe, UniverseAdmin)\n', (258, 283), False, 'from django.contrib import admin\n'), ((284, 320), 'django.contrib.admin.site.register', 'admin.site.register', (['Year', 'YearAdmin'], {}), '(Year, YearAdmin)\n', (303, 320), False, 'from django.contrib import admin\n')]
|
#Contains tables, every model represents a table in the database
from .database import Base
from sqlalchemy import TIMESTAMP, Column, Integer, String, Boolean
from sqlalchemy.sql.expression import text
class Post(Base):
    """ORM model for the "posts" table."""
    __tablename__ = "posts"
    # Surrogate primary key.
    id = Column(Integer, primary_key=True, nullable=False)
    title = Column(String, nullable=False)
    content = Column(String, nullable=False)
    # Posts default to published on the database side (server_default).
    published = Column(Boolean, server_default="TRUE", nullable=False)
    # Creation timestamp, filled in by the database via now().
    created_at = Column(TIMESTAMP(timezone=True), nullable=False, server_default=text("now()"))
class User(Base):
    """ORM model for the "users" table."""
    __tablename__ = 'users'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Login identifier; uniqueness is enforced at the database level.
    email = Column(String, nullable=False, unique=True)
    # NOTE(review): stored as an opaque string; hashing must happen upstream -- confirm.
    password = Column(String, nullable=False)
    # Creation timestamp, filled in by the database via now().
    created_at = Column(TIMESTAMP(timezone=True), nullable=False, server_default=text('now()'))
|
[
"sqlalchemy.sql.expression.text",
"sqlalchemy.TIMESTAMP",
"sqlalchemy.Column"
] |
[((259, 308), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'nullable': '(False)'}), '(Integer, primary_key=True, nullable=False)\n', (265, 308), False, 'from sqlalchemy import TIMESTAMP, Column, Integer, String, Boolean\n'), ((321, 351), 'sqlalchemy.Column', 'Column', (['String'], {'nullable': '(False)'}), '(String, nullable=False)\n', (327, 351), False, 'from sqlalchemy import TIMESTAMP, Column, Integer, String, Boolean\n'), ((366, 396), 'sqlalchemy.Column', 'Column', (['String'], {'nullable': '(False)'}), '(String, nullable=False)\n', (372, 396), False, 'from sqlalchemy import TIMESTAMP, Column, Integer, String, Boolean\n'), ((413, 467), 'sqlalchemy.Column', 'Column', (['Boolean'], {'server_default': '"""TRUE"""', 'nullable': '(False)'}), "(Boolean, server_default='TRUE', nullable=False)\n", (419, 467), False, 'from sqlalchemy import TIMESTAMP, Column, Integer, String, Boolean\n'), ((623, 656), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (629, 656), False, 'from sqlalchemy import TIMESTAMP, Column, Integer, String, Boolean\n'), ((669, 712), 'sqlalchemy.Column', 'Column', (['String'], {'nullable': '(False)', 'unique': '(True)'}), '(String, nullable=False, unique=True)\n', (675, 712), False, 'from sqlalchemy import TIMESTAMP, Column, Integer, String, Boolean\n'), ((728, 758), 'sqlalchemy.Column', 'Column', (['String'], {'nullable': '(False)'}), '(String, nullable=False)\n', (734, 758), False, 'from sqlalchemy import TIMESTAMP, Column, Integer, String, Boolean\n'), ((493, 517), 'sqlalchemy.TIMESTAMP', 'TIMESTAMP', ([], {'timezone': '(True)'}), '(timezone=True)\n', (502, 517), False, 'from sqlalchemy import TIMESTAMP, Column, Integer, String, Boolean\n'), ((783, 807), 'sqlalchemy.TIMESTAMP', 'TIMESTAMP', ([], {'timezone': '(True)'}), '(timezone=True)\n', (792, 807), False, 'from sqlalchemy import TIMESTAMP, Column, Integer, String, Boolean\n'), ((550, 563), 
'sqlalchemy.sql.expression.text', 'text', (['"""now()"""'], {}), "('now()')\n", (554, 563), False, 'from sqlalchemy.sql.expression import text\n'), ((840, 853), 'sqlalchemy.sql.expression.text', 'text', (['"""now()"""'], {}), "('now()')\n", (844, 853), False, 'from sqlalchemy.sql.expression import text\n')]
|
""" A collection of common routines for plotting ones """
import time
import matplotlib.pyplot as plt
import numpy as np
from lamberthub.utils.misc import _get_sample_vectors_from_theta_and_rho
class TauThetaPlotter:
    """A class for modelling a discrete grid contour plotter."""

    def __init__(self, ax=None, fig=None, Nres=50):
        """
        Initializes any instance of the plotter.

        Parameters
        ----------
        ax: matplotlib.Axes
            The axes in which the lines will be drawn.
        fig: matplotlib.Figure
            The figure instance for the plot.
        Nres: int
            Number of points per row and per column of the grid.

        Notes
        -----
        If both ``ax`` and ``fig`` are omitted, two independent figures are
        created and the stored axes do not belong to the stored figure; pass
        both explicitly for a fully consistent plot.
        """
        # Check if axes are available
        if ax is None:
            _, ax = plt.subplots()
        # Check if figure available
        if fig is None:
            fig, _ = plt.subplots()
        # Assign previous figure and axes. Impose equal aspect ratio.
        self.ax, self.fig = ax, fig
        self.ax.set_aspect("equal")
        # Assign the number of points per row and column
        self.Nres = Nres

    def _get_spans(self, p=0.999):
        """
        Returns a lineal span for transfer angle and non-dimensional time of flight.

        Parameters
        ----------
        p: float
            Percentage of the final value. This is required due to singularities
            in some of the solvers at transfer angles of 2pi.

        Returns
        -------
        theta_span: np.array
            An array of linearly spaced transfer angles.
        tau_span: np.array
            An array of linearly spaced non-dimensional transfer times.
        """
        # Generate a meshgrid holding any combination of transfer angle and
        # non-dimensional time of flight. The 2 * pi value is avoided by
        # computing an approximate one. Nevertheless, this last value will not
        # be used due to the way `pcolor` function operates.
        theta_span, tau_span = [
            np.linspace(0, p * 2 * np.pi, self.Nres) for _ in range(2)
        ]
        return theta_span, tau_span

    def _draw_colorbar(self, maxval, step, label, cmap, color_vmin):
        """Draws the colorbar for the figure.

        Parameters
        ----------
        maxval: float
            The maximum value of the figure.
        step: float
            The step for drawing each of the colorbar ticks. (Recomputed
            internally as ``maxval / cmap.N``; the passed value is unused.)
        label: str
            The title of the colorbar.
        cmap: matplotlib.cmap
            The colormap used in the contour plot.
        color_vmin: str
            Font color used for the lowest colorbar entry.

        Notes
        -----
        Requires ``self.collection`` to have been assigned by the caller
        before this method is invoked.
        """
        # Generate the colorbar
        self.cbar = self.fig.colorbar(self.collection)
        self.cbar.ax.get_yaxis().set_ticks([])
        # Append the label and make its position
        self.cbar.set_label(label)
        self.cbar.ax.get_yaxis().labelpad = 15
        # Properly size the aspect ratio of the colorbar
        digits = int(np.log10(maxval)) + 1
        cbar_title = r"$\times 10^" + f"{digits-2}$" if digits > 2 else None
        self.cbar.ax.set_title(cbar_title)
        # Compute the step which separates two different levels
        step = maxval / cmap.N
        # Draw a beautiful colorbar with the legend for the number of iterations
        # in the middle
        for n in range(int(cmap.N)):
            # Select suitable font-color
            fontcolor = "black" if n != 0 else color_vmin
            # Draw the number within the scale
            self.cbar.ax.text(
                0.5 * maxval,
                step / 2 + step * n,
                str(int(step * n / 10 ** (digits - 2))),
                ha="center",
                va="center",
                color=fontcolor,
            )

    def _draw_ticks(self):
        """Draws the ticks within the axes"""
        # Set the X-ticks
        self.ax.set_xticks(np.array([0, 0.5, 1, 1.5, 2]) * np.pi)
        # BUG FIX: the 1.5*pi tick was mislabelled "2pi/3"; it is 3pi/2.
        self.ax.set_xticklabels(
            [r"$0$", r"$\frac{\pi}{2}$", r"$\pi$", r"$\frac{3\pi}{2}$", r"$2\pi$"]
        )
        # Set the Y-ticks
        self.ax.set_yticks(np.array([0, 0.5, 1, 1.5, 2]) * np.pi)
        self.ax.set_yticklabels(
            [r"$0$", r"$\frac{\pi}{2}$", r"$\pi$", r"$\frac{3\pi}{2}$", r"$2\pi$"]
        )

    def _draw_labels(self):
        """Draws axes labels"""
        # Set axes labels and title
        self.ax.set_xlabel(r"$\Delta \theta$")
        self.ax.set_ylabel(r"$\Delta \tau$")
def _measure_performance(solver, theta, tau):
    """
    Computes the number of iterations from a particular value of theta and the
    transfer angle.

    Parameters
    ----------
    solver: function
        The Lambert's problem solver function.
    theta: float
        The transfer angle.
    tau: float
        The non-dimensional time of flight.

    Returns
    -------
    numiter: int
        Number of iterations reported by the solver (0 if the solver was not
        run or raised ValueError).
    tpi: float
        Value reported by the solver's full output -- presumably "time per
        iteration"; confirm against the solver implementation.
    total_time: float
        Wall-clock time spent inside the solver call (0 on failure).

    Notes
    -----
    The customization is null to prevent users from shooting themselves and
    creating performance comparisons under different boundary conditions.
    """
    # Generate r1_vec and r2_vec such that r2_norm = 2 * r1_norm for various theta
    r1_vec, r2_vec = _get_sample_vectors_from_theta_and_rho(theta, 2.0)
    # Compute the norms, the chord and semi-perimeter
    r1, r2 = [np.linalg.norm(rr) for rr in [r1_vec, r2_vec]]
    c = (r1 ** 2 + r2 ** 2 - 2 * r1 * r2 * np.cos(theta)) ** 0.5
    s = (r1 + r2 + c) / 2
    # Compute the dimensional time from the non-dimensional one using
    # Lancaster's expression. This relation is more intuitive as it relates
    # revolution cases with multiples of pi.
    mu = 1.00
    tof = tau / (8 * mu / s ** 3) ** 0.5
    # Filter non-valid input: null value is returned if no iterations were run
    if tof == 0 or theta == 0:
        return 0, 0, 0
    # Solve the problem but only collect the number of iterations
    tic = time.perf_counter()
    try:
        _, _, numiter, tpi = solver(
            mu,
            r1_vec,
            r2_vec,
            tof,
            M=0,
            prograde=True,
            maxiter=35,
            atol=1e-5,
            rtol=1e-7,
            full_output=True,
        )
        tac = time.perf_counter()
    except ValueError:
        # Solver rejected the input / failed to converge: report null metrics.
        numiter, tpi, tic, tac = (0, 0, 0, 0)
    return numiter, tpi, (tac - tic)
# Vectorize the solver: `excluded=[0]` keeps the solver argument scalar while
# theta/tau broadcast over the grid.
_vec_measure_performance = np.vectorize(
    _measure_performance, otypes=[np.ndarray, np.ndarray, np.ndarray], excluded=[0]
)
|
[
"numpy.vectorize",
"time.perf_counter",
"lamberthub.utils.misc._get_sample_vectors_from_theta_and_rho",
"numpy.linalg.norm",
"numpy.array",
"numpy.linspace",
"numpy.cos",
"numpy.log10",
"matplotlib.pyplot.subplots"
] |
[((6346, 6444), 'numpy.vectorize', 'np.vectorize', (['_measure_performance'], {'otypes': '[np.ndarray, np.ndarray, np.ndarray]', 'excluded': '[0]'}), '(_measure_performance, otypes=[np.ndarray, np.ndarray, np.\n ndarray], excluded=[0])\n', (6358, 6444), True, 'import numpy as np\n'), ((5144, 5194), 'lamberthub.utils.misc._get_sample_vectors_from_theta_and_rho', '_get_sample_vectors_from_theta_and_rho', (['theta', '(2.0)'], {}), '(theta, 2.0)\n', (5182, 5194), False, 'from lamberthub.utils.misc import _get_sample_vectors_from_theta_and_rho\n'), ((5860, 5879), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5877, 5879), False, 'import time\n'), ((5264, 5282), 'numpy.linalg.norm', 'np.linalg.norm', (['rr'], {}), '(rr)\n', (5278, 5282), True, 'import numpy as np\n'), ((6167, 6186), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6184, 6186), False, 'import time\n'), ((843, 857), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (855, 857), True, 'import matplotlib.pyplot as plt\n'), ((940, 954), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (952, 954), True, 'import matplotlib.pyplot as plt\n'), ((2079, 2119), 'numpy.linspace', 'np.linspace', (['(0)', '(p * 2 * np.pi)', 'self.Nres'], {}), '(0, p * 2 * np.pi, self.Nres)\n', (2090, 2119), True, 'import numpy as np\n'), ((2985, 3001), 'numpy.log10', 'np.log10', (['maxval'], {}), '(maxval)\n', (2993, 3001), True, 'import numpy as np\n'), ((3902, 3931), 'numpy.array', 'np.array', (['[0, 0.5, 1, 1.5, 2]'], {}), '([0, 0.5, 1, 1.5, 2])\n', (3910, 3931), True, 'import numpy as np\n'), ((4121, 4150), 'numpy.array', 'np.array', (['[0, 0.5, 1, 1.5, 2]'], {}), '([0, 0.5, 1, 1.5, 2])\n', (4129, 4150), True, 'import numpy as np\n'), ((5354, 5367), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5360, 5367), True, 'import numpy as np\n')]
|
# Show album info for a URN or URL.
import spotipy
import sys
import pprint

# Album identifier: first CLI argument, or a known sample album by default.
urn = sys.argv[1] if len(sys.argv) > 1 else 'spotify:album:5yTx83u3qerZF7GRJu7eFk'

# Fetch the album metadata and pretty-print the raw response.
sp = spotipy.Spotify()
pprint.pprint(sp.album(urn))
|
[
"spotipy.Spotify",
"pprint.pprint"
] |
[((185, 202), 'spotipy.Spotify', 'spotipy.Spotify', ([], {}), '()\n', (200, 202), False, 'import spotipy\n'), ((225, 245), 'pprint.pprint', 'pprint.pprint', (['album'], {}), '(album)\n', (238, 245), False, 'import pprint\n')]
|
"""Script to import/export all the skeleton related objects."""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright © 2012, NIF File Format Library and Tools contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIF File Format Library and Tools
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
import os
import bpy
import mathutils
from pyffi.formats.nif import NifFormat
import io_scene_niftools.utils.logging
from io_scene_niftools.modules.nif_import.object.block_registry import block_store
from io_scene_niftools.modules.nif_export.block_registry import block_store as block_store_export
from io_scene_niftools.modules.nif_import.animation.transform import TransformAnimation
from io_scene_niftools.modules.nif_import.object import Object
from io_scene_niftools.utils import math
from io_scene_niftools.utils.blocks import safe_decode
from io_scene_niftools.utils.logging import NifLog
from io_scene_niftools.utils.singleton import NifOp, NifData
class Armature:
    def __init__(self):
        """Set up per-import state for armature and bone processing."""
        # Importer for transform (keyframe) animation attached to bones.
        self.transform_anim = TransformAnimation()
        # to get access to the nif bone in object mode
        self.name_to_block = {}
        # nif node -> armature-space pose matrix (filled by store_pose_matrices)
        self.pose_store = {}
        # nif bone node -> armature-space bind matrix (filled by store_bind_matrices)
        self.bind_store = {}
        # presumably flipped to True once skinned geometry is found -- set elsewhere; confirm
        self.skinned = False
        # the armature's nif node; assigned later during import
        self.n_armature = None
    def store_pose_matrices(self, n_node, n_root):
        """Stores the nif armature space matrix of a node tree.

        Recursively fills self.pose_store (keyed by nif node) with each
        bone's transform relative to n_root. Non-bone nodes stop the
        recursion: their children are not visited.
        """
        # check that n_block is indeed a bone
        if not self.is_bone(n_node):
            return None
        NifLog.debug(f"Storing pose matrix for {n_node.name}")
        # calculate the transform relative to root, ie. turn nif local into nif armature space
        self.pose_store[n_node] = n_node.get_transform(n_root)
        # move down the hierarchy
        for n_child in n_node.children:
            self.store_pose_matrices(n_child, n_root)
def get_skinned_geometries(self, n_root):
"""Yield all children in n_root's tree that have skinning"""
# search for all NiTriShape or NiTriStrips blocks...
for n_block in n_root.tree(block_type=NifFormat.NiTriBasedGeom):
# yes, we found one, does it have skinning?
if n_block.is_skin():
yield n_block
def get_skin_bind(self, n_bone, geom, n_root):
"""Get armature space bind matrix for skin partition bone's inverse bind matrix"""
# get the bind pose from the skin data
# NiSkinData stores the inverse bind (=rest) pose for each bone, in armature space
# for ZT2 elephant, the skin transform is the inverse of the geom's armature space transform
# this gives a straight rest pose for MW too
# return n_bone.get_transform().get_inverse(fast=False) * geom.skin_instance.data.get_transform().get_inverse(fast=False)
# however, this conflicts with send_geometries_to_bind_position for MW meshes, so stick to this now
return n_bone.get_transform().get_inverse(fast=False) * geom.get_transform(n_root)
def bones_iter(self, skin_instance):
# might want to make sure that bone_list includes no dupes too to avoid breaking the first mesh
for bonenode, bonedata in zip(skin_instance.bones, skin_instance.data.bone_list):
# bonenode can be None; see pyffi issue #3114079
if bonenode:
yield bonenode, bonedata
def store_bind_matrices(self, n_armature):
"""Process all geometries' skin instances to reconstruct a bind pose from their inverse bind matrices"""
# improved from pyffi's send_geometries_to_bind_position & send_bones_to_bind_position
NifLog.debug(f"Calculating bind for {n_armature.name}")
# prioritize geometries that have most nodes in their skin instance
geoms = sorted(self.get_skinned_geometries(n_armature), key=lambda g: g.skin_instance.num_bones, reverse=True)
NifLog.debug(f"Found {len(geoms)} skinned geometries")
for geom in geoms:
NifLog.debug(f"Checking skin of {geom.name}")
skininst = geom.skin_instance
for bonenode, bonedata in self.bones_iter(skininst):
# make sure all bone data of shared bones coincides
# see if the bone has been used by a previous skin instance
if bonenode in self.bind_store:
# get the bind pose that has been stored
diff = (bonedata.get_transform()
* self.bind_store[bonenode]
# * geom.skin_instance.data.get_transform()) use this if relative to skin instead of geom
* geom.get_transform(n_armature).get_inverse(fast=False))
# there is a difference between the two geometries' bind poses
if not diff.is_identity():
NifLog.debug(f"Fixing {geom.name} bind position")
# update the skin for all bones of the new geom
for bonenode, bonedata in self.bones_iter(skininst):
NifLog.debug(f"Transforming bind of {bonenode.name}")
bonedata.set_transform(diff.get_inverse(fast=False) * bonedata.get_transform())
# transforming verts helps with nifs where the skins differ, eg MW vampire or WLP2 Gastornis
for vert in geom.data.vertices:
newvert = vert * diff
vert.x = newvert.x
vert.y = newvert.y
vert.z = newvert.z
for norm in geom.data.normals:
newnorm = norm * diff.get_matrix_33()
norm.x = newnorm.x
norm.y = newnorm.y
norm.z = newnorm.z
break
# store bind pose
for bonenode, bonedata in self.bones_iter(skininst):
NifLog.debug(f"Stored {geom.name} bind position")
self.bind_store[bonenode] = self.get_skin_bind(bonedata, geom, n_armature)
NifLog.debug("Storing non-skeletal bone poses")
self.fix_pose(n_armature, n_armature)
def fix_pose(self, n_node, n_root):
"""reposition non-skeletal bones to maintain their local orientation to their skeletal parents"""
for n_child_node in n_node.children:
# only process nodes
if not isinstance(n_child_node, NifFormat.NiNode):
continue
if n_child_node not in self.bind_store and n_child_node in self.pose_store:
NifLog.debug(f"Calculating bind pose for non-skeletal bone {n_child_node.name}")
# get matrices for n_node (the parent) - fallback to getter if it is not in the store
n_armature_pose = self.pose_store.get(n_node, n_node.get_transform(n_root))
# get bind of parent node or pose if it has no bind pose
n_armature_bind = self.bind_store.get(n_node, n_armature_pose)
# the child must have a pose, no need for a fallback
n_child_armature_pose = self.pose_store[n_child_node]
# get the relative transform of n_child_node from pose * inverted parent pose
n_child_local_pose = n_child_armature_pose * n_armature_pose.get_inverse(fast=False)
# get object space transform by multiplying with bind of parent bone
self.bind_store[n_child_node] = n_child_local_pose * n_armature_bind
# move down the hierarchy
self.fix_pose(n_child_node, n_root)
def import_armature(self, n_armature):
"""Scans an armature hierarchy, and returns a whole armature.
This is done outside the normal node tree scan to allow for positioning
of the bones before skins are attached."""
armature_name = block_store.import_name(n_armature)
b_armature_data = bpy.data.armatures.new(armature_name)
b_armature_data.display_type = 'STICK'
# use heuristics to determine a suitable orientation
forward, up = self.guess_orientation(n_armature)
# pass them to the matrix utility
math.set_bone_orientation(forward, up)
# store axis orientation for export
b_armature_data.niftools.axis_forward = forward
b_armature_data.niftools.axis_up = up
b_armature_obj = Object.create_b_obj(n_armature, b_armature_data)
b_armature_obj.show_in_front = True
# store the original pose & bind matrices for all nodes
self.pose_store = {}
self.bind_store = {}
self.store_pose_matrices(n_armature, n_armature)
self.store_bind_matrices(n_armature)
# make armature editable and create bones
bpy.ops.object.mode_set(mode='EDIT', toggle=False)
for n_child in n_armature.children:
self.import_bone_bind(n_child, b_armature_data, n_armature)
self.fix_bone_lengths(b_armature_data)
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
# The armature has been created in editmode,
# now we are ready to set the bone keyframes and store the bones' long names.
if NifOp.props.animation:
self.transform_anim.get_bind_data(b_armature_obj)
for bone_name, b_bone in b_armature_obj.data.bones.items():
n_block = self.name_to_block[bone_name]
# the property is only available from object mode!
block_store.store_longname(b_bone, safe_decode(n_block.name))
if NifOp.props.animation:
self.transform_anim.import_transforms(n_block, b_armature_obj, bone_name)
# import pose
for b_name, n_block in self.name_to_block.items():
n_pose = self.pose_store[n_block]
b_pose_bone = b_armature_obj.pose.bones[b_name]
n_bind = math.nifformat_to_mathutils_matrix(n_pose)
b_pose_bone.matrix = math.nif_bind_to_blender_bind(n_bind)
# force update is required after each pbone to ensure the transforms are set properly in blender
bpy.context.view_layer.update()
return b_armature_obj
def import_bone_bind(self, n_block, b_armature_data, n_armature, b_parent_bone=None):
"""Adds a bone to the armature in edit mode."""
# check that n_block is indeed a bone
if not self.is_bone(n_block):
return None
# bone name
bone_name = block_store.import_name(n_block)
# create a new bone
b_edit_bone = b_armature_data.edit_bones.new(bone_name)
# store nif block for access from object mode
self.name_to_block[b_edit_bone.name] = n_block
# get the nif bone's armature space matrix (under the hood all bone space matrixes are multiplied together)
n_bind = math.nifformat_to_mathutils_matrix(self.bind_store.get(n_block, NifFormat.Matrix44()))
# get transformation in blender's coordinate space
b_bind = math.nif_bind_to_blender_bind(n_bind)
# set the bone matrix - but set the tail first to prevent issues with zero-length bone
b_edit_bone.tail = mathutils.Vector([0, 0, 1])
b_edit_bone.matrix = b_bind
# link to parent
if b_parent_bone:
b_edit_bone.parent = b_parent_bone
# import and parent bone children
for n_child in n_block.children:
self.import_bone_bind(n_child, b_armature_data, n_armature, b_edit_bone)
def guess_orientation(self, n_armature):
"""Analyze all bones' translations to see what the nif considers the 'forward' axis"""
axis_indices = []
ids = ["X", "Y", "Z", "-X", "-Y", "-Z"]
for n_child in n_armature.children:
self.get_forward_axis(n_child, axis_indices)
# the forward index is the most common one from the list
forward_ind = max(set(axis_indices), key=axis_indices.count)
# move the up index one coordinate to the right, account for end of list
up_ind = (forward_ind + 1) % len(ids)
# return string identifiers
return ids[forward_ind], ids[up_ind]
@staticmethod
def argmax(values):
"""Return the index of the max value in values"""
return max(zip(values, range(len(values))))[1]
def get_forward_axis(self, n_bone, axis_indices):
"""Helper function to get the forward axis of a bone"""
# check that n_block is indeed a bone
if not self.is_bone(n_bone):
return None
trans = n_bone.translation.as_tuple()
trans_abs = tuple(abs(v) for v in trans)
# get the index of the coordinate with the biggest absolute value
max_coord_ind = self.argmax(trans_abs)
# now check the sign
actual_value = trans[max_coord_ind]
# handle sign accordingly so negative indices map to the negative identifiers in list
if actual_value < 0:
max_coord_ind += 3
axis_indices.append(max_coord_ind)
# move down the hierarchy
for n_child in n_bone.children:
self.get_forward_axis(n_child, axis_indices)
@staticmethod
def fix_bone_lengths(b_armature_data):
"""Sets all edit_bones to a suitable length."""
for b_edit_bone in b_armature_data.edit_bones:
# don't change root bones
if b_edit_bone.parent:
# take the desired length from the mean of all children's heads
if b_edit_bone.children:
child_heads = mathutils.Vector()
for b_child in b_edit_bone.children:
child_heads += b_child.head
bone_length = (b_edit_bone.head - child_heads / len(b_edit_bone.children)).length
if bone_length < 0.01:
bone_length = 0.25
# end of a chain
else:
bone_length = b_edit_bone.parent.length
b_edit_bone.length = bone_length
def check_for_skin(self, n_root):
"""Checks all tri geometry for skinning, sets self.skinned accordingly"""
# set these here once per run
self.n_armature = None
self.skinned = False
for n_block in self.get_skinned_geometries(n_root):
self.skinned = True
NifLog.debug(f"{n_block.name} has skinning.")
# one is enough to require an armature, so stop
return
# force import of nodes as bones, even if no geometries are present
if NifOp.props.process == "SKELETON_ONLY":
self.skinned = True
NifLog.debug(f"Found no skinned geometries.")
def is_bone(self, ni_block):
"""Tests a NiNode to see if it has been marked as a bone."""
if isinstance(ni_block, NifFormat.NiNode):
return self.skinned
def is_armature_root(self, n_block):
"""Tests a block to see if it's an armature."""
if isinstance(n_block, NifFormat.NiNode):
# we have skinning and are waiting for a suitable start node of the tree
if self.skinned and not self.n_armature:
# now store it as the nif armature's root
self.n_armature = n_block
return True
|
[
"io_scene_niftools.utils.math.nifformat_to_mathutils_matrix",
"io_scene_niftools.utils.math.set_bone_orientation",
"io_scene_niftools.modules.nif_import.animation.transform.TransformAnimation",
"io_scene_niftools.utils.blocks.safe_decode",
"pyffi.formats.nif.NifFormat.Matrix44",
"bpy.data.armatures.new",
"io_scene_niftools.modules.nif_import.object.Object.create_b_obj",
"bpy.context.view_layer.update",
"io_scene_niftools.modules.nif_import.object.block_registry.block_store.import_name",
"mathutils.Vector",
"io_scene_niftools.utils.math.nif_bind_to_blender_bind",
"bpy.ops.object.mode_set",
"io_scene_niftools.utils.logging.NifLog.debug"
] |
[((2511, 2531), 'io_scene_niftools.modules.nif_import.animation.transform.TransformAnimation', 'TransformAnimation', ([], {}), '()\n', (2529, 2531), False, 'from io_scene_niftools.modules.nif_import.animation.transform import TransformAnimation\n'), ((2970, 3024), 'io_scene_niftools.utils.logging.NifLog.debug', 'NifLog.debug', (['f"""Storing pose matrix for {n_node.name}"""'], {}), "(f'Storing pose matrix for {n_node.name}')\n", (2982, 3024), False, 'from io_scene_niftools.utils.logging import NifLog\n'), ((5073, 5128), 'io_scene_niftools.utils.logging.NifLog.debug', 'NifLog.debug', (['f"""Calculating bind for {n_armature.name}"""'], {}), "(f'Calculating bind for {n_armature.name}')\n", (5085, 5128), False, 'from io_scene_niftools.utils.logging import NifLog\n'), ((7558, 7605), 'io_scene_niftools.utils.logging.NifLog.debug', 'NifLog.debug', (['"""Storing non-skeletal bone poses"""'], {}), "('Storing non-skeletal bone poses')\n", (7570, 7605), False, 'from io_scene_niftools.utils.logging import NifLog\n'), ((9357, 9392), 'io_scene_niftools.modules.nif_import.object.block_registry.block_store.import_name', 'block_store.import_name', (['n_armature'], {}), '(n_armature)\n', (9380, 9392), False, 'from io_scene_niftools.modules.nif_import.object.block_registry import block_store\n'), ((9419, 9456), 'bpy.data.armatures.new', 'bpy.data.armatures.new', (['armature_name'], {}), '(armature_name)\n', (9441, 9456), False, 'import bpy\n'), ((9673, 9711), 'io_scene_niftools.utils.math.set_bone_orientation', 'math.set_bone_orientation', (['forward', 'up'], {}), '(forward, up)\n', (9698, 9711), False, 'from io_scene_niftools.utils import math\n'), ((9883, 9931), 'io_scene_niftools.modules.nif_import.object.Object.create_b_obj', 'Object.create_b_obj', (['n_armature', 'b_armature_data'], {}), '(n_armature, b_armature_data)\n', (9902, 9931), False, 'from io_scene_niftools.modules.nif_import.object import Object\n'), ((10260, 10310), 'bpy.ops.object.mode_set', 
'bpy.ops.object.mode_set', ([], {'mode': '"""EDIT"""', 'toggle': '(False)'}), "(mode='EDIT', toggle=False)\n", (10283, 10310), False, 'import bpy\n'), ((10482, 10534), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""OBJECT"""', 'toggle': '(False)'}), "(mode='OBJECT', toggle=False)\n", (10505, 10534), False, 'import bpy\n'), ((11959, 11991), 'io_scene_niftools.modules.nif_import.object.block_registry.block_store.import_name', 'block_store.import_name', (['n_block'], {}), '(n_block)\n', (11982, 11991), False, 'from io_scene_niftools.modules.nif_import.object.block_registry import block_store\n'), ((12489, 12526), 'io_scene_niftools.utils.math.nif_bind_to_blender_bind', 'math.nif_bind_to_blender_bind', (['n_bind'], {}), '(n_bind)\n', (12518, 12526), False, 'from io_scene_niftools.utils import math\n'), ((12650, 12677), 'mathutils.Vector', 'mathutils.Vector', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (12666, 12677), False, 'import mathutils\n'), ((16133, 16178), 'io_scene_niftools.utils.logging.NifLog.debug', 'NifLog.debug', (['f"""Found no skinned geometries."""'], {}), "(f'Found no skinned geometries.')\n", (16145, 16178), False, 'from io_scene_niftools.utils.logging import NifLog\n'), ((5426, 5471), 'io_scene_niftools.utils.logging.NifLog.debug', 'NifLog.debug', (['f"""Checking skin of {geom.name}"""'], {}), "(f'Checking skin of {geom.name}')\n", (5438, 5471), False, 'from io_scene_niftools.utils.logging import NifLog\n'), ((11366, 11408), 'io_scene_niftools.utils.math.nifformat_to_mathutils_matrix', 'math.nifformat_to_mathutils_matrix', (['n_pose'], {}), '(n_pose)\n', (11400, 11408), False, 'from io_scene_niftools.utils import math\n'), ((11442, 11479), 'io_scene_niftools.utils.math.nif_bind_to_blender_bind', 'math.nif_bind_to_blender_bind', (['n_bind'], {}), '(n_bind)\n', (11471, 11479), False, 'from io_scene_niftools.utils import math\n'), ((11601, 11632), 'bpy.context.view_layer.update', 'bpy.context.view_layer.update', ([], {}), '()\n', 
(11630, 11632), False, 'import bpy\n'), ((15841, 15886), 'io_scene_niftools.utils.logging.NifLog.debug', 'NifLog.debug', (['f"""{n_block.name} has skinning."""'], {}), "(f'{n_block.name} has skinning.')\n", (15853, 15886), False, 'from io_scene_niftools.utils.logging import NifLog\n'), ((7408, 7457), 'io_scene_niftools.utils.logging.NifLog.debug', 'NifLog.debug', (['f"""Stored {geom.name} bind position"""'], {}), "(f'Stored {geom.name} bind position')\n", (7420, 7457), False, 'from io_scene_niftools.utils.logging import NifLog\n'), ((8069, 8154), 'io_scene_niftools.utils.logging.NifLog.debug', 'NifLog.debug', (['f"""Calculating bind pose for non-skeletal bone {n_child_node.name}"""'], {}), "(f'Calculating bind pose for non-skeletal bone {n_child_node.name}'\n )\n", (8081, 8154), False, 'from io_scene_niftools.utils.logging import NifLog\n'), ((11002, 11027), 'io_scene_niftools.utils.blocks.safe_decode', 'safe_decode', (['n_block.name'], {}), '(n_block.name)\n', (11013, 11027), False, 'from io_scene_niftools.utils.blocks import safe_decode\n'), ((12390, 12410), 'pyffi.formats.nif.NifFormat.Matrix44', 'NifFormat.Matrix44', ([], {}), '()\n', (12408, 12410), False, 'from pyffi.formats.nif import NifFormat\n'), ((15038, 15056), 'mathutils.Vector', 'mathutils.Vector', ([], {}), '()\n', (15054, 15056), False, 'import mathutils\n'), ((6300, 6349), 'io_scene_niftools.utils.logging.NifLog.debug', 'NifLog.debug', (['f"""Fixing {geom.name} bind position"""'], {}), "(f'Fixing {geom.name} bind position')\n", (6312, 6349), False, 'from io_scene_niftools.utils.logging import NifLog\n'), ((6527, 6580), 'io_scene_niftools.utils.logging.NifLog.debug', 'NifLog.debug', (['f"""Transforming bind of {bonenode.name}"""'], {}), "(f'Transforming bind of {bonenode.name}')\n", (6539, 6580), False, 'from io_scene_niftools.utils.logging import NifLog\n')]
|
# Generated by Django 3.0.5 on 2022-03-09 18:26
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('registration', '0003_auto_20220308_0149'),
]
operations = [
migrations.CreateModel(
name='Categories',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='Название категории')),
('slug', models.SlugField(max_length=70)),
('year_old', models.IntegerField(validators=[django.core.validators.MinValueValidator(1940, message='Не старше 1940 г.р.')])),
('year_yang', models.IntegerField(validators=[django.core.validators.MinValueValidator(1940, message='Не старше 1940 г.р.')])),
('number_start', models.IntegerField(null=True, validators=[django.core.validators.MinValueValidator(1, message='Значение не меньше 1')])),
('number_end', models.IntegerField(null=True, validators=[django.core.validators.MinValueValidator(1, message='Значение не меньше 1')])),
('description', models.TextField(blank=True, max_length=150)),
],
),
migrations.CreateModel(
name='Cups',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, verbose_name='Название Кубка')),
('slug', models.SlugField(max_length=70)),
('description', models.TextField(blank=True, max_length=150)),
],
),
migrations.AlterField(
model_name='races',
name='numbers_amount',
field=models.IntegerField(validators=[django.core.validators.MinValueValidator(1, message='Количество не меньше 1')]),
),
migrations.CreateModel(
name='Participants',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, verbose_name='Имя')),
('surname', models.CharField(max_length=30, verbose_name='Фамилия')),
('patronymic', models.CharField(max_length=30, null=True, verbose_name='Отчество')),
('year', models.IntegerField(validators=[django.core.validators.MinValueValidator(1940, message='Не старше 1940 г.р.')], verbose_name='Год рождения')),
('number', models.IntegerField(validators=[django.core.validators.MinValueValidator(1, message='Не меньше 1')], verbose_name='Стартовый номер')),
('club', models.CharField(max_length=50, null=True, verbose_name='Принадлежность к клубу')),
('town', models.CharField(max_length=50, null=True, verbose_name='Из какого города?')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_participants', to='registration.Categories')),
('race', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='race_participants', to='registration.Races')),
],
),
migrations.AddField(
model_name='categories',
name='race',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='race_categories', to='registration.Races'),
),
migrations.AlterField(
model_name='races',
name='cup',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='cup_races', to='registration.Cups'),
),
]
|
[
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.SlugField",
"django.db.models.AutoField"
] |
[((3493, 3617), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""race_categories"""', 'to': '"""registration.Races"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='race_categories', to='registration.Races')\n", (3510, 3617), False, 'from django.db import migrations, models\n'), ((3730, 3857), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""cup_races"""', 'to': '"""registration.Cups"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='cup_races', to='registration.Cups')\n", (3747, 3857), False, 'from django.db import migrations, models\n'), ((402, 495), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (418, 495), False, 'from django.db import migrations, models\n'), ((519, 585), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'verbose_name': '"""Название категории"""'}), "(max_length=50, verbose_name='Название категории')\n", (535, 585), False, 'from django.db import migrations, models\n'), ((613, 644), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(70)'}), '(max_length=70)\n', (629, 644), False, 'from django.db import migrations, models\n'), ((1276, 1320), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(150)'}), '(blank=True, max_length=150)\n', (1292, 1320), False, 'from django.db import migrations, models\n'), ((1450, 1543), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n 
verbose_name='ID')\n", (1466, 1543), False, 'from django.db import migrations, models\n'), ((1567, 1630), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Название Кубка"""'}), "(max_length=200, verbose_name='Название Кубка')\n", (1583, 1630), False, 'from django.db import migrations, models\n'), ((1658, 1689), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(70)'}), '(max_length=70)\n', (1674, 1689), False, 'from django.db import migrations, models\n'), ((1724, 1768), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(150)'}), '(blank=True, max_length=150)\n', (1740, 1768), False, 'from django.db import migrations, models\n'), ((2146, 2239), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2162, 2239), False, 'from django.db import migrations, models\n'), ((2263, 2314), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'verbose_name': '"""Имя"""'}), "(max_length=30, verbose_name='Имя')\n", (2279, 2314), False, 'from django.db import migrations, models\n'), ((2345, 2400), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'verbose_name': '"""Фамилия"""'}), "(max_length=30, verbose_name='Фамилия')\n", (2361, 2400), False, 'from django.db import migrations, models\n'), ((2434, 2501), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'null': '(True)', 'verbose_name': '"""Отчество"""'}), "(max_length=30, null=True, verbose_name='Отчество')\n", (2450, 2501), False, 'from django.db import migrations, models\n'), ((2859, 2945), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'verbose_name': '"""Принадлежность к клубу"""'}), "(max_length=50, 
null=True, verbose_name=\n 'Принадлежность к клубу')\n", (2875, 2945), False, 'from django.db import migrations, models\n'), ((2968, 3044), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'verbose_name': '"""Из какого города?"""'}), "(max_length=50, null=True, verbose_name='Из какого города?')\n", (2984, 3044), False, 'from django.db import migrations, models\n'), ((3076, 3211), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""category_participants"""', 'to': '"""registration.Categories"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='category_participants', to='registration.Categories')\n", (3093, 3211), False, 'from django.db import migrations, models\n'), ((3234, 3360), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""race_participants"""', 'to': '"""registration.Races"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='race_participants', to='registration.Races')\n", (3251, 3360), False, 'from django.db import migrations, models\n')]
|
from os import close
import json
import re
from PIL import Image
DIR = "/home/vishals/Dataset/UFPR-ALPR/UFPR-ALPR dataset/training"
FINAL = "data_coco_train.json"
def write_json(data, filename=FINAL):
with open(filename, 'w') as f:
json.dump(data, f, indent=4)
# ===================================================
# ================ Images Structure =================
# ===================================================
#
# {
# "id": 0,
# "license": 1,
# "file_name": "0001.jpg",
# "height": 275,
# "width": 490,
# "date_captured": "2020-07-20T19:39:26+00:00"
# },
def add_image_data(filename=FINAL):
ID = 0
with open(filename) as json_file:
data = json.load(json_file)
temp = data['images']
for i in range(60):
INTER_DIR = DIR + "/track00" + \
(("0" + str(i+1)) if i < 9 else str(i+1))
for j in range(30):
FILE_ABS = INTER_DIR + "/track00" + \
(("0" + str(i+1)) if i < 9 else str(i+1)) + \
"[" + ("0" + str(j + 1) if j < 9 else str(j+1)) + "]"
FILE_LCL = "../Dataset/data_files/track00" + \
(("0" + str(i+1)) if i < 9 else str(i+1)) + \
"[" + ("0" + str(j + 1) if j < 9 else str(j+1)) + "].png"
LINE = FILE_ABS + ".png"
im = Image.open(LINE)
(width, height) = im.size
xx = {
"id": ID,
"license": 1,
"file_name": FILE_LCL,
"height": height,
"width": width,
"date_captured": "2020-07-20T19:39:26+00:00"
}
ID = ID + 1
print(ID)
temp.append(xx)
write_json(data)
# ===================================================
# ============== Annotations Structure ==============
# ===================================================
# "annotations": [
# {
# "id": 1,
# "bbox": [
# 100,
# 116,
# 140,
# 170
# ],
# "image_id": 0,
# "segmentation": [],
# "ignore": 0,
# "area": 23800,
# "iscrowd": 0,
# "category_id": 0
# }
# ]
def add_annotations_data(filename=FINAL):
ID = 0
with open(filename) as json_file:
j_data = json.load(json_file)
temp = j_data['annotations']
for i in range(60):
INTER_DIR = DIR + "/track00" + \
(("0" + str(i+1)) if i < 9 else str(i+1))
for j in range(30):
FILE_ABS = INTER_DIR + "/track00" + \
(("0" + str(i+1)) if i < 9 else str(i+1)) + \
"[" + ("0" + str(j + 1) if j < 9 else str(j+1)) + "]"
LINE = "" + FILE_ABS + ".png"
f = open(FILE_ABS + ".txt", "rt")
data = f.readlines()[7][16:].replace(" ", ",").split("\n")
ld = []
for l in data:
if l != "":
ld = l.split(",")
ld[0] = int(ld[0])
ld[1] = int(ld[1])
ld[2] = int(ld[2])
ld[3] = int(ld[3])
im = Image.open(LINE)
(width, height) = im.size
im_size = width * height
xx = {
"id": ID,
"image_id": ID,
"bbox": ld,
"segmentation": [],
"area": im_size,
"iscrowd": 0,
"category_id": 1
}
ID = ID + 1
print(xx)
temp.append(xx)
write_json(j_data)
if __name__ == '__main__':
add_image_data()
add_annotations_data()
|
[
"json.dump",
"json.load",
"PIL.Image.open"
] |
[((247, 275), 'json.dump', 'json.dump', (['data', 'f'], {'indent': '(4)'}), '(data, f, indent=4)\n', (256, 275), False, 'import json\n'), ((767, 787), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (776, 787), False, 'import json\n'), ((2475, 2495), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (2484, 2495), False, 'import json\n'), ((1446, 1462), 'PIL.Image.open', 'Image.open', (['LINE'], {}), '(LINE)\n', (1456, 1462), False, 'from PIL import Image\n'), ((3385, 3401), 'PIL.Image.open', 'Image.open', (['LINE'], {}), '(LINE)\n', (3395, 3401), False, 'from PIL import Image\n')]
|
from tensorflow.keras import Model
from tensorflow.keras.layers import Conv2D, Flatten, Dense, Input
class CNN2Net(Model):
def __init__(self, output_dim=3):
"""
Args:
output_dim: 网络需要三个输出,对应3个动作的累积回报
"""
super(CNN2Net, self).__init__()
self.c1 = Conv2D(filters=16, kernel_size=(4, 4), strides=2, activation='relu') # 卷积层
self.c2 = Conv2D(filters=32, kernel_size=(2, 2), strides=1, activation='relu') # 卷积层
self.flatten = Flatten()
self.d1 = Dense(64, activation='relu')
self.d3 = Dense(output_dim)
def call(self, x):
x = self.c1(x)
x = self.c2(x)
x = self.flatten(x)
x = self.d1(x)
# x = self.d2(x)
y = self.d3(x)
return y
def model(self):
x = Input(shape=(6, 6, 1))
return Model(inputs=[x], outputs=self.call(x))
|
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Flatten"
] |
[((305, 373), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(16)', 'kernel_size': '(4, 4)', 'strides': '(2)', 'activation': '"""relu"""'}), "(filters=16, kernel_size=(4, 4), strides=2, activation='relu')\n", (311, 373), False, 'from tensorflow.keras.layers import Conv2D, Flatten, Dense, Input\n'), ((399, 467), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(2, 2)', 'strides': '(1)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=(2, 2), strides=1, activation='relu')\n", (405, 467), False, 'from tensorflow.keras.layers import Conv2D, Flatten, Dense, Input\n'), ((498, 507), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (505, 507), False, 'from tensorflow.keras.layers import Conv2D, Flatten, Dense, Input\n'), ((526, 554), 'tensorflow.keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (531, 554), False, 'from tensorflow.keras.layers import Conv2D, Flatten, Dense, Input\n'), ((573, 590), 'tensorflow.keras.layers.Dense', 'Dense', (['output_dim'], {}), '(output_dim)\n', (578, 590), False, 'from tensorflow.keras.layers import Conv2D, Flatten, Dense, Input\n'), ((811, 833), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(6, 6, 1)'}), '(shape=(6, 6, 1))\n', (816, 833), False, 'from tensorflow.keras.layers import Conv2D, Flatten, Dense, Input\n')]
|
# -*- coding: utf-8 -*-
from copy import deepcopy
import numpy as np
import pytest
from pygam import *
from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList
from pygam.utils import flatten
@pytest.fixture
def chicago_gam(chicago_X_y):
X, y = chicago_X_y
gam = PoissonGAM(terms=s(0, n_splines=200) + te(3, 1) + s(2)).fit(X, y)
return gam
def test_wrong_length():
"""iterable params must all match lengths
"""
with pytest.raises(ValueError):
SplineTerm(0, lam=[0, 1, 2], penalties=['auto', 'auto'])
def test_num_coefs(mcycle_X_y, wage_X_y):
"""make sure this method gives correct values
"""
X, y = mcycle_X_y
term = Intercept().compile(X)
assert term.n_coefs == 1
term = LinearTerm(0).compile(X)
assert term.n_coefs == 1
term = SplineTerm(0).compile(X)
assert term.n_coefs == term.n_splines
X, y = wage_X_y
term = FactorTerm(2).compile(X)
assert term.n_coefs == 5
term_a = SplineTerm(0).compile(X)
term_b = SplineTerm(1).compile(X)
term = TensorTerm(term_a, term_b).compile(X)
assert term.n_coefs == term_a.n_coefs * term_b.n_coefs
def test_term_list_removes_duplicates():
"""prove that we remove duplicated terms"""
term = SplineTerm(0)
term_list = term + term
assert isinstance(term_list, TermList)
assert len(term_list) == 1
def test_tensor_invariance_to_scaling(chicago_gam, chicago_X_y):
"""a model with tensor terms should give results regardless of input scaling
"""
X, y = chicago_X_y
X[:, 3] = X[:, 3] * 100
gam = PoissonGAM(terms=s(0, n_splines=200) + te(3, 1) + s(2)).fit(X, y)
assert np.allclose(gam.coef_, chicago_gam.coef_, atol=1e-6)
def test_tensor_must_have_at_least_2_marginal_terms():
    """A tensor term over a single marginal dimension is rejected."""
    pytest.raises(ValueError, te, 0)
def test_tensor_term_expands_args_to_match_penalties_and_terms():
    """scalar or nested args to te() are broadcast across the marginal terms"""
    # a scalar lam is broadcast to one value per marginal term
    tensor = te(0, 1, lam=3)
    assert len(tensor.lam) == 2
    assert len(flatten(tensor.lam)) == 2
    tensor = te(0, 1, penalties='auto')
    assert len(tensor.lam) == 2
    assert len(flatten(tensor.lam)) == 2
    # nested penalties expand lam per-penalty: 1 + 2 = 3 flattened values
    tensor = te(0, 1, penalties=['auto', ['auto', 'auto']])
    assert len(tensor.lam) == 2
    assert len(flatten(tensor.lam)) == 3
def test_tensor_term_skips_kwargs_when_marginal_term_is_supplied():
    """kwargs apply only to marginals given as feature indices, never to
    pre-built marginal terms."""
    tensor = te(0, s(1), n_splines=420)
    marginal_from_index = tensor._terms[0]
    prebuilt_marginal = tensor._terms[1]
    assert marginal_from_index.n_coefs == 420
    assert prebuilt_marginal.n_coefs != 420
def test_tensor_term_doesnt_accept_tensor_terms():
    """Nesting a tensor term inside another tensor term is forbidden."""
    # the inner te(0, 1) builds fine; the outer te must reject it
    pytest.raises(ValueError, te, l(0), te(0, 1))
def test_tensor_args_length_must_agree_with_number_of_terms():
    """Iterable args whose length differs from the number of marginal
    terms must raise a ValueError.
    """
    # te(0, 1) has 2 marginal terms: reject a length-1 lam list...
    with pytest.raises(ValueError):
        te(0, 1, lam=[3])
    # ...and a length-3 lam list.
    # (the length-1 case was duplicated verbatim in the original; the
    # redundant copy is removed)
    with pytest.raises(ValueError):
        te(0, 1, lam=[3, 3, 3])
def test_build_from_info():
    """we can rebuild terms from info
    """
    # one representative of every concrete term type
    terms = [Intercept(),
             LinearTerm(0),
             SplineTerm(0),
             FactorTerm(0),
             TensorTerm(0,1)]
    for term in terms:
        # round-trip: term -> info dict -> term
        assert Term.build_from_info(term.info) == term
    # the te() shorthand builds the same tensor as the explicit constructor
    assert te(0, 1) == TensorTerm(SplineTerm(0, n_splines=10), SplineTerm(1, n_splines=10))
def test_by_variable():
    """Placeholder: a fit on the toy tensor dataset with a by-variable on
    the linear feature should resemble the fit with a tensor product of a
    spline and a linear term.
    """
    pass
def test_by_variable_doesnt_exist_in_X(mcycle_X_y):
    """Compiling a term whose ``by`` index lies outside X must fail."""
    X = mcycle_X_y[0]
    # mcycle has a single feature, so by=1 is out of range
    term = s(0, by=1)
    with pytest.raises(ValueError):
        term.compile(X)
def test_term_list_from_info():
    """A TermList rebuilt from its own info dict equals the original."""
    original = SplineTerm(0) + LinearTerm(1)
    rebuilt = Term.build_from_info(original.info)
    assert rebuilt == original
def test_term_list_only_accepts_terms_or_term_list():
    """TermList takes terms (or nothing); anything else is rejected."""
    TermList()  # empty construction is fine
    pytest.raises(ValueError, TermList, None)
def test_pop_term_from_term_list():
    """TermList.pop removes by position; only index 0 and the last index
    (the default) are supported.
    """
    term_list = SplineTerm(0) + LinearTerm(1) + Intercept()
    term_list_2 = deepcopy(term_list)
    # by default we pop the last
    assert term_list_2.pop() == term_list[-1]
    assert term_list_2.pop(0) == term_list[0]
    with pytest.raises(ValueError):
        # popping from the middle must raise; the original compared the
        # (never produced) result with ``== term_list[0]``, a no-op that
        # is dropped here
        term_list_2.pop(1)
def test_no_multiply():
    """Terms and TermLists do not support multiplication."""
    with pytest.raises(NotImplementedError):
        SplineTerm(0) * LinearTerm(1)
    terms = SplineTerm(0) + LinearTerm(1)
    with pytest.raises(NotImplementedError):
        terms * terms
def test_by_is_similar_to_tensor_with_linear_term(toy_interaction_X_y):
    """for simple interactions we can achieve equivalent fits using:
    - a spline with a by-variable
    - a tensor between spline and a linear term
    """
    X, y = toy_interaction_X_y
    # fit the same interaction two ways
    gam_a = LinearGAM(te(s(0, n_splines=20), l(1))).fit(X, y)
    gam_b = LinearGAM(s(0, by=1)).fit(X, y)
    # both parameterizations should explain the same share of deviance
    r2_a = gam_a.statistics_['pseudo_r2']['explained_deviance']
    r2_b = gam_b.statistics_['pseudo_r2']['explained_deviance']
    assert np.allclose(r2_a, r2_b)
def test_correct_smoothing_in_tensors(toy_interaction_X_y):
    """check that smoothing penalties are correctly computed across the marginal
    dimensions
    feature 0 is the sinusoid, so this one needs to be wiggly
    feature 1 is the linear function, so this can be smoothed heavily
    """
    X, y = toy_interaction_X_y
    # increase smoothing on linear function heavily, to no detriment
    gam = LinearGAM(te(0, 1, lam=[0.6, 10000])).fit(X, y)
    assert gam.statistics_['pseudo_r2']['explained_deviance'] > 0.9
    # smoothing the sinusoid function heavily reduces fit quality
    gam = LinearGAM(te(0, 1, lam=[10000, 0.6])).fit(X, y)
    assert gam.statistics_['pseudo_r2']['explained_deviance'] < 0.1
def test_dummy_encoding(wage_X_y, wage_gam):
    """check that dummy encoding produces fewer coefficients than one-hot"""
    X, y = wage_X_y
    gam = LinearGAM(s(0) + s(1) + f(2, coding='dummy')).fit(X, y)
    # dummy coding drops one reference level: 5 levels -> 4 columns
    assert gam._modelmat(X=X, term=2).shape[1] == 4
    assert gam.terms[2].n_coefs == 4
    # wage_gam uses one-hot coding: all 5 levels get a column
    assert wage_gam._modelmat(X=X, term=2).shape[1] == 5
    assert wage_gam.terms[2].n_coefs == 5
def test_build_cyclic_p_spline(hepatitis_X_y):
    """check the cyclic p spline builds
    the r2 for a cyclic gam on an obviously aperiodic function should suffer
    """
    X, y = hepatitis_X_y
    # unconstrained gam
    gam = LinearGAM(s(0)).fit(X, y)
    r_unconstrained = gam.statistics_['pseudo_r2']['explained_deviance']
    # cyclic gam
    gam = LinearGAM(s(0, basis='cp')).fit(X, y)
    r_cyclic = gam.statistics_['pseudo_r2']['explained_deviance']
    # forcing periodicity on aperiodic data must hurt the fit
    assert r_unconstrained > r_cyclic
def test_cyclic_p_spline_periodicity(hepatitis_X_y):
    """check the cyclic p spline behaves periodically
    namely:
    - the value at the edge knots should be the same
    - extrapolation should be periodic
    """
    X, y = hepatitis_X_y
    gam = LinearGAM(s(0, basis='cp')).fit(X, y)
    # check periodicity: predictions at the two edge knots must agree.
    # (the original read edge_knots_[0][1] for BOTH edges, so left == right
    # and both assertions were vacuous)
    left = gam.edge_knots_[0][0]
    right = gam.edge_knots_[0][1]
    assert(gam.predict(left) == gam.predict(right))
    # check extrapolation: one full period past the right edge
    further = right + (right - left)
    assert(gam.predict(further) == gam.predict(right))
def test_cyclic_p_spline_custom_period():
    """show that we can set custom edge_knots, and that these affect our model's
    performance
    """
    # define square wave
    X = np.linspace(0, 1, 5000)
    y = X > 0.5
    # when modeling the full period, we get close with a periodic basis
    gam = LinearGAM(s(0, basis='cp', n_splines=4, spline_order=0)).fit(X, y)
    assert np.allclose(gam.predict(X), y)
    assert np.allclose(gam.edge_knots_[0], [0, 1])
    # when modeling a non-periodic function, our periodic model fails:
    # with a half period it averages the two halves into a constant 0.5
    gam = LinearGAM(s(0, basis='cp', n_splines=4, spline_order=0, edge_knots=[0, 0.5])).fit(X, y)
    assert np.allclose(gam.predict(X), 0.5)
    assert np.allclose(gam.edge_knots_[0], [0, 0.5])
def test_tensor_terms_have_constraints(toy_interaction_X_y):
    """Fitting with constrained tensor terms works even when the
    constraint is 'none'.
    """
    X, y = toy_interaction_X_y
    model = LinearGAM(te(0, 1, constraints='none')).fit(X, y)
    assert model._is_fitted
    assert model.terms.hasconstraint
def test_tensor_composite_constraints_equal_penalties():
    """check that the composite constraint matrix for a tensor term
    is equivalent to a penalty matrix under the correct conditions
    """
    from pygam.penalties import derivative
    # first-derivative penalty, used below as the tensor's penalty
    def der1(*args, **kwargs):
        kwargs.update({'derivative':1})
        return derivative(*args, **kwargs)
    # create a 3D tensor where the penalty should be equal to the constraint
    term = te(0, 1, 2,
              n_splines=[4, 5, 6],
              penalties=der1,
              lam=1,
              constraints='monotonic_inc')
    # check all the dimensions
    for i in range(3):
        P = term._build_marginal_penalties(i).A
        # a strictly decreasing coefficient vector violates monotonic_inc
        # everywhere, so the constraint matrix is fully active
        C = term._build_marginal_constraints(i,
                                             -np.arange(term.n_coefs),
                                             constraint_lam=1,
                                             constraint_l2=0).A
        assert (P == C).all()
def test_tensor_with_constraints(hepatitis_X_y):
    """we should be able to fit a gam with not 'none' constraints on a tensor term
    and observe its effect in reducing the R2 of the fit
    """
    X, y = hepatitis_X_y
    X = np.c_[X, np.random.randn(len(X))] # add a random interaction data
    # constrain useless dimension
    gam_useless_constraint = LinearGAM(te(0, 1,
                                            constraints=['none', 'monotonic_dec'],
                                            n_splines=[20, 4]))
    gam_useless_constraint.fit(X, y)
    # constrain informative dimension
    gam_constrained = LinearGAM(te(0, 1,
                                 constraints=['monotonic_dec', 'none'],
                                 n_splines=[20, 4]))
    gam_constrained.fit(X, y)
    # constraining the noise dimension barely matters ...
    assert gam_useless_constraint.statistics_['pseudo_r2']['explained_deviance'] > 0.5
    # ... but constraining the informative dimension wrecks the fit
    assert gam_constrained.statistics_['pseudo_r2']['explained_deviance'] < 0.1
class TestRegressions(object):
    # Regression tests pinning previously fixed behaviors.
    def test_no_auto_dtype(self):
        # dtype='auto' is not a valid SplineTerm dtype
        with pytest.raises(ValueError):
            SplineTerm(feature=0, dtype='auto')
    def test_compose_penalties(self):
        """penalties should be composable, and this is done by adding all
        penalties on a single term, NOT multiplying them.
        so a term with a derivative penalty and a None penalty should be equvalent
        to a term with a derivative penalty.
        """
        base_term = SplineTerm(0)
        term = SplineTerm(feature=0, penalties=['auto', 'none'])
        # penalties should be equivalent
        assert (term.build_penalties() == base_term.build_penalties()).A.all()
        # multiple penalties should be additive, not multiplicative,
        # so 'none' penalty should have no effect
        assert np.abs(term.build_penalties().A).sum() > 0
    def test_compose_constraints(self, hepatitis_X_y):
        """we should be able to compose penalties
        here we show that a gam with a monotonic increasing penalty composed with a monotonic decreasing
        penalty is equivalent to a gam with only an intercept
        """
        X, y = hepatitis_X_y
        # inc + dec constraints cancel: only a flat function satisfies both
        gam_compose = LinearGAM(s(0, constraints=['monotonic_inc', 'monotonic_dec'])).fit(X, y)
        gam_intercept = LinearGAM(terms=None).fit(X, y)
        assert np.allclose(gam_compose.coef_[-1], gam_intercept.coef_)
    def test_constraints_and_tensor(self, chicago_X_y):
        """a model that has constraints and tensor terms should not fail to build
        because of inability of tensor terms to build a 'none' constraint
        """
        X, y = chicago_X_y
        gam = PoissonGAM(s(0, constraints='monotonic_inc') + te(3, 1) + s(2)).fit(X, y)
        assert gam._is_fitted
|
[
"copy.deepcopy",
"pygam.utils.flatten",
"numpy.allclose",
"pygam.terms.FactorTerm",
"pygam.penalties.derivative",
"pygam.terms.Term.build_from_info",
"pytest.raises",
"pygam.terms.TensorTerm",
"numpy.arange",
"pygam.terms.SplineTerm",
"numpy.linspace",
"pygam.terms.LinearTerm",
"pygam.terms.Intercept",
"pygam.terms.TermList"
] |
[((1283, 1296), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (1293, 1296), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((1693, 1746), 'numpy.allclose', 'np.allclose', (['gam.coef_', 'chicago_gam.coef_'], {'atol': '(1e-06)'}), '(gam.coef_, chicago_gam.coef_, atol=1e-06)\n', (1704, 1746), True, 'import numpy as np\n'), ((3908, 3918), 'pygam.terms.TermList', 'TermList', ([], {}), '()\n', (3916, 3918), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4093, 4112), 'copy.deepcopy', 'deepcopy', (['term_list'], {}), '(term_list)\n', (4101, 4112), False, 'from copy import deepcopy\n'), ((5125, 5148), 'numpy.allclose', 'np.allclose', (['r2_a', 'r2_b'], {}), '(r2_a, r2_b)\n', (5136, 5148), True, 'import numpy as np\n'), ((7511, 7534), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5000)'], {}), '(0, 1, 5000)\n', (7522, 7534), True, 'import numpy as np\n'), ((7754, 7793), 'numpy.allclose', 'np.allclose', (['gam.edge_knots_[0]', '[0, 1]'], {}), '(gam.edge_knots_[0], [0, 1])\n', (7765, 7793), True, 'import numpy as np\n'), ((8018, 8059), 'numpy.allclose', 'np.allclose', (['gam.edge_knots_[0]', '[0, 0.5]'], {}), '(gam.edge_knots_[0], [0, 0.5])\n', (8029, 8059), True, 'import numpy as np\n'), ((486, 511), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (499, 511), False, 'import pytest\n'), ((521, 577), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {'lam': '[0, 1, 2]', 'penalties': "['auto', 'auto']"}), "(0, lam=[0, 1, 2], penalties=['auto', 'auto'])\n", (531, 577), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((1811, 1836), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1824, 1836), False, 'import pytest\n'), ((2525, 2550), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2538, 
2550), False, 'import pytest\n'), ((2652, 2677), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2665, 2677), False, 'import pytest\n'), ((2715, 2740), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2728, 2740), False, 'import pytest\n'), ((2778, 2803), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2791, 2803), False, 'import pytest\n'), ((2925, 2936), 'pygam.terms.Intercept', 'Intercept', ([], {}), '()\n', (2934, 2936), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((2951, 2964), 'pygam.terms.LinearTerm', 'LinearTerm', (['(0)'], {}), '(0)\n', (2961, 2964), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((2979, 2992), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (2989, 2992), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((3007, 3020), 'pygam.terms.FactorTerm', 'FactorTerm', (['(0)'], {}), '(0)\n', (3017, 3020), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((3035, 3051), 'pygam.terms.TensorTerm', 'TensorTerm', (['(0)', '(1)'], {}), '(0, 1)\n', (3045, 3051), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((3594, 3619), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3607, 3619), False, 'import pytest\n'), ((3757, 3770), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (3767, 3770), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((3773, 3786), 'pygam.terms.LinearTerm', 'LinearTerm', (['(1)'], {}), '(1)\n', (3783, 3786), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((3799, 3835), 
'pygam.terms.Term.build_from_info', 'Term.build_from_info', (['term_list.info'], {}), '(term_list.info)\n', (3819, 3835), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((3928, 3953), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3941, 3953), False, 'import pytest\n'), ((3963, 3977), 'pygam.terms.TermList', 'TermList', (['None'], {}), '(None)\n', (3971, 3977), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4063, 4074), 'pygam.terms.Intercept', 'Intercept', ([], {}), '()\n', (4072, 4074), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4250, 4275), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4263, 4275), False, 'import pytest\n'), ((4410, 4444), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (4423, 4444), False, 'import pytest\n'), ((4501, 4514), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (4511, 4514), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4517, 4530), 'pygam.terms.LinearTerm', 'LinearTerm', (['(1)'], {}), '(1)\n', (4527, 4530), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4540, 4574), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (4553, 4574), False, 'import pytest\n'), ((8720, 8747), 'pygam.penalties.derivative', 'derivative', (['*args'], {}), '(*args, **kwargs)\n', (8730, 8747), False, 'from pygam.penalties import derivative\n'), ((10816, 10829), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (10826, 10829), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((10845, 10894), 
'pygam.terms.SplineTerm', 'SplineTerm', ([], {'feature': '(0)', 'penalties': "['auto', 'none']"}), "(feature=0, penalties=['auto', 'none'])\n", (10855, 10894), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((11679, 11734), 'numpy.allclose', 'np.allclose', (['gam_compose.coef_[-1]', 'gam_intercept.coef_'], {}), '(gam_compose.coef_[-1], gam_intercept.coef_)\n', (11690, 11734), True, 'import numpy as np\n'), ((713, 724), 'pygam.terms.Intercept', 'Intercept', ([], {}), '()\n', (722, 724), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((777, 790), 'pygam.terms.LinearTerm', 'LinearTerm', (['(0)'], {}), '(0)\n', (787, 790), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((843, 856), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (853, 856), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((943, 956), 'pygam.terms.FactorTerm', 'FactorTerm', (['(2)'], {}), '(2)\n', (953, 956), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((1011, 1024), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (1021, 1024), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((1049, 1062), 'pygam.terms.SplineTerm', 'SplineTerm', (['(1)'], {}), '(1)\n', (1059, 1062), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((1085, 1111), 'pygam.terms.TensorTerm', 'TensorTerm', (['term_a', 'term_b'], {}), '(term_a, term_b)\n', (1095, 1111), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((1995, 2014), 'pygam.utils.flatten', 'flatten', (['tensor.lam'], {}), '(tensor.lam)\n', (2002, 
2014), False, 'from pygam.utils import flatten\n'), ((2109, 2128), 'pygam.utils.flatten', 'flatten', (['tensor.lam'], {}), '(tensor.lam)\n', (2116, 2128), False, 'from pygam.utils import flatten\n'), ((2243, 2262), 'pygam.utils.flatten', 'flatten', (['tensor.lam'], {}), '(tensor.lam)\n', (2250, 2262), False, 'from pygam.utils import flatten\n'), ((3091, 3122), 'pygam.terms.Term.build_from_info', 'Term.build_from_info', (['term.info'], {}), '(term.info)\n', (3111, 3122), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((3166, 3193), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {'n_splines': '(10)'}), '(0, n_splines=10)\n', (3176, 3193), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((3195, 3222), 'pygam.terms.SplineTerm', 'SplineTerm', (['(1)'], {'n_splines': '(10)'}), '(1, n_splines=10)\n', (3205, 3222), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4031, 4044), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (4041, 4044), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4047, 4060), 'pygam.terms.LinearTerm', 'LinearTerm', (['(1)'], {}), '(1)\n', (4057, 4060), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4454, 4467), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (4464, 4467), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4470, 4483), 'pygam.terms.LinearTerm', 'LinearTerm', (['(1)'], {}), '(1)\n', (4480, 4483), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((10409, 10434), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10422, 10434), False, 'import 
pytest\n'), ((10448, 10483), 'pygam.terms.SplineTerm', 'SplineTerm', ([], {'feature': '(0)', 'dtype': '"""auto"""'}), "(feature=0, dtype='auto')\n", (10458, 10483), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((9175, 9198), 'numpy.arange', 'np.arange', (['term.n_coefs'], {}), '(term.n_coefs)\n', (9184, 9198), True, 'import numpy as np\n')]
|
from serial import Serial
import time
from PyQt5.QtCore import pyqtSignal, QObject, QTimer
from PyQt5.QtWidgets import QMessageBox
from PyQt5 import QtTest
from math import isclose
import numpy as np
class Stages(QObject):
    """Controller for a 3-axis (Y, X, Z) motorized stage on a serial port.

    Speaks a GCS-style ASCII command set (MOV / POS? / SVO / FRF / VEL /
    ACC / STP — presumably a PI controller; confirm against the hardware
    manual).  Wires the stage to the main window's position boxes, jog
    buttons and halt/home buttons, and polls positions on a 2 Hz timer.
    Axis order throughout is [Y, X, Z].
    """
    def __init__(self,parent=None):
        super().__init__()
        # 1 We may need to add a message box before doing FRF
        # 2 Also, probably better to move to ref position in unreferenced mode
        self.mainWindow=parent
        # user-space coordinates of the sample position
        self.sample_y_pos = 165
        self.sample_x_pos = 52.57
        self.axis_names = ['Y','X','Z']
        self.axis_max = [204., 102., 12.5]  # travel range per axis
        self.axis_signs = [-1., -1., 1.]  # hardware direction of each axis
        self.tol = [[1,30], [self.sample_x_pos-3,self.axis_max[1]-self.sample_x_pos-3], [0.1,0]] ## do not change this
        self.offsets = [1., self.sample_x_pos, 0.1]  # raw->user coordinate offsets
        self.home = [0., 0., 0]
        self.vels = [10, 0.5, 0.5]  # per-axis velocities
        self.accs = [0.1,0.1, 0.1]  # per-axis base accelerations
        self.positions = np.asarray([0.,0.,0.]) # YXZ
        self.steps = np.asarray([.1,.1,.1])  # jog step size per axis
        self.stop = False  # set True by stop_stages() to cancel queued moves
        # GUI widgets, indexed by axis [Y, X, Z]
        self.pos_box = [self.mainWindow.stage_y_pos,self.mainWindow.stage_x_pos,self.mainWindow.stage_z_pos]
        self.pos_boxv =[self.mainWindow.stage_y_posv,self.mainWindow.stage_x_posv,self.mainWindow.stage_z_posv]
        self.step_box =[self.mainWindow.y_step,self.mainWindow.x_step,self.mainWindow.z_step]
        self.step_button=[[self.mainWindow.y_plus_btn,self.mainWindow.y_minus_btn],
                          [self.mainWindow.x_plus_btn,self.mainWindow.x_minus_btn],
                          [self.mainWindow.z_plus_btn,self.mainWindow.z_minus_btn]]
        try:
            self.ser = Serial('COM6', baudrate=115200, timeout=2) # factory setting
            self.mainWindow.halt_stages_btn.clicked.connect(self.stop_stages)
            self.mainWindow.home_stages_btn.clicked.connect(self.home_stage)
            self.mainWindow.stage_set.clicked.connect(self.set_move)
            self.step_box[0].setValue(self.steps[0])
            self.step_box[1].setValue(self.steps[1])
            self.step_box[2].setValue(self.steps[2])
            # default-arg binding (axis=0, ...) avoids the late-binding
            # closure pitfall when connecting per-axis handlers
            self.step_box[0].valueChanged.connect(lambda axis: self.step_changed(axis=0))
            self.step_box[1].valueChanged.connect(lambda axis: self.step_changed(axis=1))
            self.step_box[2].valueChanged.connect(lambda axis: self.step_changed(axis=2))
            self.step_button[0][0].clicked.connect(lambda args: self.step_move(args=(0,1)))
            self.step_button[0][1].clicked.connect(lambda args: self.step_move(args=(0,-1)))
            self.step_button[1][0].clicked.connect(lambda args: self.step_move(args=(1,1)))
            self.step_button[1][1].clicked.connect(lambda args: self.step_move(args=(1,-1)))
            self.step_button[2][0].clicked.connect(lambda args: self.step_move(args=(2,1)))
            self.step_button[2][1].clicked.connect(lambda args: self.step_move(args=(2,-1)))
            self.stage_init()
            self.timer = QTimer()
            self.timer.setInterval(500) # refresh position info at 2 Hz
            self.timer.timeout.connect(self.get_positions)
            self.timer.start()
            print('Stage connected')
        except:
            # NOTE(review): bare except deliberately treats ANY failure as
            # "stage not available" and disables the UI; consider narrowing
            # to (serial.SerialException, OSError).
            self.ser=None
            self.mainWindow.stage_set.setEnabled(False)
            self.mainWindow.halt_stages_btn.setEnabled(False)
            self.mainWindow.home_stages_btn.setEnabled(False)
            for i in range(3):
                self.pos_box[i].setEnabled(False)
                self.pos_boxv[i].setEnabled(False)
                self.step_box[i].setEnabled(False)
                self.step_button[i][0].setEnabled(False)
                self.step_button[i][1].setEnabled(False)
            print('Stage not available')
    def close(self):
        """Disable all servos, close the serial port and schedule Qt deletion."""
        if self.ser:
            print('homing the stage...')
            #self.safe_move_axes(self.home)
            self.timer.stop()
            for axis in range(3):
                # SVO <axis> 0: switch the servo off
                self.ser.write(("SVO "+str(axis+1)+" 0 \n").encode())
            self.ser.close()
            print('Stage disconnected')
        self.deleteLater()
    def get_positions(self):
        """Poll the controller (POS?) and refresh the position read-outs."""
        get_position_command=("POS?\n").encode()
        self.ser.write(get_position_command)
        for axis in range(3):
            # reply format: two header characters followed by the value
            v = float(self.ser.readline().decode()[2:])
            # map raw controller coordinate into user coordinate
            self.positions[axis] = (self.axis_signs[axis]*v) % self.axis_max[axis]-self.offsets[axis]
            self.pos_box[axis].setText(format(self.positions[axis],'.3f'))
    def home_stage(self):
        """Ask for confirmation, then move all axes to the home position."""
        reply = QMessageBox.question(self.mainWindow, 'Home stage?', 'Do you want to move the stage home?',
                                     QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            self.safe_move_axes(self.home)
    ### moving functions ###
    def move_axis(self,axis,pos):
        """Move one axis to ``pos`` (user coordinates) after validation.

        Moves of 15 mm or more temporarily run at 10x acceleration and
        block until complete.
        """
        def move(axis,pos):
            # convert user coordinate back to raw controller coordinate
            new_pos=self.axis_signs[axis]*(pos + self.offsets[axis]) % self.axis_max[axis]
            move_command=("MOV "+str(axis+1)+" {} \n".format(new_pos)).encode()
            self.ser.write(move_command)
        if self.isValid(axis,pos) and not self.stop:
            if (abs(pos-self.positions[axis])<15):
                move(axis,pos)
            else:
                # long move: boost acceleration, wait, then restore it
                self.set_acc(axis,10)
                move(axis,pos)
                self.wait()
                self.set_acc(axis,1)
    def safe_move_axes(self,target,safe=True):
        """Move all three axes to ``target`` ([Y, X, Z]).

        With ``safe=True`` Z is lowered to home first, then X/Y move, and
        Z only rises at the destination — avoiding collisions.
        """
        self.stop=False
        for axis in range(3):
            self.pos_boxv[axis].setValue(target[axis])
        if safe:
            self.move_axis(2,self.home[2])
            self.wait()
            self.move_axis(1,target[1])
            self.move_axis(0,target[0])
            self.wait()
            self.move_axis(2,target[2])
        else:
            self.move_axis(1,target[1])
            self.move_axis(0,target[0])
            self.move_axis(2,target[2])
    def isValid(self,axis,pos):
        """Return True if moving ``axis`` to ``pos`` passes the interlocks
        and soft limits; otherwise explain why in the GUI info area."""
        # Allow the stage to go up only when the Y coordinate is 160 ± 5mm from the imaging center
        if ((axis==2) and (pos>self.positions[axis]) and
            ((self.positions[0]<(self.sample_y_pos-3)) or (self.positions[0]>(self.sample_y_pos+3)))):
            self.mainWindow.info_area.setText('Z moving up is only allowed when ' + str(self.sample_y_pos-3) +" <= Y <= " + str(self.sample_y_pos+3))
            return False
        # Forbid stage to move Y beyond 160 ± 5mm when Z is nonzero
        if ((axis==0) and (self.positions[2]>0.01) and
            ((pos<(self.sample_y_pos-5)) or (pos>(self.sample_y_pos+5)))):
            self.mainWindow.info_area.setText('Y movement beyond ' + str(self.sample_y_pos-3) +" <= Y <= " + str(self.sample_y_pos+3)
                                              +' is not allowed when Z>0.01')
            return False
        ## Check the value within the limit
        pos=pos+self.offsets[axis]
        if (pos>=self.tol[axis][0]) and (pos<=(self.axis_max[axis]-self.tol[axis][1])):
            return True
        else:
            self.mainWindow.info_area.setText(self.axis_names[axis]+' axis beyond the range')
            self.mainWindow.info_area.append('The value should be between '+str(self.tol[axis][0]-self.offsets[axis])
                                             +' and ' +str(self.axis_max[axis]-self.tol[axis][1]-self.offsets[axis]))
            return False
    def wait(self):
        """Block until motion stops, keeping the Qt event loop responsive."""
        moving = True
        while moving:
            QtTest.QTest.qWait(500) # Check every .5 second
            # \x05 queries motion status; a reply of 0 means all axes stopped
            self.ser.write(b'\x05')
            s=int(self.ser.readline().decode("utf-8"))
            moving = False if s==0 else True
    #### Interface functions ####
    def set_move(self,axis):
        """Move to the coordinates currently entered in the position boxes.

        The collision-safe sequence is used only for long XY moves (>10 mm).
        """
        target=[self.pos_boxv[axis].value() for axis in range(3)]
        XYdist=np.sqrt((target[0]-self.positions[0])**2+(target[1]-self.positions[1])**2)
        # NOTE(review): Zdist is computed over the whole target vector and
        # never used — probably meant abs(target[2]-self.positions[2]).
        # The ``axis`` parameter is also shadowed by the comprehension above.
        Zdist=np.abs(target)
        if XYdist>10:
            self.safe_move_axes(target)
        else:
            self.safe_move_axes(target,safe=False)
    def step_move(self,args):
        """Jog one axis by its step size; ``args`` is (axis, direction)."""
        axis = args[0]
        direction = args[1] # sign should be 1 or -1
        self.stop=False
        pos=self.positions[axis]+self.steps[axis]*direction
        self.pos_boxv[axis].setValue(pos)
        self.move_axis(axis,pos)
    ### msc functions ###
    def step_changed(self,axis):
        """Sync the jog step size with the spin box of ``axis``."""
        self.steps[axis] = self.step_box[axis].value()
    def stop_stages(self):
        """Immediately halt all axes (STP) and cancel queued moves."""
        self.stop=True
        stop_command = ("STP\n").encode()
        self.ser.write(stop_command)
    def set_acc(self,axis,scaling):
        """Set accel/decel of ``axis`` to its base acceleration * ``scaling``."""
        self.ser.write(("ACC "+str(axis+1)+" {} \n".format(self.accs[axis]*scaling)).encode())
        self.ser.write(("DEC "+str(axis+1)+" {} \n".format(self.accs[axis]*scaling)).encode())
    def stage_init(self):
        """Enable servos, reference the axes if needed (FRF), configure
        velocity/acceleration, and read the initial positions."""
        ## initializing the stage
        for axis in range(3):
            # SVO <axis> 1: switch the servo on
            self.ser.write(("SVO "+str(axis+1)+" 1 \n").encode())
        reference = 1
        # FRF? returns the referencing state of each axis; the product is 1
        # only if every axis is already referenced
        self.ser.write("FRF? \n".encode())
        for axis in range(3):
            reference *= int(self.ser.readline().decode()[2])
        if reference==1:
            print('stage initialized')
        else:
            print('stage not initialized')
            print('stage initialing...')
            self.ser.write(("FRF \n").encode()) ## This is extremely slow. Needs improvement
            self.wait()
            print('stage initialized')
        ## Setting preferences, homing stages
        for axis in range(3):
            self.ser.write(("VEL "+str(axis+1)+" {} \n".format(self.vels[axis])).encode())
            self.set_acc(axis,1)
        self.get_positions()
        print('homing the stage...')
        #self.safe_move_axes(target=self.home)
        self.wait()
        self.get_positions()
        for axis in range(3):
            self.pos_boxv[axis].setValue(self.positions[axis])
|
[
"serial.Serial",
"PyQt5.QtCore.QTimer",
"numpy.abs",
"numpy.asarray",
"PyQt5.QtTest.QTest.qWait",
"PyQt5.QtWidgets.QMessageBox.question",
"numpy.sqrt"
] |
[((1056, 1083), 'numpy.asarray', 'np.asarray', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1066, 1083), True, 'import numpy as np\n'), ((1110, 1137), 'numpy.asarray', 'np.asarray', (['[0.1, 0.1, 0.1]'], {}), '([0.1, 0.1, 0.1])\n', (1120, 1137), True, 'import numpy as np\n'), ((5024, 5173), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', (['self.mainWindow', '"""Home stage?"""', '"""Do you want to move the stage home?"""', '(QMessageBox.Yes | QMessageBox.No)', 'QMessageBox.No'], {}), "(self.mainWindow, 'Home stage?',\n 'Do you want to move the stage home?', QMessageBox.Yes | QMessageBox.No,\n QMessageBox.No)\n", (5044, 5173), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((8771, 8860), 'numpy.sqrt', 'np.sqrt', (['((target[0] - self.positions[0]) ** 2 + (target[1] - self.positions[1]) ** 2)'], {}), '((target[0] - self.positions[0]) ** 2 + (target[1] - self.positions[\n 1]) ** 2)\n', (8778, 8860), True, 'import numpy as np\n'), ((8860, 8874), 'numpy.abs', 'np.abs', (['target'], {}), '(target)\n', (8866, 8874), True, 'import numpy as np\n'), ((1807, 1849), 'serial.Serial', 'Serial', (['"""COM6"""'], {'baudrate': '(115200)', 'timeout': '(2)'}), "('COM6', baudrate=115200, timeout=2)\n", (1813, 1849), False, 'from serial import Serial\n'), ((3233, 3241), 'PyQt5.QtCore.QTimer', 'QTimer', ([], {}), '()\n', (3239, 3241), False, 'from PyQt5.QtCore import pyqtSignal, QObject, QTimer\n'), ((8391, 8414), 'PyQt5.QtTest.QTest.qWait', 'QtTest.QTest.qWait', (['(500)'], {}), '(500)\n', (8409, 8414), False, 'from PyQt5 import QtTest\n')]
|
import numpy as np
from scipy.ndimage import gaussian_filter1d
from . import ExpFilter, Source, Visualizer
from .melbank import compute_melmat
class Sampler:
    """Turn audio frames from a Source into LED pixel packets.

    ``update_sample`` computes a Mel-spectrogram frame from a rolling audio
    window and maps it to pixel colors via the visualizer; ``sample``
    serializes the pixels that changed since the previous call into the
    wire format ``index, r, g, b, ...``.
    """
    # forward-referenced annotations so the class can be created even when
    # the project types are only available for type checking
    y_rolling: "np.ndarray"
    source: "Source"

    _gamma_table = None  # optional per-channel gamma LUT (ndarray) loaded from disk

    def __init__(self, source: "Source", visualizer: "Visualizer", gamma_table_path: str = None,
                 num_pixels: int = 60,
                 max_pixels_per_packet: int = 126, min_volume_threshold: float = 1e-7,
                 num_frames_rolling_window: int = 2, num_frequency_bins: int = 24,
                 min_freq: int = 200, max_freq: int = 12000
                 ):
        """Build the FFT window, Mel filterbank and smoothing filters.

        ``min_volume_threshold`` is a float (the original annotated it as
        ``int``); below it the strip is blanked instead of visualized.
        """
        self.num_pixels = num_pixels
        self.source = source
        self.visualizer = visualizer
        self.pixels = np.tile(1, (3, self.num_pixels))
        self.prev_sample = np.tile(253, (3, self.num_pixels))
        # tiny random noise so the rolling window is never exactly zero
        self.y_rolling = np.random.rand(num_frames_rolling_window, int(source.rate / source.fps)) / 1e16
        self.fft_window = np.hamming(int(source.rate / source.fps) * num_frames_rolling_window)
        # asymmetric smoothing: fast attack, slow decay
        self.mel_gain = ExpFilter(np.tile(1e-1, num_frequency_bins),
                                  alpha_decay=0.01, alpha_rise=0.99)
        self.mel_smoothing = ExpFilter(np.tile(1e-1, num_frequency_bins),
                                       alpha_decay=0.5, alpha_rise=0.99)
        self.mel_y, _ = compute_melmat(num_mel_bands=num_frequency_bins,
                                       freq_min=min_freq,
                                       freq_max=max_freq,
                                       num_fft_bands=int(source.rate * num_frames_rolling_window / (2.0 * source.fps)),
                                       sample_rate=source.rate)
        self.min_vol = min_volume_threshold
        if gamma_table_path:
            self._gamma_table = np.load(gamma_table_path)
        # BUG FIX: always set the attribute; the original skipped the
        # assignment when max_pixels_per_packet was falsy, leaving
        # sample() to crash on a missing attribute.
        self.max_pixels_per_packet = max_pixels_per_packet

    def sample(self) -> bytes:
        """Serialize the pixels that changed since the previous call.

        Returns bytes of ``index, r, g, b`` quadruples for every changed
        pixel, split so no packet exceeds ``max_pixels_per_packet`` pixels.
        """
        # Truncate values and cast to integer
        p = np.clip(self.pixels, 0, 255).astype(int)
        # BUG FIX: the original used ``if self._gamma_table:`` which raises
        # "truth value of an array is ambiguous" once a multi-element LUT
        # has been loaded; compare identity against None instead.
        if self._gamma_table is not None:
            p = self._gamma_table[p]
        idxs = [i for i in range(p.shape[1]) if not np.array_equal(p[:, i], self.prev_sample[:, i])]
        n_packets = len(idxs) // self.max_pixels_per_packet + 1
        idxs = np.array_split(idxs, n_packets)
        m = []
        for idx in idxs:
            for i in idx:
                m.append(i)  # Index of pixel to change
                m.append(p[0][i])  # Pixel red value
                m.append(p[1][i])  # Pixel green value
                m.append(p[2][i])  # Pixel blue value
        self.prev_sample = np.copy(p)
        return bytes(m)

    def update_sample(self) -> "np.ndarray":
        """Pull one audio frame, update the rolling window and recompute pixels."""
        y = self.source.audio_sample() / 2.0 ** 15  # normalize int16 PCM to [-1, 1]
        self.y_rolling[:-1] = self.y_rolling[1:]
        self.y_rolling[-1, :] = np.copy(y)
        y_data = np.concatenate(self.y_rolling, axis=0).astype(np.float32)

        vol = np.max(np.abs(y_data))
        if vol < self.min_vol:
            # below the noise floor: blank the strip
            self.pixels = np.tile(0, (3, self.num_pixels))
        else:
            rolling_len = len(y_data)
            n_zeros = 2 ** int(np.ceil(np.log2(rolling_len))) - rolling_len
            # Pad with zeros until the next power of two
            y_data *= self.fft_window
            y_padded = np.pad(y_data, (0, n_zeros), mode='constant')
            # Construct a Mel filterbank from the FFT data
            mel = np.atleast_2d(
                np.abs(np.fft.rfft(y_padded)[:rolling_len // 2])
            ).T * self.mel_y.T
            # Scale data to values more suitable for visualization
            mel = np.sum(mel, axis=0)
            mel = mel ** 2.0
            # Gain normalization
            self.mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
            mel /= self.mel_gain.value
            mel = self.mel_smoothing.update(mel)
            # Map filterbank output onto LED strip
            self.pixels = self.visualizer.visualize(mel)
        return self.pixels
|
[
"numpy.pad",
"numpy.load",
"numpy.fft.rfft",
"numpy.abs",
"numpy.sum",
"numpy.copy",
"numpy.array_equal",
"scipy.ndimage.gaussian_filter1d",
"numpy.log2",
"numpy.clip",
"numpy.tile",
"numpy.array_split",
"numpy.concatenate"
] |
[((719, 751), 'numpy.tile', 'np.tile', (['(1)', '(3, self.num_pixels)'], {}), '(1, (3, self.num_pixels))\n', (726, 751), True, 'import numpy as np\n'), ((779, 813), 'numpy.tile', 'np.tile', (['(253)', '(3, self.num_pixels)'], {}), '(253, (3, self.num_pixels))\n', (786, 813), True, 'import numpy as np\n'), ((2279, 2310), 'numpy.array_split', 'np.array_split', (['idxs', 'n_packets'], {}), '(idxs, n_packets)\n', (2293, 2310), True, 'import numpy as np\n'), ((2622, 2632), 'numpy.copy', 'np.copy', (['p'], {}), '(p)\n', (2629, 2632), True, 'import numpy as np\n'), ((2833, 2843), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (2840, 2843), True, 'import numpy as np\n'), ((1049, 1081), 'numpy.tile', 'np.tile', (['(0.1)', 'num_frequency_bins'], {}), '(0.1, num_frequency_bins)\n', (1056, 1081), True, 'import numpy as np\n'), ((1192, 1224), 'numpy.tile', 'np.tile', (['(0.1)', 'num_frequency_bins'], {}), '(0.1, num_frequency_bins)\n', (1199, 1224), True, 'import numpy as np\n'), ((1778, 1803), 'numpy.load', 'np.load', (['gamma_table_path'], {}), '(gamma_table_path)\n', (1785, 1803), True, 'import numpy as np\n'), ((2941, 2955), 'numpy.abs', 'np.abs', (['y_data'], {}), '(y_data)\n', (2947, 2955), True, 'import numpy as np\n'), ((3014, 3046), 'numpy.tile', 'np.tile', (['(0)', '(3, self.num_pixels)'], {}), '(0, (3, self.num_pixels))\n', (3021, 3046), True, 'import numpy as np\n'), ((3293, 3338), 'numpy.pad', 'np.pad', (['y_data', '(0, n_zeros)'], {'mode': '"""constant"""'}), "(y_data, (0, n_zeros), mode='constant')\n", (3299, 3338), True, 'import numpy as np\n'), ((3612, 3631), 'numpy.sum', 'np.sum', (['mel'], {'axis': '(0)'}), '(mel, axis=0)\n', (3618, 3631), True, 'import numpy as np\n'), ((1991, 2019), 'numpy.clip', 'np.clip', (['self.pixels', '(0)', '(255)'], {}), '(self.pixels, 0, 255)\n', (1998, 2019), True, 'import numpy as np\n'), ((2861, 2899), 'numpy.concatenate', 'np.concatenate', (['self.y_rolling'], {'axis': '(0)'}), '(self.y_rolling, axis=0)\n', (2875, 2899), True, 
'import numpy as np\n'), ((2151, 2198), 'numpy.array_equal', 'np.array_equal', (['p[:, i]', 'self.prev_sample[:, i]'], {}), '(p[:, i], self.prev_sample[:, i])\n', (2165, 2198), True, 'import numpy as np\n'), ((3734, 3767), 'scipy.ndimage.gaussian_filter1d', 'gaussian_filter1d', (['mel'], {'sigma': '(1.0)'}), '(mel, sigma=1.0)\n', (3751, 3767), False, 'from scipy.ndimage import gaussian_filter1d\n'), ((3138, 3158), 'numpy.log2', 'np.log2', (['rolling_len'], {}), '(rolling_len)\n', (3145, 3158), True, 'import numpy as np\n'), ((3454, 3475), 'numpy.fft.rfft', 'np.fft.rfft', (['y_padded'], {}), '(y_padded)\n', (3465, 3475), True, 'import numpy as np\n')]
|
import random
from telegram import Update
from telegram.ext import MessageHandler, Filters
from feature.base_command import BaseCommand
class ReplyKudoMessage(BaseCommand):
    """Replies with a kudo message when a chat member posts ``@user++ ...``."""

    def __init__(self):
        # Canned praise used when the sender supplies no custom message.
        self.messages = [
            "Sick Beast!",
            "Teamwork!",
            "Aye aye boi",
            "Sugoi des",
            "Subarashi",
            "Hen Hao!",
            "Good boi!",
        ]

    @property
    def help_message(self):
        """One-line usage hint shown by the bot's help command."""
        return "@{tele_username}++ {description} to give kudo, more features coming soon!\n\n"

    @property
    def handler(self):
        """Telegram handler that reacts to any non-command text message."""
        def reply_kudo(update: Update, _) -> None:
            words = update.message.text.split(' ')
            target = words[0]
            # Only react when the first word ends in the kudo marker "++".
            if target[-2:] != "++":
                return
            username = target[:-2]
            if len(words) > 1:
                body = ' '.join(words[1:])
            else:
                body = random.choice(self.messages)
            update.message.reply_text(username + " " + body)

        return MessageHandler(Filters.text & ~Filters.command, reply_kudo)
|
[
"telegram.ext.MessageHandler",
"random.choice"
] |
[((1081, 1140), 'telegram.ext.MessageHandler', 'MessageHandler', (['(Filters.text & ~Filters.command)', 'reply_kudo'], {}), '(Filters.text & ~Filters.command, reply_kudo)\n', (1095, 1140), False, 'from telegram.ext import MessageHandler, Filters\n'), ((934, 962), 'random.choice', 'random.choice', (['self.messages'], {}), '(self.messages)\n', (947, 962), False, 'import random\n')]
|
from flask_wtf import FlaskForm
from wtforms import TextField, PasswordField, SubmitField
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired, Length, Email, EqualTo
from wtforms import ValidationError
from ..models.models import User
class RegisterForm(FlaskForm):
    """Sign-up form with username, email and password (+confirmation) fields.

    Uniqueness checks use WTForms' inline-validator convention: a method named
    ``validate_<fieldname>`` is invoked automatically by ``form.validate()``.
    The original ``check_*`` names were never called by WTForms, so the
    duplicate-email/username checks silently did not run.
    """

    username = TextField('Username', validators=[DataRequired(), Length(min=3, max=32)])
    email = EmailField('Email', validators=[DataRequired(), Email(), Length(min=4, max=32)])
    # BUGFIX: typo "Passowrds" in the user-facing mismatch message.
    password = PasswordField('Password', validators=[DataRequired(), EqualTo('confirm', message="Passwords do not match")])
    confirm = PasswordField('Confirm Password')
    submit = SubmitField('Submit')

    def validate_email(self, field):
        """Reject an email address that is already registered."""
        if User.query.filter_by(email=field.data).first():
            raise ValidationError("Email is already registered, please login")

    def validate_username(self, field):
        """Reject a username that is already registered."""
        if User.query.filter_by(username=field.data).first():
            raise ValidationError("username is already registered")

    # Backward-compatible aliases for any external caller that used the old
    # (never-auto-invoked) method names.
    check_email = validate_email
    check_username = validate_username
|
[
"wtforms.ValidationError",
"wtforms.validators.Length",
"wtforms.validators.Email",
"wtforms.SubmitField",
"wtforms.validators.EqualTo",
"wtforms.PasswordField",
"wtforms.validators.DataRequired"
] |
[((623, 656), 'wtforms.PasswordField', 'PasswordField', (['"""Confirm Password"""'], {}), "('Confirm Password')\n", (636, 656), False, 'from wtforms import TextField, PasswordField, SubmitField\n'), ((670, 691), 'wtforms.SubmitField', 'SubmitField', (['"""Submit"""'], {}), "('Submit')\n", (681, 691), False, 'from wtforms import TextField, PasswordField, SubmitField\n'), ((804, 864), 'wtforms.ValidationError', 'ValidationError', (['"""Email is already registered, please login"""'], {}), "('Email is already registered, please login')\n", (819, 864), False, 'from wtforms import ValidationError\n'), ((983, 1032), 'wtforms.ValidationError', 'ValidationError', (['"""username is already registered"""'], {}), "('username is already registered')\n", (998, 1032), False, 'from wtforms import ValidationError\n'), ((352, 366), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (364, 366), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((368, 389), 'wtforms.validators.Length', 'Length', ([], {'min': '(3)', 'max': '(32)'}), '(min=3, max=32)\n', (374, 389), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((436, 450), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (448, 450), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((452, 459), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (457, 459), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((461, 482), 'wtforms.validators.Length', 'Length', ([], {'min': '(4)', 'max': '(32)'}), '(min=4, max=32)\n', (467, 482), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((538, 552), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (550, 552), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n'), ((554, 606), 'wtforms.validators.EqualTo', 'EqualTo', (['"""confirm"""'], {'message': 
'"""Passowrds do not match"""'}), "('confirm', message='Passowrds do not match')\n", (561, 606), False, 'from wtforms.validators import DataRequired, Length, Email, EqualTo\n')]
|
"""
Base Settings - standard Django settings and others needed in all environments
"""
import os
# Filesystem path to the project directory
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# ------------------------
# DJANGO STANDARD SETTINGS
# ------------------------
# Central Time Zone
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
MIDDLEWARE_CLASSES = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# ------------------------
# DJANGO STANDARD SETTINGS
# ------------------------
# --------------------
# APPS SETTINGS
# --------------------
# Django base apps
DJANGO_CORE = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
# Any included apps from other sources
THIRD_PARTY_APPS = ()
# Application specific apps
LOCAL_APPS = ()
# --------------------
# END APPS SETTINGS
# --------------------
# -------------------------
# STATIC AND MEDIA SETTINGS
# -------------------------
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Media (file uploads, mainly)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# -----------------------------
# END STATIC AND MEDIA SETTINGS
# -----------------------------
|
[
"os.environ.get",
"os.path.abspath",
"os.path.join"
] |
[((294, 329), 'os.environ.get', 'os.environ.get', (['"""DJANGO_SECRET_KEY"""'], {}), "('DJANGO_SECRET_KEY')\n", (308, 329), False, 'import os\n'), ((2895, 2926), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""media"""'], {}), "(BASE_DIR, 'media')\n", (2907, 2926), False, 'import os\n'), ((2666, 2698), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""static"""'], {}), "(BASE_DIR, 'static')\n", (2678, 2698), False, 'import os\n'), ((185, 210), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (200, 210), False, 'import os\n')]
|
# generated by update to not change manually
import dataclasses as dt
import typing as t
from enum import Enum
from bungieapi.json import to_json
from bungieapi.types import ManifestReference
class DestinyActivityModeType(Enum):
    """For historical reasons, this list will have both D1 and D2-relevant
    Activity Modes in it.
    Please don't take this to mean that some D1-only feature is coming
    back!
    """
    # Generated enum: the numeric values are Bungie API contract codes --
    # never renumber or reuse them.
    NONE = 0
    STORY = 2
    STRIKE = 3
    RAID = 4
    ALL_PV_P = 5
    PATROL = 6
    ALL_PV_E = 7
    RESERVED9 = 9
    CONTROL = 10
    RESERVED11 = 11
    CLASH = 12  # Clash -> Destiny's name for Team Deathmatch. 4v4 combat, the team with the highest kills at the end of time wins.
    RESERVED13 = 13
    CRIMSON_DOUBLES = 15
    NIGHTFALL = 16
    HEROIC_NIGHTFALL = 17
    ALL_STRIKES = 18
    IRON_BANNER = 19
    RESERVED20 = 20
    RESERVED21 = 21
    RESERVED22 = 22
    RESERVED24 = 24
    ALL_MAYHEM = 25
    RESERVED26 = 26
    RESERVED27 = 27
    RESERVED28 = 28
    RESERVED29 = 29
    RESERVED30 = 30
    SUPREMACY = 31
    PRIVATE_MATCHES_ALL = 32
    SURVIVAL = 37
    COUNTDOWN = 38
    TRIALS_OF_THE_NINE = 39
    SOCIAL = 40
    TRIALS_COUNTDOWN = 41
    TRIALS_SURVIVAL = 42
    IRON_BANNER_CONTROL = 43
    IRON_BANNER_CLASH = 44
    IRON_BANNER_SUPREMACY = 45
    SCORED_NIGHTFALL = 46
    SCORED_HEROIC_NIGHTFALL = 47
    RUMBLE = 48
    ALL_DOUBLES = 49
    DOUBLES = 50
    PRIVATE_MATCHES_CLASH = 51
    PRIVATE_MATCHES_CONTROL = 52
    PRIVATE_MATCHES_SUPREMACY = 53
    PRIVATE_MATCHES_COUNTDOWN = 54
    PRIVATE_MATCHES_SURVIVAL = 55
    PRIVATE_MATCHES_MAYHEM = 56
    PRIVATE_MATCHES_RUMBLE = 57
    HEROIC_ADVENTURE = 58
    SHOWDOWN = 59
    LOCKDOWN = 60
    SCORCHED = 61
    SCORCHED_TEAM = 62
    GAMBIT = 63
    ALL_PV_E_COMPETITIVE = 64
    BREAKTHROUGH = 65
    BLACK_ARMORY_RUN = 66
    SALVAGE = 67
    IRON_BANNER_SALVAGE = 68
    PV_P_COMPETITIVE = 69
    PV_P_QUICKPLAY = 70
    CLASH_QUICKPLAY = 71
    CLASH_COMPETITIVE = 72
    CONTROL_QUICKPLAY = 73
    CONTROL_COMPETITIVE = 74
    GAMBIT_PRIME = 75
    RECKONING = 76
    MENAGERIE = 77
    VEX_OFFENSIVE = 78
    NIGHTMARE_HUNT = 79
    ELIMINATION = 80
    MOMENTUM = 81
    DUNGEON = 82
    SUNDIAL = 83
    TRIALS_OF_OSIRIS = 84
    DARES = 85
    OFFENSIVE = 86
    LOST_SECTOR = 87
@dt.dataclass(frozen=True)
class DestinyHistoricalStatsDefinition:
    """Static definition of a single historical stat.

    Generated from the Bungie API spec; per-field semantics come from the
    ``description`` entries in each field's metadata.
    """

    category: "DestinyStatsCategoryType" = dt.field(
        metadata={"description": "Category for the stat."}
    )
    group: "DestinyStatsGroupType" = dt.field(
        metadata={"description": "Statistic group"}
    )
    icon_image: str = dt.field(
        metadata={"description": "Optional URI to an icon for the statistic"}
    )
    modes: t.Sequence["DestinyActivityModeType"] = dt.field(
        metadata={"description": "Game modes where this statistic can be reported."}
    )
    period_types: t.Sequence["PeriodType"] = dt.field(
        metadata={"description": "Time periods the statistic covers"}
    )
    stat_description: str = dt.field(
        metadata={"description": "Description of a stat if applicable."}
    )
    stat_id: str = dt.field(
        metadata={"description": "Unique programmer friendly ID for this stat"}
    )
    stat_name: str = dt.field(metadata={"description": "Display name"})
    stat_name_abbr: str = dt.field(metadata={"description": "Display name abbreviated"})
    unit_label: str = dt.field(
        metadata={"description": "Localized Unit Name for the stat."}
    )
    unit_type: "UnitType" = dt.field(
        metadata={"description": "Unit, if any, for the statistic"}
    )
    weight: int = dt.field(
        metadata={
            "description": "Weight assigned to this stat indicating its relative impressiveness."
        }
    )
    medal_tier_hash: t.Optional[
        ManifestReference["DestinyMedalTierDefinition"]
    ] = dt.field(
        default=None,
        metadata={
            "description": "The tier associated with this medal - be it implicitly or explicitly."
        },
    )
    # NOTE(review): the generated description below reads "Optional icon for
    # the statistic" -- likely a codegen/spec quirk; the field holds the
    # merge-method code (see DestinyStatsMergeMethod).
    merge_method: t.Optional[int] = dt.field(
        default=None, metadata={"description": "Optional icon for the statistic"}
    )

    def to_json(self) -> t.Mapping[str, t.Any]:
        # Serialize to the camelCase mapping shape expected by the Bungie API.
        return {
            "statId": to_json(self.stat_id),
            "group": to_json(self.group),
            "periodTypes": to_json(self.period_types),
            "modes": to_json(self.modes),
            "category": to_json(self.category),
            "statName": to_json(self.stat_name),
            "statNameAbbr": to_json(self.stat_name_abbr),
            "statDescription": to_json(self.stat_description),
            "unitType": to_json(self.unit_type),
            "iconImage": to_json(self.icon_image),
            "mergeMethod": to_json(self.merge_method),
            "unitLabel": to_json(self.unit_label),
            "weight": to_json(self.weight),
            "medalTierHash": to_json(self.medal_tier_hash),
        }
class DestinyStatsGroupType(Enum):
    """If the enum value is > 100, it is a "special" group that cannot be
    queried for directly (special cases apply to when they are returned, and
    are not relevant in general cases)"""
    # Generated enum: values are Bungie API contract codes; do not renumber.
    NONE = 0
    GENERAL = 1
    WEAPONS = 2
    MEDALS = 3
    RESERVED_GROUPS = 100  # This is purely to serve as the dividing line between filterable and un-filterable groups. Below this number is a group you can pass as a filter. Above it are groups used in very specific circumstances and not relevant for filtering.
    LEADERBOARD = 101  # Only applicable while generating leaderboards.
    ACTIVITY = 102  # These will *only* be consumed by GetAggregateStatsByActivity
    UNIQUE_WEAPON = (
        103  # These are only consumed and returned by GetUniqueWeaponHistory
    )
    INTERNAL = 104
# Convenience aliases for sequences of the period / activity-mode enums,
# used in generated signatures elsewhere in the package.
PeriodTypeArray = t.Sequence["PeriodType"]
DestinyActivityModeTypeArray = t.Sequence["DestinyActivityModeType"]
class DestinyStatsCategoryType(Enum):
    """Broad category a historical stat belongs to (kills, deaths, medals, ...).

    Generated enum: values are Bungie API contract codes; do not renumber.
    """
    NONE = 0
    KILLS = 1
    ASSISTS = 2
    DEATHS = 3
    CRITICALS = 4
    K_DA = 5
    KD = 6
    SCORE = 7
    ENTERED = 8
    TIME_PLAYED = 9
    MEDAL_WINS = 10
    MEDAL_GAME = 11
    MEDAL_SPECIAL_KILLS = 12
    MEDAL_SPREES = 13
    MEDAL_MULTI_KILLS = 14
    MEDAL_ABILITIES = 15
class UnitType(Enum):
    """Unit in which a stat value is expressed (count, seconds, percent, ...)."""
    NONE = 0
    COUNT = 1  # Indicates the statistic is a simple count of something.
    PER_GAME = 2  # Indicates the statistic is a per game average.
    SECONDS = 3  # Indicates the number of seconds
    POINTS = 4  # Indicates the number of points earned
    TEAM = 5  # Values represents a team ID
    DISTANCE = 6  # Values represents a distance (units to-be-determined)
    PERCENT = 7  # Ratio represented as a whole value from 0 to 100.
    RATIO = 8  # Ratio of something, shown with decimal places
    BOOLEAN = 9  # True or false
    WEAPON_TYPE = 10  # The stat is actually a weapon type.
    STANDING = 11  # Indicates victory, defeat, or something in between.
    MILLISECONDS = 12  # Number of milliseconds some event spanned. For example, race time, or lap time.
    COMPLETION_REASON = 13  # The value is a enumeration of the Completion Reason type.
class DestinyStatsMergeMethod(Enum):
    """How multiple instances of the same stat are collapsed into one value."""
    ADD = 0  # When collapsing multiple instances of the stat together, add the values.
    MIN = 1  # When collapsing multiple instances of the stat together, take the lower value.
    MAX = 2  # When collapsing multiple instances of the stat together, take the higher value.
class PeriodType(Enum):
    """Time period a stat aggregation covers."""
    NONE = 0
    DAILY = 1
    ALL_TIME = 2
    ACTIVITY = 3
# imported at the end to do not case circular imports for type annotations
from bungieapi.generated.components.schemas.destiny.definitions import ( # noqa: E402
DestinyMedalTierDefinition,
)
|
[
"dataclasses.field",
"bungieapi.json.to_json",
"dataclasses.dataclass"
] |
[((2324, 2349), 'dataclasses.dataclass', 'dt.dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (2336, 2349), True, 'import dataclasses as dt\n'), ((2433, 2493), 'dataclasses.field', 'dt.field', ([], {'metadata': "{'description': 'Category for the stat.'}"}), "(metadata={'description': 'Category for the stat.'})\n", (2441, 2493), True, 'import dataclasses as dt\n'), ((2545, 2598), 'dataclasses.field', 'dt.field', ([], {'metadata': "{'description': 'Statistic group'}"}), "(metadata={'description': 'Statistic group'})\n", (2553, 2598), True, 'import dataclasses as dt\n'), ((2635, 2714), 'dataclasses.field', 'dt.field', ([], {'metadata': "{'description': 'Optional URI to an icon for the statistic'}"}), "(metadata={'description': 'Optional URI to an icon for the statistic'})\n", (2643, 2714), True, 'import dataclasses as dt\n'), ((2780, 2870), 'dataclasses.field', 'dt.field', ([], {'metadata': "{'description': 'Game modes where this statistic can be reported.'}"}), "(metadata={'description':\n 'Game modes where this statistic can be reported.'})\n", (2788, 2870), True, 'import dataclasses as dt\n'), ((2926, 2997), 'dataclasses.field', 'dt.field', ([], {'metadata': "{'description': 'Time periods the statistic covers'}"}), "(metadata={'description': 'Time periods the statistic covers'})\n", (2934, 2997), True, 'import dataclasses as dt\n'), ((3040, 3114), 'dataclasses.field', 'dt.field', ([], {'metadata': "{'description': 'Description of a stat if applicable.'}"}), "(metadata={'description': 'Description of a stat if applicable.'})\n", (3048, 3114), True, 'import dataclasses as dt\n'), ((3148, 3233), 'dataclasses.field', 'dt.field', ([], {'metadata': "{'description': 'Unique programmer friendly ID for this stat'}"}), "(metadata={'description':\n 'Unique programmer friendly ID for this stat'})\n", (3156, 3233), True, 'import dataclasses as dt\n'), ((3265, 3315), 'dataclasses.field', 'dt.field', ([], {'metadata': "{'description': 'Display name'}"}), 
"(metadata={'description': 'Display name'})\n", (3273, 3315), True, 'import dataclasses as dt\n'), ((3342, 3404), 'dataclasses.field', 'dt.field', ([], {'metadata': "{'description': 'Display name abbreviated'}"}), "(metadata={'description': 'Display name abbreviated'})\n", (3350, 3404), True, 'import dataclasses as dt\n'), ((3427, 3498), 'dataclasses.field', 'dt.field', ([], {'metadata': "{'description': 'Localized Unit Name for the stat.'}"}), "(metadata={'description': 'Localized Unit Name for the stat.'})\n", (3435, 3498), True, 'import dataclasses as dt\n'), ((3541, 3610), 'dataclasses.field', 'dt.field', ([], {'metadata': "{'description': 'Unit, if any, for the statistic'}"}), "(metadata={'description': 'Unit, if any, for the statistic'})\n", (3549, 3610), True, 'import dataclasses as dt\n'), ((3643, 3753), 'dataclasses.field', 'dt.field', ([], {'metadata': "{'description':\n 'Weight assigned to this stat indicating its relative impressiveness.'}"}), "(metadata={'description':\n 'Weight assigned to this stat indicating its relative impressiveness.'})\n", (3651, 3753), True, 'import dataclasses as dt\n'), ((3883, 4008), 'dataclasses.field', 'dt.field', ([], {'default': 'None', 'metadata': "{'description':\n 'The tier associated with this medal - be it implicitly or explicitly.'}"}), "(default=None, metadata={'description':\n 'The tier associated with this medal - be it implicitly or explicitly.'})\n", (3891, 4008), True, 'import dataclasses as dt\n'), ((4086, 4173), 'dataclasses.field', 'dt.field', ([], {'default': 'None', 'metadata': "{'description': 'Optional icon for the statistic'}"}), "(default=None, metadata={'description':\n 'Optional icon for the statistic'})\n", (4094, 4173), True, 'import dataclasses as dt\n'), ((4272, 4293), 'bungieapi.json.to_json', 'to_json', (['self.stat_id'], {}), '(self.stat_id)\n', (4279, 4293), False, 'from bungieapi.json import to_json\n'), ((4316, 4335), 'bungieapi.json.to_json', 'to_json', (['self.group'], {}), 
'(self.group)\n', (4323, 4335), False, 'from bungieapi.json import to_json\n'), ((4364, 4390), 'bungieapi.json.to_json', 'to_json', (['self.period_types'], {}), '(self.period_types)\n', (4371, 4390), False, 'from bungieapi.json import to_json\n'), ((4413, 4432), 'bungieapi.json.to_json', 'to_json', (['self.modes'], {}), '(self.modes)\n', (4420, 4432), False, 'from bungieapi.json import to_json\n'), ((4458, 4480), 'bungieapi.json.to_json', 'to_json', (['self.category'], {}), '(self.category)\n', (4465, 4480), False, 'from bungieapi.json import to_json\n'), ((4506, 4529), 'bungieapi.json.to_json', 'to_json', (['self.stat_name'], {}), '(self.stat_name)\n', (4513, 4529), False, 'from bungieapi.json import to_json\n'), ((4559, 4587), 'bungieapi.json.to_json', 'to_json', (['self.stat_name_abbr'], {}), '(self.stat_name_abbr)\n', (4566, 4587), False, 'from bungieapi.json import to_json\n'), ((4620, 4650), 'bungieapi.json.to_json', 'to_json', (['self.stat_description'], {}), '(self.stat_description)\n', (4627, 4650), False, 'from bungieapi.json import to_json\n'), ((4676, 4699), 'bungieapi.json.to_json', 'to_json', (['self.unit_type'], {}), '(self.unit_type)\n', (4683, 4699), False, 'from bungieapi.json import to_json\n'), ((4726, 4750), 'bungieapi.json.to_json', 'to_json', (['self.icon_image'], {}), '(self.icon_image)\n', (4733, 4750), False, 'from bungieapi.json import to_json\n'), ((4779, 4805), 'bungieapi.json.to_json', 'to_json', (['self.merge_method'], {}), '(self.merge_method)\n', (4786, 4805), False, 'from bungieapi.json import to_json\n'), ((4832, 4856), 'bungieapi.json.to_json', 'to_json', (['self.unit_label'], {}), '(self.unit_label)\n', (4839, 4856), False, 'from bungieapi.json import to_json\n'), ((4880, 4900), 'bungieapi.json.to_json', 'to_json', (['self.weight'], {}), '(self.weight)\n', (4887, 4900), False, 'from bungieapi.json import to_json\n'), ((4931, 4960), 'bungieapi.json.to_json', 'to_json', (['self.medal_tier_hash'], {}), '(self.medal_tier_hash)\n', 
(4938, 4960), False, 'from bungieapi.json import to_json\n')]
|
import contextvars

# Context-local slot named "s3"; callers set/get the active S3 handle here,
# so concurrent tasks each see their own value.
s3_var = contextvars.ContextVar("s3")
|
[
"contextvars.ContextVar"
] |
[((30, 58), 'contextvars.ContextVar', 'contextvars.ContextVar', (['"""s3"""'], {}), "('s3')\n", (52, 58), False, 'import contextvars\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 20 23:53:00 2012
###############################################################################
#
# autoPACK Authors: <NAME>, <NAME>, <NAME>, <NAME>
# Based on COFFEE Script developed by <NAME> between 2005 and 2010
# with assistance from <NAME> in 2009 and periodic input
# from <NAME>'s Molecular Graphics Lab
#
# AFGui.py Authors: <NAME> with minor editing/enhancement from <NAME>
#
# Copyright: <NAME> ©2010
#
# This file "displayFill.py" is part of autoPACK, cellPACK, and AutoFill.
#
# autoPACK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# autoPACK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with autoPACK (See "CopyingGNUGPL" in the installation.
# If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
Name: -
@author: <NAME> with design/editing/enhancement by <NAME>
"""
# display cytoplasm spheres
# NOTE(review): Python 2-era script (uses dict.has_key). It relies on names
# supplied by the embedding session -- h1 (the filled volume), vi (the DejaVu
# viewer), orgaToMasterGeom, Spheres, Cylinders, Points, GlfLabels, numpy,
# green, red -- confirm against the host environment before running.
# Collect per-ingredient sphere centers/radii for the exterior (cytoplasm) recipe.
verts = {}
radii = {}
r = h1.exteriorRecipe
if r :
    for ingr in r.ingredients:
        verts[ingr] = []
        radii[ingr] = []
for pos, rot, ingr, ptInd in h1.molecules:
    level = ingr.maxLevel
    px = ingr.transformPoints(pos, rot, ingr.positions[level])
    for ii in range(len(ingr.radii[level])):
        verts[ingr].append( px[ii] )
        radii[ingr].append( ingr.radii[level][ii] )
if r :
    for ingr in r.ingredients:
        if len(verts[ingr]):
            if ingr.modelType=='Spheres':
                sph = Spheres('spheres', inheritMaterial=0,
                              centers=verts[ingr], materials=[ingr.color],
                              radii=radii[ingr], visible=1)
                vi.AddObject(sph, parent=orgaToMasterGeom[ingr])
##            elif ingr.modelType=='Cylinders':
##                cyl = Cylinders('Cylinders', inheritMaterial=0,
##                                vertices=verts[ingr], materials=[ingr.color],
##                                radii=radii[ingr], visible=1)
##                vi.AddObject(cyl, parent=orgaToMasterGeom[ingr])
# display cytoplasm meshes
# Group instance matrices by shared mesh geometry so each mesh is added once.
r = h1.exteriorRecipe
if r :
    meshGeoms = {}
    for pos, rot, ingr, ptInd in h1.molecules:
        if ingr.mesh: # display mesh
            geom = ingr.mesh
            mat = rot.copy()
            mat[:3, 3] = pos
            if not meshGeoms.has_key(geom):
                meshGeoms[geom] = [mat]
                geom.Set(materials=[ingr.color], inheritMaterial=0, visible=0)
            else:
                meshGeoms[geom].append(mat)
            vi.AddObject(geom, parent=orgaToMasterGeom[ingr])
    for geom, mats in meshGeoms.items():
        geom.Set(instanceMatrices=mats, visible=1)
# display organelle spheres
for orga in h1.organelles:
    verts = {}
    radii = {}
    rs = orga.surfaceRecipe
    if rs :
        for ingr in rs.ingredients:
            verts[ingr] = []
            radii[ingr] = []
    ri = orga.innerRecipe
    if ri:
        for ingr in ri.ingredients:
            verts[ingr] = []
            radii[ingr] = []
    for pos, rot, ingr, ptInd in orga.molecules:
        level = ingr.maxLevel
        px = ingr.transformPoints(pos, rot, ingr.positions[level])
        if ingr.modelType=='Spheres':
            for ii in range(len(ingr.radii[level])):
                verts[ingr].append( px[ii] )
                radii[ingr].append( ingr.radii[level][ii] )
        elif ingr.modelType=='Cylinders':
            # Cylinders need both endpoints, so each segment contributes two
            # vertices and its radius twice.
            px2 = ingr.transformPoints(pos, rot, ingr.positions2[level])
            for ii in range(len(ingr.radii[level])):
                verts[ingr].append( px[ii] )
                verts[ingr].append( px2[ii] )
                radii[ingr].append( ingr.radii[level][ii] )
                radii[ingr].append( ingr.radii[level][ii] )
    if rs :
        for ingr in rs.ingredients:
            if len(verts[ingr]):
                if ingr.modelType=='Spheres':
                    sph = Spheres('spheres', inheritMaterial=False,
                                  centers=verts[ingr], radii=radii[ingr],
                                  materials=[ingr.color], visible=0)
                    vi.AddObject(sph, parent=orgaToMasterGeom[ingr])
                elif ingr.modelType=='Cylinders':
                    v = numpy.array(verts[ingr])
                    f = numpy.arange(len(v))
                    f.shape=(-1,2)
                    cyl = Cylinders('Cylinders', inheritMaterial=0,
                                    vertices=v, faces=f, materials=[ingr.color],
                                    radii=radii[ingr], visible=1,
                                    inheritCulling=0, culling='None',
                                    inheritFrontPolyMode=0, frontPolyMode='line')
                    vi.AddObject(cyl, parent=orgaToMasterGeom[ingr])
    if ri:
        for ingr in ri.ingredients:
            if len(verts[ingr]):
                if ingr.modelType=='Spheres':
                    sph = Spheres('spheres', inheritMaterial=False,
                                  centers=verts[ingr], radii=radii[ingr],
                                  materials=[ingr.color], visible=0)
                    vi.AddObject(sph, parent=orgaToMasterGeom[ingr])
                elif ingr.modelType=='Cylinders':
                    v = numpy.array(verts[ingr])
                    f = numpy.arange(len(v))
                    f.shape=(-1,2)
                    cyl = Cylinders('Cylinders', inheritMaterial=0,
                                    vertices=v, faces=f, materials=[ingr.color],
                                    radii=radii[ingr], visible=1,
                                    inheritCulling=0, culling='None',
                                    inheritFrontPolyMode=0, frontPolyMode='line')
                    vi.AddObject(cyl, parent=orgaToMasterGeom[ingr])
# display organelle meshes
for orga in h1.organelles:
    matrices = {}
    rs = orga.surfaceRecipe
    if rs :
        for ingr in rs.ingredients:
            if ingr.mesh: # display mesh
                matrices[ingr] = []
                ingr.mesh.Set(materials=[ingr.color], inheritMaterial=0)
    ri = orga.innerRecipe
    if ri :
        for ingr in ri.ingredients:
            if ingr.mesh: # display mesh
                matrices[ingr] = []
                ingr.mesh.Set(materials=[ingr.color], inheritMaterial=0)
    for pos, rot, ingr, ptInd in orga.molecules:
        if ingr.mesh: # display mesh
            geom = ingr.mesh
            mat = rot.copy()
            mat[:3, 3] = pos
            matrices[ingr].append(mat)
            vi.AddObject(geom, parent=orgaToMasterGeom[ingr])
    for ingr, mats in matrices.items():
        geom = ingr.mesh
        geom.Set(instanceMatrices=mats, visible=1)
        vi.AddObject(geom, parent=orgaToMasterGeom[ingr])
from DejaVu.colorTool import RGBRamp, Map
# Label each surface grid point with its distance to the closest surface.
verts = []
labels = []
for i, value in enumerate(h1.distToClosestSurf):
    if h1.gridPtId[i]==1:
        verts.append( h1.masterGridPositions[i] )
        labels.append("%.2f"%value)
lab = GlfLabels('distanceLab', vertices=verts, labels=labels, visible=0)
vi.AddObject(lab)
# display grid points with positive distances left
verts = []
rads = []
for pt in h1.freePointsAfterFill[:h1.nbFreePointsAfterFill]:
    d = h1.distancesAfterFill[pt]
    if d>h1.smallestProteinSize-0.001:
        verts.append(h1.masterGridPositions[pt])
        rads.append(d)
if len(verts):
    sph1 = Spheres('unusedSph', centers=verts, radii=rads, inheritFrontPolyMode=0,
                frontPolyMode='line', visible=0)
    vi.AddObject(sph1)
if len(verts):
    pts1 = Points('unusedPts', vertices=verts, inheritPointWidth=0,
                pointWidth=4, inheritMaterial=0, materials=[(0,1,0)], visible=0)
    vi.AddObject(pts1)
# Green: grid points still free after the fill; red: points consumed by it.
verts = []
for pt in h1.freePointsAfterFill[:h1.nbFreePointsAfterFill]:
    verts.append(h1.masterGridPositions[pt])
unpts = Points('unused Grid Points', vertices=verts, inheritMaterial=0,
              materials=[green], visible=0)
vi.AddObject(unpts)
verts = []
for pt in h1.freePointsAfterFill[h1.nbFreePointsAfterFill:]:
    verts.append(h1.masterGridPositions[pt])
uspts = Points('used Grid Points', vertices=verts, inheritMaterial=0,
              materials=[red], visible=0)
vi.AddObject(uspts)
# NOTE(review): the attribute name below contains a space -- probably meant
# 'jitterVectors' (used two lines later); as written this hasattr is always False.
if hasattr(h1, 'jitter vectors'):
    from DejaVu.Polylines import Polylines
    verts = []
    for p1, p2 in (h1.jitterVectors):
        verts.append( (p1, p2))
    jv = Polylines('jitter vectors', vertices=verts, visible=1,
                   inheritLineWidth=0, lineWidth=4)
    vi.AddObject(jv, parent=orgaToMasterGeom[h1])
def dspMesh(geom):
for c in geom.children:
if c.name=='mesh':
c.Set(visible=1)
def undspMesh(geom):
for c in geom.children:
if c.name=='mesh':
c.Set(visible=0)
def dspSph(geom):
for c in geom.children:
if c.name=='spheres':
c.Set(visible=1)
def undspSph(geom):
for c in geom.children:
if c.name=='spheres':
c.Set(visible=0)
def showHide(func):
r = h1.exteriorRecipe
if r :
for ingr in r.ingredients:
master = orgaToMasterGeom[ingr]
func(master)
for orga in h1.organelles:
rs = orga.surfaceRecipe
if rs :
for ingr in rs.ingredients:
master = orgaToMasterGeom[ingr]
func(master)
ri = orga.innerRecipe
if ri:
for ingr in ri.ingredients:
master = orgaToMasterGeom[ingr]
func(master)
showHide(dspMesh)
showHide(undspSph)
|
[
"DejaVu.Polylines.Polylines"
] |
[((8923, 9014), 'DejaVu.Polylines.Polylines', 'Polylines', (['"""jitter vectors"""'], {'vertices': 'verts', 'visible': '(1)', 'inheritLineWidth': '(0)', 'lineWidth': '(4)'}), "('jitter vectors', vertices=verts, visible=1, inheritLineWidth=0,\n lineWidth=4)\n", (8932, 9014), False, 'from DejaVu.Polylines import Polylines\n')]
|
#
# author: <NAME>
#
# Copyright 2010 University of Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
from warnings import warn
from random import randint
from rdftools.gcityhash import city64
from rdftools.log import logger
from rdftools.raptorutil import KB
from rdftools.tools.base import ParserVisitorTool
from rdftools.tools.bloom import ScalableBloomFilter, check, add
from voidgen import INIT_CAPACITY_MED, FP_ERR_RATE, MIL
__author__ = 'basca'
def encode(keys, key_literals, value):
_key = city64(value)
while True:
key = '%d' % _key
mapping = '%s->%s' % (key, value)
if not check(keys, key):
# no collision
add(keys, key)
key_literals.add(mapping)
return key
else:
# a possible collision
if not key_literals.check(mapping):
logger.warn('[collision detected]')
_key += randint(0, MIL)
else:
return key
class RdfEncoder(ParserVisitorTool):
def __init__(self, source_file, capacity_triples=INIT_CAPACITY_MED):
super(RdfEncoder, self).__init__(source_file, capacity_triples=capacity_triples)
self.keys = ScalableBloomFilter(capacity_triples, FP_ERR_RATE)
self.key_literals = ScalableBloomFilter(capacity_triples, FP_ERR_RATE)
self.t_count = 0
self.out_file = io.open('%s.ent' % source_file, 'wb+', buffering=512 * KB)
# loop optimisations
self.write = self.out_file.write
def __del__(self):
self.out_file.close()
def on_visit(self, s, p, o, c):
if self.t_count % 50000 == 0 and self.t_count > 0:
self._log.info('[processed {0} triples]'.format(self.t_count))
self.write('%s %s %s\n' % (
encode(self.keys, self.key_literals, s),
encode(self.keys, self.key_literals, p),
encode(self.keys, self.key_literals, o)
))
self.t_count += 1
def get_results(self):
return True
|
[
"rdftools.tools.bloom.ScalableBloomFilter",
"rdftools.tools.bloom.check",
"rdftools.log.logger.warn",
"rdftools.gcityhash.city64",
"random.randint",
"rdftools.tools.bloom.add",
"io.open"
] |
[((1023, 1036), 'rdftools.gcityhash.city64', 'city64', (['value'], {}), '(value)\n', (1029, 1036), False, 'from rdftools.gcityhash import city64\n'), ((1725, 1775), 'rdftools.tools.bloom.ScalableBloomFilter', 'ScalableBloomFilter', (['capacity_triples', 'FP_ERR_RATE'], {}), '(capacity_triples, FP_ERR_RATE)\n', (1744, 1775), False, 'from rdftools.tools.bloom import ScalableBloomFilter, check, add\n'), ((1804, 1854), 'rdftools.tools.bloom.ScalableBloomFilter', 'ScalableBloomFilter', (['capacity_triples', 'FP_ERR_RATE'], {}), '(capacity_triples, FP_ERR_RATE)\n', (1823, 1854), False, 'from rdftools.tools.bloom import ScalableBloomFilter, check, add\n'), ((1904, 1962), 'io.open', 'io.open', (["('%s.ent' % source_file)", '"""wb+"""'], {'buffering': '(512 * KB)'}), "('%s.ent' % source_file, 'wb+', buffering=512 * KB)\n", (1911, 1962), False, 'import io\n'), ((1136, 1152), 'rdftools.tools.bloom.check', 'check', (['keys', 'key'], {}), '(keys, key)\n', (1141, 1152), False, 'from rdftools.tools.bloom import ScalableBloomFilter, check, add\n'), ((1193, 1207), 'rdftools.tools.bloom.add', 'add', (['keys', 'key'], {}), '(keys, key)\n', (1196, 1207), False, 'from rdftools.tools.bloom import ScalableBloomFilter, check, add\n'), ((1382, 1417), 'rdftools.log.logger.warn', 'logger.warn', (['"""[collision detected]"""'], {}), "('[collision detected]')\n", (1393, 1417), False, 'from rdftools.log import logger\n'), ((1442, 1457), 'random.randint', 'randint', (['(0)', 'MIL'], {}), '(0, MIL)\n', (1449, 1457), False, 'from random import randint\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 02:13:59 2019
@author: <NAME>
"""
import tensorflow as tf
def Transferred_MobileNetV2():
# Download MobileNet-V2 with pretrained weight on ImageNet
model = tf.keras.applications.MobileNetV2(weights='imagenet')
# model.summary()
# Trim off the last FC layer
base_model = tf.keras.Model(inputs=model.inputs, outputs=model.layers[-2].output)
base_model.trainable = False # Freeze the convolutional base
# base_model.summary()
# Reconstruct the FC layer using functional API
# print(model.layers[-1].activation)
x = base_model(model.inputs)
x = tf.keras.layers.Dense(3, activation='softmax')(x)
new_model = tf.keras.Model(inputs=model.inputs, outputs=x)
# new_model.summary()
# Or we can use the simpler sequential API
fc_layer = tf.keras.layers.Dense(3, activation='softmax')
new_model = tf.keras.Sequential([
base_model,
fc_layer])
return new_model
def New_MobileNetV2():
# Download MobileNet-V2 with pretrained weight on ImageNet
model = tf.keras.applications.MobileNetV2(weights='imagenet')
# Trim off the last FC layer
base_model = tf.keras.Model(inputs=model.inputs, outputs=model.layers[-2].output)
# Add the lasy FC layer
fc_layer = tf.keras.layers.Dense(3, activation='softmax')
new_model = tf.keras.Sequential([
base_model,
fc_layer])
new_model.compile(optimizer=tf.keras.optimizers.Adam(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return new_model
if __name__ == '__main__':
model = Transferred_MobileNetV2()
model.summary()
# model = New_MobileNetV2()
# model.summary()
|
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Model",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.Sequential",
"tensorflow.keras.applications.MobileNetV2"
] |
[((218, 271), 'tensorflow.keras.applications.MobileNetV2', 'tf.keras.applications.MobileNetV2', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (251, 271), True, 'import tensorflow as tf\n'), ((344, 412), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'model.inputs', 'outputs': 'model.layers[-2].output'}), '(inputs=model.inputs, outputs=model.layers[-2].output)\n', (358, 412), True, 'import tensorflow as tf\n'), ((705, 751), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'model.inputs', 'outputs': 'x'}), '(inputs=model.inputs, outputs=x)\n', (719, 751), True, 'import tensorflow as tf\n'), ((840, 886), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(3)'], {'activation': '"""softmax"""'}), "(3, activation='softmax')\n", (861, 886), True, 'import tensorflow as tf\n'), ((903, 946), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['[base_model, fc_layer]'], {}), '([base_model, fc_layer])\n', (922, 946), True, 'import tensorflow as tf\n'), ((1093, 1146), 'tensorflow.keras.applications.MobileNetV2', 'tf.keras.applications.MobileNetV2', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (1126, 1146), True, 'import tensorflow as tf\n'), ((1198, 1266), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'model.inputs', 'outputs': 'model.layers[-2].output'}), '(inputs=model.inputs, outputs=model.layers[-2].output)\n', (1212, 1266), True, 'import tensorflow as tf\n'), ((1311, 1357), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(3)'], {'activation': '"""softmax"""'}), "(3, activation='softmax')\n", (1332, 1357), True, 'import tensorflow as tf\n'), ((1374, 1417), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['[base_model, fc_layer]'], {}), '([base_model, fc_layer])\n', (1393, 1417), True, 'import tensorflow as tf\n'), ((639, 685), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(3)'], {'activation': '"""softmax"""'}), "(3, activation='softmax')\n", (660, 685), 
True, 'import tensorflow as tf\n'), ((1476, 1502), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (1500, 1502), True, 'import tensorflow as tf\n')]
|
from xlab.data.importer.iex.api import base
class IexSymbolsApi:
def __init__(self, token: str = ''):
self._client = base.SimpleIexApiHttpClient(token, 'ref-data/symbols',
lambda: {})
def get_symbols(self):
return self._client.call()
|
[
"xlab.data.importer.iex.api.base.SimpleIexApiHttpClient"
] |
[((132, 199), 'xlab.data.importer.iex.api.base.SimpleIexApiHttpClient', 'base.SimpleIexApiHttpClient', (['token', '"""ref-data/symbols"""', '(lambda : {})'], {}), "(token, 'ref-data/symbols', lambda : {})\n", (159, 199), False, 'from xlab.data.importer.iex.api import base\n')]
|
from django.db import models
from django.conf import settings
import os
import json
class Importer(models.Model):
class Meta:
abstract = True
def get_json_file( file ):
path = settings.APPS_DIR
data = None
with open( os.path.join(path,file), encoding='utf-8' ) as f:
data = json.load(f)
return data
|
[
"json.load",
"os.path.join"
] |
[((331, 343), 'json.load', 'json.load', (['f'], {}), '(f)\n', (340, 343), False, 'import json\n'), ((262, 286), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (274, 286), False, 'import os\n')]
|
""" validator/util.py
Utilities and methods to help with data validation.
"""
import re
# See: https://emailregex.com/
EMAIL_REGEX = re.compile(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)")
# See: https://codereview.stackexchange.com/questions/19663/http-url-validating
URL_REGEX = re.compile(
r'(^(?:http|ftp)s?://)?' # http:// or https://
# domain...
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
class ValidatorNotFoundError(Exception):
""" Validator not found exception.
A custom exception that should be raised whenever the specified data
validator schema is not found.
"""
def __init__(self, message):
super(ValidatorNotFoundError, self).__init__(message)
self.message = message
class SchemaFormatError(Exception):
""" Schema validation failed exception.
A custom exception that should be raised whenever a schema file cannot be
validated.
"""
def __init__(self, message):
super(SchemaFormatError, self).__init__(message)
self.message = message
|
[
"re.compile"
] |
[((137, 202), 're.compile', 're.compile', (['"""(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)"""'], {}), "('(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)')\n", (147, 202), False, 'import re\n'), ((296, 547), 're.compile', 're.compile', (['"""(^(?:http|ftp)s?://)?(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\\\.)+(?:[A-Z]{2,6}\\\\.?|[A-Z0-9-]{2,}\\\\.?)|localhost|\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\[?[A-F0-9]*:[A-F0-9:]+\\\\]?)(?::\\\\d+)?(?:/?|[/?]\\\\S+)$"""', 're.IGNORECASE'], {}), "(\n '(^(?:http|ftp)s?://)?(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\\\.)+(?:[A-Z]{2,6}\\\\.?|[A-Z0-9-]{2,}\\\\.?)|localhost|\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}|\\\\[?[A-F0-9]*:[A-F0-9:]+\\\\]?)(?::\\\\d+)?(?:/?|[/?]\\\\S+)$'\n , re.IGNORECASE)\n", (306, 547), False, 'import re\n')]
|
# Landsat Util
# License: CC0 1.0 Universal
"""Tests for landsat"""
import sys
import errno
import shutil
import unittest
import subprocess
from tempfile import mkdtemp
from os.path import join, abspath, dirname
try:
import landsat.landsat as landsat
except ImportError:
sys.path.append(abspath(join(dirname(__file__), '../landsat')))
import landsat.landsat as landsat
class TestLandsat(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.temp_folder = mkdtemp()
cls.base_dir = abspath(dirname(__file__))
cls.landsat_image = join(cls.base_dir, 'samples', 'test.tar.bz2')
cls.parser = landsat.args_options()
@classmethod
def tearDownClass(cls):
try:
shutil.rmtree(cls.temp_folder)
shutil.rmtree(join(cls.base_dir, 'samples', 'test'))
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
def system_exit(self, args, code):
try:
landsat.main(self.parser.parse_args(args))
except SystemExit as e:
self.assertEqual(e.code, code)
def test_incorrect_date(self):
""" Test search with incorrect date input """
args = ['search', '--start', 'berlin', '--end', 'january 10 2014']
self.system_exit(args, 1)
def test_too_many_results(self):
""" Test when search return too many results """
args = ['search', '--cloud', '100', '-p', '205,022,206,022,204,022']
self.system_exit(args, 1)
def test_search_pr_correct(self):
"""Test Path Row search with correct input"""
args = ['search', '--start', 'january 1 2013', '--end',
'january 10 2014', '-p', '008,008']
self.system_exit(args, 0)
def test_search_lat_lon(self):
"""Test Latitude Longitude search with correct input"""
args = ['search', '--start', 'may 01 2013', '--end', 'may 08 2013',
'--lat', '38.9107203', '--lon', '-77.0290116']
self.system_exit(args, 0)
def test_search_pr_wrong_input(self):
"""Test Path Row search with incorrect input"""
args = ['search', '-p', 'what?']
self.system_exit(args, 1)
def test_download_correct(self):
"""Test download command with correct input"""
args = ['download', 'LC80010092015051LGN00', '-b', '11,', '-d', self.temp_folder]
self.system_exit(args, 0)
def test_download_incorrect(self):
"""Test download command with incorrect input"""
args = ['download', 'LT813600']
self.system_exit(args, 1)
def test_process_correct(self):
"""Test process command with correct input"""
args = ['process', self.landsat_image]
self.system_exit(args, 0)
def test_process_correct_pansharpen(self):
"""Test process command with correct input and pansharpening"""
args = ['process', '--pansharpen', self.landsat_image]
self.system_exit(args, 0)
def test_process_incorrect(self):
"""Test process command with incorrect input"""
args = ['process', 'whatever']
self.system_exit(args, 1)
def check_command_line(self):
""" Check if the commandline performs correctly """
self.assertEqual(subprocess.call(['python', join(self.base_dir, '../landsat.py'), '-h']), 0)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"os.path.dirname",
"tempfile.mkdtemp",
"landsat.landsat.args_options",
"shutil.rmtree",
"os.path.join"
] |
[((3398, 3413), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3411, 3413), False, 'import unittest\n'), ((493, 502), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (500, 502), False, 'from tempfile import mkdtemp\n'), ((581, 626), 'os.path.join', 'join', (['cls.base_dir', '"""samples"""', '"""test.tar.bz2"""'], {}), "(cls.base_dir, 'samples', 'test.tar.bz2')\n", (585, 626), False, 'from os.path import join, abspath, dirname\n'), ((648, 670), 'landsat.landsat.args_options', 'landsat.args_options', ([], {}), '()\n', (668, 670), True, 'import landsat.landsat as landsat\n'), ((534, 551), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (541, 551), False, 'from os.path import join, abspath, dirname\n'), ((742, 772), 'shutil.rmtree', 'shutil.rmtree', (['cls.temp_folder'], {}), '(cls.temp_folder)\n', (755, 772), False, 'import shutil\n'), ((799, 836), 'os.path.join', 'join', (['cls.base_dir', '"""samples"""', '"""test"""'], {}), "(cls.base_dir, 'samples', 'test')\n", (803, 836), False, 'from os.path import join, abspath, dirname\n'), ((311, 328), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (318, 328), False, 'from os.path import join, abspath, dirname\n'), ((3316, 3352), 'os.path.join', 'join', (['self.base_dir', '"""../landsat.py"""'], {}), "(self.base_dir, '../landsat.py')\n", (3320, 3352), False, 'from os.path import join, abspath, dirname\n')]
|
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..','..'))
import numpy as np
import commpy
from sdr_utils import vector as vec
from sdr_utils import plot_two_signals
class synchronization():
def __init__(self, param):
self.halfpreamble = param.halfpreamble
self.full_pream_len = len(self.halfpreamble)*2
self.Ncp = param.Ncp
self.Ncs = param.Ncs
self.K = param.K
self.M = param.M
# self.B = param.B
self.N = param.K * param.M
# def sync(self,data):
# ''' returns tupel of 2-D arrrays (payload, preamble), where the 1st dimension is number of detected
# preambles and the second dimension is the length on payload / data'''
# preamble_starts = self.detect_preamble_starts(data)
# preamble = np.vstack([data[(start):(start + self.full_pream_len)] for start in preamble_starts])
# payload = np.vstack(
# [data[(start + self.full_pream_len + self.Ncs+ self.Ncp ) + np.arange(self.N)] for start in preamble_starts]
# )
# return (payload, preamble)
def detect_preamble_starts(self,data):
self.half_peam_len = len(self.halfpreamble)
metric = self._calc_metric(data)
peak_locs = self._find_metric_peaks(metric)
#peak_locs = self._find_single_peak(metric)
preamble_starts = peak_locs - int(self.half_peam_len/2)
#print(preamble_starts)
#plot_two_signals(data, metric) # XXX
return preamble_starts
def _calc_metric(self,data):
cross_corr = np.correlate(data,self.halfpreamble, mode = "same")
#plot_two_signals(data, cross_corr) # XXX
metric = cross_corr + vec.shift(cross_corr, -self.half_peam_len)
metric = (metric.real)**2 + (metric.imag)**2
#plot_two_signals(cross_corr, metric) # XXX
autocorr = self._calc_moving_autocorr(data)
#plot_two_signals(metric/max(abs(metric)), autocorr/max(abs(autocorr)), same_axis=False)
return metric*autocorr
def _calc_threshold(self, metric):
half_peam_len = len(self.halfpreamble)
threshold = np.empty(len(metric))
metric = np.pad(metric,(half_peam_len,0),'constant')
for i in range(half_peam_len+1, len(metric)):
window = metric[i-half_peam_len:i]
window[np.where(window>threshold[i-half_peam_len-1])]/=2
threshold[i-half_peam_len] = np.sum(window)
threshold = vec.shift(threshold/2,half_peam_len//4,mode="same",fill_value=threshold[-1]/2)
threshold[np.where(threshold<max(metric)/100)]= max(metric)/100
#plot_two_signals(metric, threshold, same_axis=True)
return threshold
def _find_metric_peaks(self, metric):
threshold = self._calc_threshold(metric)
# step 1: find peaks
locs = np.where(
(metric>vec.shift(metric,1)) & (metric>vec.shift(metric,-1,fill_value=metric[-1])) &
(metric>threshold)
)[0]
# step 2: find max peak in window with length of fullpreamble
last_peak = metric[locs[0]]
last_loc = locs[0]
locs_out = np.array([],int)
for l in locs:
if ((l - last_loc) > (self.half_peam_len+5)):
locs_out = np.append(locs_out,last_loc)
last_peak = 0.0
if last_peak<metric[l]:
last_peak = metric[l]
last_loc = l
locs_out = np.append(locs_out,last_loc)
return locs_out
def _find_single_peak(self, metric):
threshold = self._calc_threshold(metric)
loc = np.argmax(metric)
if metric[loc]>threshold[loc]:
return np.array([loc])
else:
return np.array([])
def _calc_moving_autocorr(self, data):
half_peam_len = len(self.halfpreamble)
Ncp_cs = self.Ncp + self.Ncs
autocorr = data * vec.shift(data,half_peam_len)
autocorr_metric = np.empty_like(autocorr)
for i in range(half_peam_len, len(autocorr)):
autocorr_metric[i-half_peam_len] = np.sum(autocorr[i-half_peam_len:i])
autocorr_metric = np.abs(autocorr_metric)
autocorr_out = np.empty_like(autocorr_metric)
for i in range(Ncp_cs-2, len(autocorr)):
autocorr_out[i-Ncp_cs-2] = np.sum(autocorr_metric[i-Ncp_cs-2:i])
return autocorr_out/max(autocorr_out) #vec.shift(autocorr_out,Ncp_cs+half_peam_len)
|
[
"numpy.pad",
"numpy.abs",
"numpy.sum",
"numpy.argmax",
"os.path.dirname",
"numpy.empty_like",
"sdr_utils.vector.shift",
"numpy.append",
"numpy.where",
"numpy.array",
"numpy.correlate"
] |
[((47, 72), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (62, 72), False, 'import sys, os\n'), ((1644, 1694), 'numpy.correlate', 'np.correlate', (['data', 'self.halfpreamble'], {'mode': '"""same"""'}), "(data, self.halfpreamble, mode='same')\n", (1656, 1694), True, 'import numpy as np\n'), ((2263, 2309), 'numpy.pad', 'np.pad', (['metric', '(half_peam_len, 0)', '"""constant"""'], {}), "(metric, (half_peam_len, 0), 'constant')\n", (2269, 2309), True, 'import numpy as np\n'), ((2558, 2650), 'sdr_utils.vector.shift', 'vec.shift', (['(threshold / 2)', '(half_peam_len // 4)'], {'mode': '"""same"""', 'fill_value': '(threshold[-1] / 2)'}), "(threshold / 2, half_peam_len // 4, mode='same', fill_value=\n threshold[-1] / 2)\n", (2567, 2650), True, 'from sdr_utils import vector as vec\n'), ((3268, 3285), 'numpy.array', 'np.array', (['[]', 'int'], {}), '([], int)\n', (3276, 3285), True, 'import numpy as np\n'), ((3584, 3613), 'numpy.append', 'np.append', (['locs_out', 'last_loc'], {}), '(locs_out, last_loc)\n', (3593, 3613), True, 'import numpy as np\n'), ((3745, 3762), 'numpy.argmax', 'np.argmax', (['metric'], {}), '(metric)\n', (3754, 3762), True, 'import numpy as np\n'), ((4103, 4126), 'numpy.empty_like', 'np.empty_like', (['autocorr'], {}), '(autocorr)\n', (4116, 4126), True, 'import numpy as np\n'), ((4293, 4316), 'numpy.abs', 'np.abs', (['autocorr_metric'], {}), '(autocorr_metric)\n', (4299, 4316), True, 'import numpy as np\n'), ((4341, 4371), 'numpy.empty_like', 'np.empty_like', (['autocorr_metric'], {}), '(autocorr_metric)\n', (4354, 4371), True, 'import numpy as np\n'), ((1778, 1820), 'sdr_utils.vector.shift', 'vec.shift', (['cross_corr', '(-self.half_peam_len)'], {}), '(cross_corr, -self.half_peam_len)\n', (1787, 1820), True, 'from sdr_utils import vector as vec\n'), ((2522, 2536), 'numpy.sum', 'np.sum', (['window'], {}), '(window)\n', (2528, 2536), True, 'import numpy as np\n'), ((3823, 3838), 'numpy.array', 'np.array', (['[loc]'], {}), 
'([loc])\n', (3831, 3838), True, 'import numpy as np\n'), ((3874, 3886), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3882, 3886), True, 'import numpy as np\n'), ((4046, 4076), 'sdr_utils.vector.shift', 'vec.shift', (['data', 'half_peam_len'], {}), '(data, half_peam_len)\n', (4055, 4076), True, 'from sdr_utils import vector as vec\n'), ((4230, 4267), 'numpy.sum', 'np.sum', (['autocorr[i - half_peam_len:i]'], {}), '(autocorr[i - half_peam_len:i])\n', (4236, 4267), True, 'import numpy as np\n'), ((4462, 4503), 'numpy.sum', 'np.sum', (['autocorr_metric[i - Ncp_cs - 2:i]'], {}), '(autocorr_metric[i - Ncp_cs - 2:i])\n', (4468, 4503), True, 'import numpy as np\n'), ((2430, 2481), 'numpy.where', 'np.where', (['(window > threshold[i - half_peam_len - 1])'], {}), '(window > threshold[i - half_peam_len - 1])\n', (2438, 2481), True, 'import numpy as np\n'), ((3396, 3425), 'numpy.append', 'np.append', (['locs_out', 'last_loc'], {}), '(locs_out, last_loc)\n', (3405, 3425), True, 'import numpy as np\n'), ((2976, 2996), 'sdr_utils.vector.shift', 'vec.shift', (['metric', '(1)'], {}), '(metric, 1)\n', (2985, 2996), True, 'from sdr_utils import vector as vec\n'), ((3007, 3051), 'sdr_utils.vector.shift', 'vec.shift', (['metric', '(-1)'], {'fill_value': 'metric[-1]'}), '(metric, -1, fill_value=metric[-1])\n', (3016, 3051), True, 'from sdr_utils import vector as vec\n')]
|
import os
import argparse
import numpy as np
from tqdm import tqdm
from utils.audio import AudioProcessor
from utils.text import phoneme_to_sequence
def load_metadata(metadata_file):
items = []
with open(metadata_file, 'r') as fp:
for line in fp:
cols = line.split('|')
wav_file = cols[0] + '.wav'
text = cols[1]
items.append([text, wav_file])
return items
def generate_phoneme_sequence(text, phoneme_file):
phonemes = phoneme_to_sequence(text, ['phoneme_cleaners'],
language='en-us',
enable_eos_bos=False)
phonemes = np.asarray(phonemes, dtype=np.int32)
np.save(phoneme_file, phonemes)
return phonemes
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Extract phonemes and melspectrograms from LJSpecch for training Tacotron')
parser.add_argument('data_root', type=str, help='Data root directory')
args = parser.parse_args()
wav_dir = os.path.join(args.data_root, 'wavs')
if not os.path.exists(wav_dir):
raise FileNotFoundError('{} not found'.format(wav_dir))
metadata_file = os.path.join(args.data_root, 'metadata.csv')
if not os.path.exists(metadata_file):
raise FileNotFoundError('{} not found'.format(metadata_file))
melspec_dir = os.path.join(args.data_root, 'melspec')
if not os.path.exists(melspec_dir):
os.makedirs(melspec_dir, exist_ok=True)
spec_dir = os.path.join(args.data_root, 'spec')
if not os.path.exists(spec_dir):
os.makedirs(spec_dir, exist_ok=True)
phoneme_dir = os.path.join(args.data_root, 'phoneme')
if not os.path.exists(phoneme_dir):
os.makedirs(phoneme_dir, exist_ok=True)
items = load_metadata(metadata_file)
ap = AudioProcessor()
for text, wav_file in tqdm(items):
prefix = wav_file.replace('.wav', '')
# 音素系列を生成
generate_phoneme_sequence(
text, os.path.join(phoneme_dir, prefix + '.npy'))
wav = np.array(ap.load_wav(os.path.join(wav_dir, wav_file)),
dtype=np.float32)
# メルスペクトログラムを生成
melspec = ap.melspectrogram(wav).astype('float32')
np.save(os.path.join(melspec_dir, prefix + '.npy'), melspec)
# 線形スペクトログラムを生成
spec = ap.spectrogram(wav).astype('float32')
np.save(os.path.join(spec_dir, prefix + '.npy'), spec)
|
[
"tqdm.tqdm",
"numpy.save",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.asarray",
"os.path.exists",
"utils.text.phoneme_to_sequence",
"utils.audio.AudioProcessor",
"os.path.join"
] |
[((495, 586), 'utils.text.phoneme_to_sequence', 'phoneme_to_sequence', (['text', "['phoneme_cleaners']"], {'language': '"""en-us"""', 'enable_eos_bos': '(False)'}), "(text, ['phoneme_cleaners'], language='en-us',\n enable_eos_bos=False)\n", (514, 586), False, 'from utils.text import phoneme_to_sequence\n'), ((668, 704), 'numpy.asarray', 'np.asarray', (['phonemes'], {'dtype': 'np.int32'}), '(phonemes, dtype=np.int32)\n', (678, 704), True, 'import numpy as np\n'), ((709, 740), 'numpy.save', 'np.save', (['phoneme_file', 'phonemes'], {}), '(phoneme_file, phonemes)\n', (716, 740), True, 'import numpy as np\n'), ((803, 919), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Extract phonemes and melspectrograms from LJSpecch for training Tacotron"""'}), "(description=\n 'Extract phonemes and melspectrograms from LJSpecch for training Tacotron')\n", (826, 919), False, 'import argparse\n'), ((1045, 1081), 'os.path.join', 'os.path.join', (['args.data_root', '"""wavs"""'], {}), "(args.data_root, 'wavs')\n", (1057, 1081), False, 'import os\n'), ((1203, 1247), 'os.path.join', 'os.path.join', (['args.data_root', '"""metadata.csv"""'], {}), "(args.data_root, 'metadata.csv')\n", (1215, 1247), False, 'import os\n'), ((1379, 1418), 'os.path.join', 'os.path.join', (['args.data_root', '"""melspec"""'], {}), "(args.data_root, 'melspec')\n", (1391, 1418), False, 'import os\n'), ((1523, 1559), 'os.path.join', 'os.path.join', (['args.data_root', '"""spec"""'], {}), "(args.data_root, 'spec')\n", (1535, 1559), False, 'import os\n'), ((1661, 1700), 'os.path.join', 'os.path.join', (['args.data_root', '"""phoneme"""'], {}), "(args.data_root, 'phoneme')\n", (1673, 1700), False, 'import os\n'), ((1840, 1856), 'utils.audio.AudioProcessor', 'AudioProcessor', ([], {}), '()\n', (1854, 1856), False, 'from utils.audio import AudioProcessor\n'), ((1884, 1895), 'tqdm.tqdm', 'tqdm', (['items'], {}), '(items)\n', (1888, 1895), False, 'from tqdm import tqdm\n'), ((1093, 1116), 
'os.path.exists', 'os.path.exists', (['wav_dir'], {}), '(wav_dir)\n', (1107, 1116), False, 'import os\n'), ((1259, 1288), 'os.path.exists', 'os.path.exists', (['metadata_file'], {}), '(metadata_file)\n', (1273, 1288), False, 'import os\n'), ((1430, 1457), 'os.path.exists', 'os.path.exists', (['melspec_dir'], {}), '(melspec_dir)\n', (1444, 1457), False, 'import os\n'), ((1467, 1506), 'os.makedirs', 'os.makedirs', (['melspec_dir'], {'exist_ok': '(True)'}), '(melspec_dir, exist_ok=True)\n', (1478, 1506), False, 'import os\n'), ((1571, 1595), 'os.path.exists', 'os.path.exists', (['spec_dir'], {}), '(spec_dir)\n', (1585, 1595), False, 'import os\n'), ((1605, 1641), 'os.makedirs', 'os.makedirs', (['spec_dir'], {'exist_ok': '(True)'}), '(spec_dir, exist_ok=True)\n', (1616, 1641), False, 'import os\n'), ((1712, 1739), 'os.path.exists', 'os.path.exists', (['phoneme_dir'], {}), '(phoneme_dir)\n', (1726, 1739), False, 'import os\n'), ((1749, 1788), 'os.makedirs', 'os.makedirs', (['phoneme_dir'], {'exist_ok': '(True)'}), '(phoneme_dir, exist_ok=True)\n', (1760, 1788), False, 'import os\n'), ((2015, 2057), 'os.path.join', 'os.path.join', (['phoneme_dir', "(prefix + '.npy')"], {}), "(phoneme_dir, prefix + '.npy')\n", (2027, 2057), False, 'import os\n'), ((2270, 2312), 'os.path.join', 'os.path.join', (['melspec_dir', "(prefix + '.npy')"], {}), "(melspec_dir, prefix + '.npy')\n", (2282, 2312), False, 'import os\n'), ((2417, 2456), 'os.path.join', 'os.path.join', (['spec_dir', "(prefix + '.npy')"], {}), "(spec_dir, prefix + '.npy')\n", (2429, 2456), False, 'import os\n'), ((2095, 2126), 'os.path.join', 'os.path.join', (['wav_dir', 'wav_file'], {}), '(wav_dir, wav_file)\n', (2107, 2126), False, 'import os\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .backbone import resnet18
from .fusion_modules import SumFusion, ConcatFusion, FiLM, GatedFusion
class AVClassifier(nn.Module):
def __init__(self, args):
super(AVClassifier, self).__init__()
fusion = args.fusion_method
if args.dataset == 'VGGSound':
n_classes = 309
elif args.dataset == 'KineticSound':
n_classes = 31
elif args.dataset == 'CREMAD':
n_classes = 6
elif args.dataset == 'AVE':
n_classes = 28
else:
raise NotImplementedError('Incorrect dataset name {}'.format(args.dataset))
if fusion == 'sum':
self.fusion_module = SumFusion(output_dim=n_classes)
elif fusion == 'concat':
self.fusion_module = ConcatFusion(output_dim=n_classes)
elif fusion == 'film':
self.fusion_module = FiLM(output_dim=n_classes, x_film=True)
elif fusion == 'gated':
self.fusion_module = GatedFusion(output_dim=n_classes, x_gate=True)
else:
raise NotImplementedError('Incorrect fusion method: {}!'.format(fusion))
self.audio_net = resnet18(modality='audio')
self.visual_net = resnet18(modality='visual')
def forward(self, audio, visual):
a = self.audio_net(audio)
v = self.visual_net(visual)
(_, C, H, W) = v.size()
B = a.size()[0]
v = v.view(B, -1, C, H, W)
v = v.permute(0, 2, 1, 3, 4)
a = F.adaptive_avg_pool2d(a, 1)
v = F.adaptive_avg_pool3d(v, 1)
a = torch.flatten(a, 1)
v = torch.flatten(v, 1)
a, v, out = self.fusion_module(a, v)
return a, v, out
|
[
"torch.flatten",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.functional.adaptive_avg_pool3d"
] |
[((1552, 1579), 'torch.nn.functional.adaptive_avg_pool2d', 'F.adaptive_avg_pool2d', (['a', '(1)'], {}), '(a, 1)\n', (1573, 1579), True, 'import torch.nn.functional as F\n'), ((1592, 1619), 'torch.nn.functional.adaptive_avg_pool3d', 'F.adaptive_avg_pool3d', (['v', '(1)'], {}), '(v, 1)\n', (1613, 1619), True, 'import torch.nn.functional as F\n'), ((1633, 1652), 'torch.flatten', 'torch.flatten', (['a', '(1)'], {}), '(a, 1)\n', (1646, 1652), False, 'import torch\n'), ((1665, 1684), 'torch.flatten', 'torch.flatten', (['v', '(1)'], {}), '(v, 1)\n', (1678, 1684), False, 'import torch\n')]
|
# Cube Class - Marching Cubes Algorithm
from graphics import *
from triangles import get_triangles # triangle-table look-up
class Cube:
    """One cell of the marching-cubes grid.

    A cube knows its center position and size, the derived vertex and
    edge-point coordinates (helpers come from ``graphics``), and which of
    its corners lie inside the iso-surface.  ``set_triangles`` converts the
    inside-corner set into triangle index groups via the marching-cubes
    lookup table; ``draw``/``raw_draw`` emit OpenGL immediate-mode vertices.
    """

    def __init__(self, pos, size, inside=None):
        """
        :param pos: (x, y, z) center of the cube.
        :param size: scale factor applied to vertices and edge points.
        :param inside: optional list of corner indices inside the surface.
        """
        # coordinates
        self.pos = pos  # center of the cube
        # shape
        self.size = size
        # vertices
        self.vertices = set_vertices(self.pos)
        self.vertices = resize_vertices(self.vertices, self.size)
        # edge_points
        self.edge_points = set_edge_points(self.pos)
        self.edge_points = resize_edge_points(self.edge_points, self.size)
        # initialization
        # BUGFIX: the original signature used a mutable default (inside=[]),
        # which Python creates once and shares across every Cube instance,
        # so corners marked on one cube leaked into all others. A None
        # sentinel gives each cube its own fresh list.
        self.inside = [] if inside is None else inside
        self.triangles = []
        self.surface_color = rgb_to_opengl((230, 35, 35))
        self.vertex_color = rgb_to_opengl((255, 255, 255))

    def set_triangles(self):
        """Look up and store triangles for the current inside-corner set."""
        # make the corner list unique before the table lookup
        self.inside = list(set(self.inside))
        # fetch the triangle row from the marching-cubes lookup table
        triangles = get_triangles(self.inside)
        # drop the -1 terminator and everything after it
        # NOTE(review): raises ValueError if the row contains no -1 -- confirm
        # the lookup table always terminates rows with -1.
        triangles = triangles[:triangles.index(-1)]
        # chunk into groups of 3 (one triangle = 3 edge-point indices)
        triangles = [triangles[i:i + 3] for i in range(0, len(triangles), 3)]
        self.triangles = triangles

    def draw(self):
        """Emit the cube's surface triangles as colored glVertex calls."""
        # surfaces
        for triangle in self.triangles:
            # glBegin(GL_TRIANGLES)
            for i in range(3):
                glColor3fv(self.surface_color)
                glVertex3fv(self.edge_points[triangle[i]])
            glColor3fv((1, 1, 1))  # reset draw color to white
            # glEnd()

    def raw_draw(self):
        """Emit the cube's wireframe edges as glVertex pairs."""
        # glBegin(GL_TRIANGLES)
        # glColor3fv(self.vertex_color)
        # glEnd()
        # vertices
        # glBegin(GL_LINES)
        for edge in edges:  # `edges` comes from the star import (little_opengl)
            glVertex3fv(self.vertices[edge[0]])
            glVertex3fv(self.vertices[edge[1]])
        # glEnd()

    def interpolate(self):
        """Placeholder for edge-point interpolation (not implemented)."""
        pass
|
[
"triangles.get_triangles"
] |
[((871, 897), 'triangles.get_triangles', 'get_triangles', (['self.inside'], {}), '(self.inside)\n', (884, 897), False, 'from triangles import get_triangles\n')]
|
from __future__ import print_function
import os
import unittest
from pywekaclassifiers.classifiers import Classifier, PredictionResult, PredictionError, BP, DENSE, UPDATEABLE_WEKA_CLASSIFIER_NAMES
from pywekaclassifiers.classifiers import IBk # pylint: disable=no-name-in-module
from pywekaclassifiers import arff
from pywekaclassifiers.arff import Num, Nom, Int, Str, Date
class Test(unittest.TestCase):
    """Integration tests for pywekaclassifiers' ARFF handling and classifier wrappers.

    NOTE(review): most of these tests invoke Weka through Classifier.train/
    predict, so they presumably require a working Java/Weka installation and
    the fixture files under BP -- confirm before running in CI.
    """

    def test_arff(self):
        """Loading the abalone training fixture yields 9 attributes."""
        data = arff.ArffFile.load(os.path.join(BP, 'fixtures/abalone-train.arff'))
        self.assertEqual(len(data.attributes), 9)

    def test_numeric(self):
        """Arithmetic on Num wrappers mirrors plain float arithmetic."""
        n1 = Num(1.23)
        n2 = Num(4.56)
        self.assertEqual(n1.value, 1.23)
        self.assertEqual(n2.value, 4.56)
        n3 = n1 + n2
        self.assertEqual(n3.value, n1.value + n2.value)
        self.assertNotEqual(n3, n1)
        self.assertNotEqual(n3, n2)
        n3 += 1
        self.assertEqual(n3.value, n1.value + n2.value + 1)
        # sum() needs a Num start value so the result stays a Num instance.
        s = sum([n1, n2, n3], Num(0))
        print(s)
        self.assertTrue(isinstance(s, Num))
        self.assertEqual(s.value, (n1 + n2 + n3).value)
        n4 = n1 / 10
        self.assertEqual(n4.value, 0.123)
        n4 /= 10
        self.assertEqual(n4.value, 0.0123)

    def test_IBk(self):
        """End-to-end train/predict cycle, error handling, and pickling for IBk."""
        # Train a classifier.
        print('Training IBk classifier...')
        c = Classifier(name='weka.classifiers.lazy.IBk', ckargs={'-K':1})
        training_fn = os.path.join(BP, 'fixtures/abalone-train.arff')
        c.train(training_fn, verbose=1)
        self.assertTrue(c._model_data)
        # Make a valid query.
        print('Using IBk classifier...')
        query_fn = os.path.join(BP, 'fixtures/abalone-query.arff')
        predictions = list(c.predict(query_fn, verbose=1, cleanup=0))
        pred0 = predictions[0]
        print('pred0:', pred0)
        pred1 = PredictionResult(actual=None, predicted=7, probability=None)
        print('pred1:', pred1)
        self.assertEqual(pred0, pred1)
        # A malformed query file must raise PredictionError.
        with self.assertRaises(PredictionError):
            query_fn = os.path.join(BP, 'fixtures/abalone-query-bad.arff')
            predictions = list(c.predict(query_fn, verbose=1, cleanup=0))
        # Make a valid query manually (list-style row, schema given explicitly).
        query = arff.ArffFile(relation='test', schema=[
            ('Sex', ('M', 'F', 'I')),
            ('Length', 'numeric'),
            ('Diameter', 'numeric'),
            ('Height', 'numeric'),
            ('Whole weight', 'numeric'),
            ('Shucked weight', 'numeric'),
            ('Viscera weight', 'numeric'),
            ('Shell weight', 'numeric'),
            ('Class_Rings', 'integer'),
        ])
        query.append(['M', 0.35, 0.265, 0.09, 0.2255, 0.0995, 0.0485, 0.07, '?'])
        # Expected dense serialization of the manual query above.
        data_str0 = """%
@relation test
@attribute 'Sex' {F,I,M}
@attribute 'Length' numeric
@attribute 'Diameter' numeric
@attribute 'Height' numeric
@attribute 'Whole weight' numeric
@attribute 'Shucked weight' numeric
@attribute 'Viscera weight' numeric
@attribute 'Shell weight' numeric
@attribute 'Class_Rings' integer
@data
M,0.35,0.265,0.09,0.2255,0.0995,0.0485,0.07,?
"""
        data_str1 = query.write(fmt=DENSE)
        # print(data_str0
        # print(data_str1
        self.assertEqual(data_str0, data_str1)
        predictions = list(c.predict(query, verbose=1, cleanup=0))
        self.assertEqual(predictions[0],
            PredictionResult(actual=None, predicted=7, probability=None))
        # Test pickling: a reloaded classifier must predict identically.
        fn = os.path.join(BP, 'fixtures/IBk.pkl')
        c.save(fn)
        c = Classifier.load(fn)
        predictions = list(c.predict(query, verbose=1, cleanup=0))
        self.assertEqual(predictions[0],
            PredictionResult(actual=None, predicted=7, probability=None))
        #print('Pickle verified.')
        # Make a valid dict query manually (dict-style row).
        query = arff.ArffFile(relation='test', schema=[
            ('Sex', ('M', 'F', 'I')),
            ('Length', 'numeric'),
            ('Diameter', 'numeric'),
            ('Height', 'numeric'),
            ('Whole weight', 'numeric'),
            ('Shucked weight', 'numeric'),
            ('Viscera weight', 'numeric'),
            ('Shell weight', 'numeric'),
            ('Class_Rings', 'integer'),
        ])
        query.append({
            'Sex': 'M',
            'Length': 0.35,
            'Diameter': 0.265,
            'Height': 0.09,
            'Whole weight': 0.2255,
            'Shucked weight': 0.0995,
            'Viscera weight': 0.0485,
            'Shell weight': 0.07,
            'Class_Rings': arff.MISSING,
        })
        predictions = list(c.predict(query, verbose=1, cleanup=0))
        self.assertEqual(predictions[0],
            PredictionResult(actual=None, predicted=7, probability=None))

    def test_shortcut(self):
        """The IBk shortcut class behaves like the generic Classifier wrapper."""
        c = IBk(K=1) # pylint: disable=undefined-variable
        training_fn = os.path.join(BP, 'fixtures/abalone-train.arff')
        c.train(training_fn, verbose=1)
        self.assertTrue(c._model_data)
        # Make a valid query.
        query_fn = os.path.join(BP, 'fixtures/abalone-query.arff')
        predictions = list(c.predict(query_fn, verbose=1, cleanup=0))
        self.assertEqual(len(predictions), 1)
        self.assertEqual(predictions[0],
            PredictionResult(actual=None, predicted=7, probability=None))

    def test_updateable(self):
        """
        Confirm updateable classifiers are used so that their model is in fact
        updated and not overwritten.
        """
        c = IBk(K=1) # pylint: disable=undefined-variable
        self.assertTrue('IBk' in UPDATEABLE_WEKA_CLASSIFIER_NAMES)
        train_fn1 = os.path.join(BP, 'fixtures/updateable-train-1.arff')
        train_fn2 = os.path.join(BP, 'fixtures/updateable-train-2.arff')
        save_fn = os.path.join(BP, 'fixtures/IBk.updated.pkl')
        # Start from a clean slate so an old pickle can't mask a failure.
        if os.path.isfile(save_fn):
            os.remove(save_fn)
        c.train(train_fn1)
        self.assertTrue(c._model_data)
        # It should have a perfect accuracy when tested on the same file
        # it was trained with.
        acc = c.test(train_fn1, verbose=1)
        self.assertEqual(acc, 1.0)
        # It should have horrible accuracy on a completely different data
        # file that it hasn't been trained on.
        acc = c.test(train_fn2, verbose=1)
        self.assertEqual(acc, 0.0)
        pre_del_model = c._model_data
        # Reload the classifier from a pickle.
        c.save(save_fn)
        del c
        c = IBk.load(save_fn) # pylint: disable=undefined-variable
        self.assertTrue(c._model_data)
        self.assertEqual(c._model_data, pre_del_model)
        # Confirm the Weka model was persisted by confirming we still have
        # perfect accuracy on the initial training file.
        acc = c.test(train_fn1, verbose=1)
        self.assertEqual(acc, 1.0)
        # Train the classifier on a completely different data set.
        c.train(train_fn2)
        # Confirm it has perfect accuracy on the new data set.
        acc = c.test(train_fn2, verbose=1)
        self.assertEqual(acc, 1.0)
        # Confirm we still have perfect accuracy on the original data set.
        acc = c.test(train_fn1, verbose=1)
        self.assertEqual(acc, 1.0)

    def test_PredictionResult_cmp(self):
        """PredictionResult instances with equal fields compare equal."""
        a = PredictionResult(1, 2, 3)
        b = PredictionResult(1, 2, 3)
        self.assertEqual(a, b)

    def test_sparse_stream(self):
        """Sparse serialization, round-tripping, and streaming-mode writes."""
        # Expected sparse output for the two fully-schema-conformant rows.
        s0 = """%
@relation test-abalone
@attribute 'Diameter' numeric
@attribute 'Length' numeric
@attribute 'Sex' {F,M}
@attribute 'Timestamp' date "yyyy-MM-dd HH:mm:ss"
@attribute 'Whole_weight' numeric
@attribute 'Class_Rings' integer
@data
{0 0.286, 1 0.35, 2 M, 3 "2017-12-01 00:00:00", 5 15}
{0 0.86, 2 F, 3 "2017-12-27 00:00:00", 4 0.98, 5 7}
"""
        rows = [
            dict(
                Sex=Nom('M'),
                Length=Num(0.35),
                Diameter=Num(0.286),
                Timestamp=Date('2017-12-01'),
                Class_Rings=Int(15, cls=True)),
            dict(
                Sex=Nom('F'),
                Diameter=Num(0.86),
                Timestamp=Date('2017-12-27'),
                Whole_weight=Num(0.98),
                Class_Rings=Int(7, cls=True)),
        ]
        # Rows whose Sex values ('N', 'B') violate the learned {F,M} schema.
        rows_extra = [
            dict(
                Sex=Nom('N'),
                Length=Num(0.35),
                Diameter=Num(0.286),
                Class_Rings=Int(15, cls=True)),
            dict(
                Sex=Nom('B'),
                Diameter=Num(0.86),
                Whole_weight=Num(0.98),
                Class_Rings=Int(7, cls=True)),
        ]
        a1 = arff.ArffFile(relation='test-abalone')
        for row in rows:
            a1.append(row)
        self.assertEqual(a1.class_attr_name, 'Class_Rings')
        a1.alphabetize_attributes()
        print('a1.attributes:', a1.attributes)
        self.assertEqual(a1.attributes, ['Diameter', 'Length', 'Sex', 'Timestamp', 'Whole_weight', 'Class_Rings'])
        s1 = a1.write()
        print('s0:', s0)
        print('s1:', s1)
        self.assertEqual(s1, s0)
        # Round-trip: parsing the serialized file reproduces the same data.
        a2 = arff.ArffFile.parse(s1)
        for i, line in enumerate(a2.data):
            self.assertEqual(line, a1.data[i])
        s2 = a2.write()
        self.assertEqual(s1, s2)
        a3 = arff.ArffFile(relation='test-abalone')
        self.assertEqual(len(a3.data), 0)
        #a3.open_stream(class_attr_name='Class_Rings')
        # When streaming, you have to provide your schema ahead of time,
        # since otherwise we'd have to update the indexes on all files
        # previously written to the file.
        for row in rows:
            a3.append(row, schema_only=True)
        self.assertEqual(len(a3.data), 0)
        a3.alphabetize_attributes()
        a3.open_stream(class_attr_name='Class_Rings')
        for row in (rows+rows_extra):
            print('row:', row)
            a3.append(row)
            # Streamed rows go straight to disk, never into memory.
            self.assertEqual(len(a3.data), 0)
        fn = a3.close_stream()
        s3 = open(fn, 'r').read()
        print('s3:', s3)
        s4 = """%
@relation test-abalone
@attribute 'Diameter' numeric
@attribute 'Length' numeric
@attribute 'Sex' {F,M}
@attribute 'Timestamp' date "yyyy-MM-dd HH:mm:ss"
@attribute 'Whole_weight' numeric
@attribute 'Class_Rings' integer
@data
{0 0.286, 1 0.35, 2 M, 3 "2017-12-01 00:00:00", 5 15}
{0 0.86, 2 F, 3 "2017-12-27 00:00:00", 4 0.98, 5 7}
{0 0.286, 1 0.35, 5 15}
{0 0.86, 4 0.98, 5 7}
"""
        print('s4:', s4)
        os.remove(fn)
        # Note the rows that have features violating the schema are
        # automatically omitted when in streaming mode.
        self.assertEqual(s3, s4)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
[
"unittest.main",
"os.remove",
"pywekaclassifiers.arff.ArffFile.parse",
"pywekaclassifiers.arff.Date",
"pywekaclassifiers.arff.ArffFile",
"pywekaclassifiers.arff.Nom",
"pywekaclassifiers.classifiers.Classifier.load",
"pywekaclassifiers.arff.Int",
"pywekaclassifiers.classifiers.Classifier",
"os.path.isfile",
"pywekaclassifiers.classifiers.IBk",
"pywekaclassifiers.arff.Num",
"pywekaclassifiers.classifiers.IBk.load",
"os.path.join",
"pywekaclassifiers.classifiers.PredictionResult"
] |
[((11006, 11021), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11019, 11021), False, 'import unittest\n'), ((632, 641), 'pywekaclassifiers.arff.Num', 'Num', (['(1.23)'], {}), '(1.23)\n', (635, 641), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((655, 664), 'pywekaclassifiers.arff.Num', 'Num', (['(4.56)'], {}), '(4.56)\n', (658, 664), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((1419, 1481), 'pywekaclassifiers.classifiers.Classifier', 'Classifier', ([], {'name': '"""weka.classifiers.lazy.IBk"""', 'ckargs': "{'-K': 1}"}), "(name='weka.classifiers.lazy.IBk', ckargs={'-K': 1})\n", (1429, 1481), False, 'from pywekaclassifiers.classifiers import Classifier, PredictionResult, PredictionError, BP, DENSE, UPDATEABLE_WEKA_CLASSIFIER_NAMES\n'), ((1503, 1550), 'os.path.join', 'os.path.join', (['BP', '"""fixtures/abalone-train.arff"""'], {}), "(BP, 'fixtures/abalone-train.arff')\n", (1515, 1550), False, 'import os\n'), ((1729, 1776), 'os.path.join', 'os.path.join', (['BP', '"""fixtures/abalone-query.arff"""'], {}), "(BP, 'fixtures/abalone-query.arff')\n", (1741, 1776), False, 'import os\n'), ((1925, 1985), 'pywekaclassifiers.classifiers.PredictionResult', 'PredictionResult', ([], {'actual': 'None', 'predicted': '(7)', 'probability': 'None'}), '(actual=None, predicted=7, probability=None)\n', (1941, 1985), False, 'from pywekaclassifiers.classifiers import Classifier, PredictionResult, PredictionError, BP, DENSE, UPDATEABLE_WEKA_CLASSIFIER_NAMES\n'), ((2365, 2668), 'pywekaclassifiers.arff.ArffFile', 'arff.ArffFile', ([], {'relation': '"""test"""', 'schema': "[('Sex', ('M', 'F', 'I')), ('Length', 'numeric'), ('Diameter', 'numeric'),\n ('Height', 'numeric'), ('Whole weight', 'numeric'), ('Shucked weight',\n 'numeric'), ('Viscera weight', 'numeric'), ('Shell weight', 'numeric'),\n ('Class_Rings', 'integer')]"}), "(relation='test', schema=[('Sex', ('M', 'F', 'I')), ('Length',\n 'numeric'), ('Diameter', 'numeric'), 
('Height', 'numeric'), (\n 'Whole weight', 'numeric'), ('Shucked weight', 'numeric'), (\n 'Viscera weight', 'numeric'), ('Shell weight', 'numeric'), (\n 'Class_Rings', 'integer')])\n", (2378, 2668), False, 'from pywekaclassifiers import arff\n'), ((3601, 3637), 'os.path.join', 'os.path.join', (['BP', '"""fixtures/IBk.pkl"""'], {}), "(BP, 'fixtures/IBk.pkl')\n", (3613, 3637), False, 'import os\n'), ((3669, 3688), 'pywekaclassifiers.classifiers.Classifier.load', 'Classifier.load', (['fn'], {}), '(fn)\n', (3684, 3688), False, 'from pywekaclassifiers.classifiers import Classifier, PredictionResult, PredictionError, BP, DENSE, UPDATEABLE_WEKA_CLASSIFIER_NAMES\n'), ((3975, 4278), 'pywekaclassifiers.arff.ArffFile', 'arff.ArffFile', ([], {'relation': '"""test"""', 'schema': "[('Sex', ('M', 'F', 'I')), ('Length', 'numeric'), ('Diameter', 'numeric'),\n ('Height', 'numeric'), ('Whole weight', 'numeric'), ('Shucked weight',\n 'numeric'), ('Viscera weight', 'numeric'), ('Shell weight', 'numeric'),\n ('Class_Rings', 'integer')]"}), "(relation='test', schema=[('Sex', ('M', 'F', 'I')), ('Length',\n 'numeric'), ('Diameter', 'numeric'), ('Height', 'numeric'), (\n 'Whole weight', 'numeric'), ('Shucked weight', 'numeric'), (\n 'Viscera weight', 'numeric'), ('Shell weight', 'numeric'), (\n 'Class_Rings', 'integer')])\n", (3988, 4278), False, 'from pywekaclassifiers import arff\n'), ((4935, 4943), 'pywekaclassifiers.classifiers.IBk', 'IBk', ([], {'K': '(1)'}), '(K=1)\n', (4938, 4943), False, 'from pywekaclassifiers.classifiers import IBk\n'), ((5012, 5059), 'os.path.join', 'os.path.join', (['BP', '"""fixtures/abalone-train.arff"""'], {}), "(BP, 'fixtures/abalone-train.arff')\n", (5024, 5059), False, 'import os\n'), ((5197, 5244), 'os.path.join', 'os.path.join', (['BP', '"""fixtures/abalone-query.arff"""'], {}), "(BP, 'fixtures/abalone-query.arff')\n", (5209, 5244), False, 'import os\n'), ((5668, 5676), 'pywekaclassifiers.classifiers.IBk', 'IBk', ([], {'K': '(1)'}), '(K=1)\n', (5671, 
5676), False, 'from pywekaclassifiers.classifiers import IBk\n'), ((5810, 5862), 'os.path.join', 'os.path.join', (['BP', '"""fixtures/updateable-train-1.arff"""'], {}), "(BP, 'fixtures/updateable-train-1.arff')\n", (5822, 5862), False, 'import os\n'), ((5883, 5935), 'os.path.join', 'os.path.join', (['BP', '"""fixtures/updateable-train-2.arff"""'], {}), "(BP, 'fixtures/updateable-train-2.arff')\n", (5895, 5935), False, 'import os\n'), ((5954, 5998), 'os.path.join', 'os.path.join', (['BP', '"""fixtures/IBk.updated.pkl"""'], {}), "(BP, 'fixtures/IBk.updated.pkl')\n", (5966, 5998), False, 'import os\n'), ((6010, 6033), 'os.path.isfile', 'os.path.isfile', (['save_fn'], {}), '(save_fn)\n', (6024, 6033), False, 'import os\n'), ((6693, 6710), 'pywekaclassifiers.classifiers.IBk.load', 'IBk.load', (['save_fn'], {}), '(save_fn)\n', (6701, 6710), False, 'from pywekaclassifiers.classifiers import IBk\n'), ((7539, 7564), 'pywekaclassifiers.classifiers.PredictionResult', 'PredictionResult', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (7555, 7564), False, 'from pywekaclassifiers.classifiers import Classifier, PredictionResult, PredictionError, BP, DENSE, UPDATEABLE_WEKA_CLASSIFIER_NAMES\n'), ((7577, 7602), 'pywekaclassifiers.classifiers.PredictionResult', 'PredictionResult', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (7593, 7602), False, 'from pywekaclassifiers.classifiers import Classifier, PredictionResult, PredictionError, BP, DENSE, UPDATEABLE_WEKA_CLASSIFIER_NAMES\n'), ((8906, 8944), 'pywekaclassifiers.arff.ArffFile', 'arff.ArffFile', ([], {'relation': '"""test-abalone"""'}), "(relation='test-abalone')\n", (8919, 8944), False, 'from pywekaclassifiers import arff\n'), ((9384, 9407), 'pywekaclassifiers.arff.ArffFile.parse', 'arff.ArffFile.parse', (['s1'], {}), '(s1)\n', (9403, 9407), False, 'from pywekaclassifiers import arff\n'), ((9577, 9615), 'pywekaclassifiers.arff.ArffFile', 'arff.ArffFile', ([], {'relation': '"""test-abalone"""'}), "(relation='test-abalone')\n", (9590, 
9615), False, 'from pywekaclassifiers import arff\n'), ((10803, 10816), 'os.remove', 'os.remove', (['fn'], {}), '(fn)\n', (10812, 10816), False, 'import os\n'), ((478, 525), 'os.path.join', 'os.path.join', (['BP', '"""fixtures/abalone-train.arff"""'], {}), "(BP, 'fixtures/abalone-train.arff')\n", (490, 525), False, 'import os\n'), ((1029, 1035), 'pywekaclassifiers.arff.Num', 'Num', (['(0)'], {}), '(0)\n', (1032, 1035), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((2171, 2222), 'os.path.join', 'os.path.join', (['BP', '"""fixtures/abalone-query-bad.arff"""'], {}), "(BP, 'fixtures/abalone-query-bad.arff')\n", (2183, 2222), False, 'import os\n'), ((3492, 3552), 'pywekaclassifiers.classifiers.PredictionResult', 'PredictionResult', ([], {'actual': 'None', 'predicted': '(7)', 'probability': 'None'}), '(actual=None, predicted=7, probability=None)\n', (3508, 3552), False, 'from pywekaclassifiers.classifiers import Classifier, PredictionResult, PredictionError, BP, DENSE, UPDATEABLE_WEKA_CLASSIFIER_NAMES\n'), ((3809, 3869), 'pywekaclassifiers.classifiers.PredictionResult', 'PredictionResult', ([], {'actual': 'None', 'predicted': '(7)', 'probability': 'None'}), '(actual=None, predicted=7, probability=None)\n', (3825, 3869), False, 'from pywekaclassifiers.classifiers import Classifier, PredictionResult, PredictionError, BP, DENSE, UPDATEABLE_WEKA_CLASSIFIER_NAMES\n'), ((4831, 4891), 'pywekaclassifiers.classifiers.PredictionResult', 'PredictionResult', ([], {'actual': 'None', 'predicted': '(7)', 'probability': 'None'}), '(actual=None, predicted=7, probability=None)\n', (4847, 4891), False, 'from pywekaclassifiers.classifiers import Classifier, PredictionResult, PredictionError, BP, DENSE, UPDATEABLE_WEKA_CLASSIFIER_NAMES\n'), ((5414, 5474), 'pywekaclassifiers.classifiers.PredictionResult', 'PredictionResult', ([], {'actual': 'None', 'predicted': '(7)', 'probability': 'None'}), '(actual=None, predicted=7, probability=None)\n', (5430, 5474), False, 
'from pywekaclassifiers.classifiers import Classifier, PredictionResult, PredictionError, BP, DENSE, UPDATEABLE_WEKA_CLASSIFIER_NAMES\n'), ((6047, 6065), 'os.remove', 'os.remove', (['save_fn'], {}), '(save_fn)\n', (6056, 6065), False, 'import os\n'), ((8111, 8119), 'pywekaclassifiers.arff.Nom', 'Nom', (['"""M"""'], {}), "('M')\n", (8114, 8119), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((8144, 8153), 'pywekaclassifiers.arff.Num', 'Num', (['(0.35)'], {}), '(0.35)\n', (8147, 8153), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((8180, 8190), 'pywekaclassifiers.arff.Num', 'Num', (['(0.286)'], {}), '(0.286)\n', (8183, 8190), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((8218, 8236), 'pywekaclassifiers.arff.Date', 'Date', (['"""2017-12-01"""'], {}), "('2017-12-01')\n", (8222, 8236), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((8266, 8283), 'pywekaclassifiers.arff.Int', 'Int', (['(15)'], {'cls': '(True)'}), '(15, cls=True)\n', (8269, 8283), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((8324, 8332), 'pywekaclassifiers.arff.Nom', 'Nom', (['"""F"""'], {}), "('F')\n", (8327, 8332), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((8359, 8368), 'pywekaclassifiers.arff.Num', 'Num', (['(0.86)'], {}), '(0.86)\n', (8362, 8368), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((8396, 8414), 'pywekaclassifiers.arff.Date', 'Date', (['"""2017-12-27"""'], {}), "('2017-12-27')\n", (8400, 8414), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((8445, 8454), 'pywekaclassifiers.arff.Num', 'Num', (['(0.98)'], {}), '(0.98)\n', (8448, 8454), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((8484, 8500), 'pywekaclassifiers.arff.Int', 'Int', (['(7)'], {'cls': '(True)'}), '(7, cls=True)\n', (8487, 8500), False, 'from pywekaclassifiers.arff import Num, 
Nom, Int, Str, Date\n'), ((8574, 8582), 'pywekaclassifiers.arff.Nom', 'Nom', (['"""N"""'], {}), "('N')\n", (8577, 8582), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((8607, 8616), 'pywekaclassifiers.arff.Num', 'Num', (['(0.35)'], {}), '(0.35)\n', (8610, 8616), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((8643, 8653), 'pywekaclassifiers.arff.Num', 'Num', (['(0.286)'], {}), '(0.286)\n', (8646, 8653), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((8683, 8700), 'pywekaclassifiers.arff.Int', 'Int', (['(15)'], {'cls': '(True)'}), '(15, cls=True)\n', (8686, 8700), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((8741, 8749), 'pywekaclassifiers.arff.Nom', 'Nom', (['"""B"""'], {}), "('B')\n", (8744, 8749), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((8776, 8785), 'pywekaclassifiers.arff.Num', 'Num', (['(0.86)'], {}), '(0.86)\n', (8779, 8785), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((8816, 8825), 'pywekaclassifiers.arff.Num', 'Num', (['(0.98)'], {}), '(0.98)\n', (8819, 8825), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n'), ((8855, 8871), 'pywekaclassifiers.arff.Int', 'Int', (['(7)'], {'cls': '(True)'}), '(7, cls=True)\n', (8858, 8871), False, 'from pywekaclassifiers.arff import Num, Nom, Int, Str, Date\n')]
|
import os
from collections import deque
from multiprocessing import Process
import cv2 as cv
import dlib
import numpy as np
from skimage import transform as tf
from tqdm import tqdm
STD_SIZE = (224, 224)
stablePntsIDs = [33, 36, 39, 42, 45]
def shape_to_array(shape):
    """Convert a dlib 68-point landmark shape into a (68, 2) float array.

    :param shape: object exposing ``part(i)`` with ``.x``/``.y`` attributes
        for the 68 facial landmarks (as returned by a dlib shape predictor).
    :return: numpy array of shape (68, 2) holding (x, y) per landmark.
    """
    points = [(shape.part(idx).x, shape.part(idx).y) for idx in range(68)]
    return np.asarray(points, dtype=np.float64)
def cut_patch(img, landmarks, height, width, threshold=5):
    """Cut a (2*height x 2*width) patch centered on the mean landmark point.

    :param img: 2-D image array (indexed [row, col] = [y, x]).
    :param landmarks: (N, 2) array of (x, y) points; the patch is centered
        on their mean.
    :param height: half-height of the patch in pixels.
    :param width: half-width of the patch in pixels.
    :param threshold: tolerated out-of-bounds bias before raising.
    :return: copy of the cropped sub-image.

    NOTE(review): each `raise` below is evaluated *after* the center has
    already been clamped to the boundary, so its condition can never be
    true; the checks were presumably meant to test the pre-clamp value --
    confirm intended behavior before relying on these exceptions.
    """
    center_x, center_y = np.mean(landmarks, axis=0)
    # Clamp the center so the patch stays inside the image on each side.
    if center_y - height < 0:
        center_y = height
    if center_y - height < 0 - threshold:
        raise Exception('too much bias in height')
    if center_x - width < 0:
        center_x = width
    if center_x - width < 0 - threshold:
        raise Exception('too much bias in width')
    if center_y + height > img.shape[0]:
        center_y = img.shape[0] - height
    if center_y + height > img.shape[0] + threshold:
        raise Exception('too much bias in height')
    if center_x + width > img.shape[1]:
        center_x = img.shape[1] - width
    if center_x + width > img.shape[1] + threshold:
        raise Exception('too much bias in width')
    # Copy so the caller can mutate the patch without touching `img`.
    cutted_img = np.copy(img[int(round(center_y) - round(height)): int(round(center_y) + round(height)),
                 int(round(center_x) - round(width)): int(round(center_x) + round(width))])
    return cutted_img
def crop_patch(frames, landmarks, mean_face_landmarks):
    """Crop mouth patch
    :param str frames: video_frames
    :param list landmarks: interpolated landmarks

    Smooths landmarks over a 12-frame sliding window, similarity-warps each
    frame so the stable points align with the mean face, and cuts a 120x120
    mouth patch (landmark indices 48-67) from every frame.  Returns a numpy
    array of patches, or None for empty input (the loop body never runs).
    """
    for frame_idx, frame in enumerate(frames):
        if frame_idx == 0:
            # Fresh smoothing window and output buffer at the first frame.
            q_frame, q_landmarks = deque(), deque()
            sequence = []
        q_landmarks.append(landmarks[frame_idx])
        q_frame.append(frame)
        if len(q_frame) == 12:
            # Window full: smooth over it, then process its oldest frame.
            smoothed_landmarks = np.mean(q_landmarks, axis=0)
            cur_landmarks = q_landmarks.popleft()
            cur_frame = q_frame.popleft()
            # -- affine transformation (similarity fit on the stable points)
            trans = tf.estimate_transform('similarity', smoothed_landmarks[stablePntsIDs, :], mean_face_landmarks[stablePntsIDs, :])
            trans_frame = tf.warp(cur_frame, inverse_map=trans.inverse, output_shape=STD_SIZE)
            trans_frame = trans_frame * 255  # note output from wrap is double image (value range [0,1])
            trans_frame = trans_frame.astype('uint8')
            trans_landmarks = trans(cur_landmarks)
            # -- crop mouth patch
            sequence.append(cut_patch(trans_frame, trans_landmarks[48:68], 60, 60))
        if frame_idx == len(landmarks) - 1:
            # Flush frames still queued, reusing the last computed transform.
            # NOTE(review): with fewer than 12 frames `trans` is never
            # assigned and this raises NameError -- confirm callers always
            # pass clips of at least 12 frames.
            while q_frame:
                cur_frame = q_frame.popleft()
                # -- transform frame
                trans_frame = tf.warp(cur_frame, inverse_map=trans.inverse, output_shape=STD_SIZE)
                trans_frame = trans_frame * 255  # note output from wrap is double image (value range [0,1])
                trans_frame = trans_frame.astype('uint8')
                # -- transform landmarks
                trans_landmarks = trans(q_landmarks.popleft())
                # -- crop mouth patch
                sequence.append(cut_patch(trans_frame, trans_landmarks[48:68], 60, 60))
            return np.array(sequence)
    return None
def linear_interpolate(landmarks, start_idx, stop_idx):
    """Linearly interpolate missing landmarks between two detected frames.

    Entries strictly between ``start_idx`` and ``stop_idx`` are overwritten
    in place with points on the line from ``landmarks[start_idx]`` to
    ``landmarks[stop_idx]``; the (mutated) list is also returned.
    """
    span = stop_idx - start_idx
    anchor = landmarks[start_idx]
    delta = landmarks[stop_idx] - anchor
    for offset in range(1, span):
        landmarks[start_idx + offset] = anchor + offset / float(span) * delta
    return landmarks
def landmarks_interpolate(landmarks):
    """Fill in missing (None) per-frame landmark detections.

    Interior gaps between detections are bridged with linear interpolation;
    missing frames before the first / after the last detection are padded
    with the nearest detected landmarks.  Returns the (mutated) list, or
    None when no frame has a detection at all.
    """
    detected = [i for i, lm in enumerate(landmarks) if lm is not None]
    if not detected:
        return None
    # Bridge every interior gap between consecutive detections.
    for prev_i, next_i in zip(detected, detected[1:]):
        if next_i - prev_i > 1:
            landmarks = linear_interpolate(landmarks, prev_i, next_i)
    detected = [i for i, lm in enumerate(landmarks) if lm is not None]
    # -- Corner case: frames at the beginning or end that failed detection
    # are padded with the nearest detected landmarks.
    if detected:
        landmarks[:detected[0]] = [landmarks[detected[0]]] * detected[0]
        landmarks[detected[-1]:] = [landmarks[detected[-1]]] * (len(landmarks) - detected[-1])
    detected = [i for i, lm in enumerate(landmarks) if lm is not None]
    assert len(detected) == len(landmarks), "not every frame has landmark"
    return landmarks
def preprocess_sample(file, face_detector, landmark_detector, mean_face_landmarks, withaudio, defaultcrop):
    """
    Function to preprocess each data sample.

    Reads `<file>.mp4`, optionally extracts its audio track to `<file>.flac`
    via ffmpeg, detects per-frame face landmarks, crops the mouth region of
    interest, and writes all cropped frames side by side to `<file>.png`.
    """
    videoFile = file + ".mp4"
    audioFile = file + ".flac"
    roiFile = file + ".png"
    # Extract the audio from the video file using the FFmpeg utility and save it to a flac file.
    if withaudio:
        v2aCommand = "ffmpeg -y -v quiet -i " + videoFile + " -ac 1 -ar 16000 -vn " + audioFile
        # NOTE(review): shell string built by concatenation; paths containing
        # spaces or shell metacharacters would break (or worse) -- confirm
        # input paths are sanitized upstream.
        os.system(v2aCommand)
    # for each frame, resize to 224x224 and crop the central 112x112 region
    captureObj = cv.VideoCapture(videoFile)
    frames = list()
    landmarks = list()
    while captureObj.isOpened():
        ret, frame = captureObj.read()
        if ret:
            frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            if not len(frame) == 224:
                frame = cv.resize(frame, (224, 224))
            frames.append(frame)
            face_rects = face_detector(frame, 0)  # Detect face
            if len(face_rects) < 1:
                # No face found: record a gap to be interpolated later.
                landmarks.append(None)
                continue
            rect = face_rects[0]  # Proper number of face
            landmark = landmark_detector(frame, rect)  # Detect face landmarks
            landmark = shape_to_array(landmark)
            landmarks.append(landmark)
        else:
            break
    captureObj.release()
    preprocessed_landmarks = landmarks_interpolate(landmarks)
    if preprocessed_landmarks is None:
        # No face detected in any frame: fall back to a fixed crop region.
        if defaultcrop == "lrs":
            frames = [frame[52:172, 52:172] for frame in frames]
        else:
            frames = [frame[103: 223, 67: 187] for frame in frames]
    else:
        frames = crop_patch(frames, preprocessed_landmarks, mean_face_landmarks)
        # crop_patch returns None when it never reaches its flush branch,
        # so this assert is not vacuous.
        assert frames is not None, "cannot crop from {}.".format(videoFile)
    # Concatenate all mouth patches horizontally into one strip image.
    cv.imwrite(roiFile, np.concatenate(frames, axis=1).astype(int))
def preprocess_sample_list(filesList, face_detector, landmark_detector, mean_face_landmarks, withaudio, defaultcrop):
    """Sequentially preprocess every sample in ``filesList`` with a progress bar."""
    progress = tqdm(filesList, leave=True, desc="Preprocess", ncols=75)
    for sample in progress:
        preprocess_sample(sample, face_detector, landmark_detector, mean_face_landmarks, withaudio, defaultcrop)
def preprocessing(filesList, processes, landmark_detector, mean_face_landmarks, withaudio, defaultcrop):
    """Preprocess all samples in parallel worker processes.

    Splits ``filesList`` into roughly ``processes`` chunks and hands each
    chunk to a daemonized worker running ``preprocess_sample_list``; waits
    for all workers to finish before returning.
    """
    # Preprocessing each sample
    print("\nNumber of data samples to be processed = %d" % (len(filesList)))
    print("\n\nStarting preprocessing ....\n")
    face_detector = dlib.get_frontal_face_detector()

    def splitlist(inlist, chunksize):
        # Chunk `inlist` into consecutive sublists of length `chunksize`.
        return [inlist[x:x + chunksize] for x in range(0, len(inlist), chunksize)]

    # BUGFIX: guard against a zero chunk size (when processes > len(filesList)),
    # which would make range()'s step 0 and raise ValueError.
    chunksize = max(1, len(filesList) // processes)
    filesListSplitted = splitlist(filesList, chunksize)
    process_list = []
    for subFilesList in filesListSplitted:
        p = Process(target=preprocess_sample_list,
                    args=(subFilesList, face_detector, landmark_detector, mean_face_landmarks, withaudio, defaultcrop))
        process_list.append(p)
        # BUGFIX: the multiprocessing attribute is lowercase `daemon`; the
        # original `p.Daemon = True` set an unused attribute and left the
        # workers non-daemonic. It must be set before start().
        p.daemon = True
        p.start()
    for p in process_list:
        p.join()
|
[
"cv2.resize",
"tqdm.tqdm",
"numpy.concatenate",
"cv2.cvtColor",
"numpy.empty",
"os.system",
"cv2.VideoCapture",
"numpy.mean",
"numpy.array",
"dlib.get_frontal_face_detector",
"skimage.transform.warp",
"skimage.transform.estimate_transform",
"multiprocessing.Process",
"collections.deque"
] |
[((300, 317), 'numpy.empty', 'np.empty', (['(68, 2)'], {}), '((68, 2))\n', (308, 317), True, 'import numpy as np\n'), ((535, 561), 'numpy.mean', 'np.mean', (['landmarks'], {'axis': '(0)'}), '(landmarks, axis=0)\n', (542, 561), True, 'import numpy as np\n'), ((5524, 5550), 'cv2.VideoCapture', 'cv.VideoCapture', (['videoFile'], {}), '(videoFile)\n', (5539, 5550), True, 'import cv2 as cv\n'), ((6999, 7055), 'tqdm.tqdm', 'tqdm', (['filesList'], {'leave': '(True)', 'desc': '"""Preprocess"""', 'ncols': '(75)'}), "(filesList, leave=True, desc='Preprocess', ncols=75)\n", (7003, 7055), False, 'from tqdm import tqdm\n'), ((7462, 7494), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (7492, 7494), False, 'import dlib\n'), ((5405, 5426), 'os.system', 'os.system', (['v2aCommand'], {}), '(v2aCommand)\n', (5414, 5426), False, 'import os\n'), ((7785, 7927), 'multiprocessing.Process', 'Process', ([], {'target': 'preprocess_sample_list', 'args': '(subFilesList, face_detector, landmark_detector, mean_face_landmarks,\n withaudio, defaultcrop)'}), '(target=preprocess_sample_list, args=(subFilesList, face_detector,\n landmark_detector, mean_face_landmarks, withaudio, defaultcrop))\n', (7792, 7927), False, 'from multiprocessing import Process\n'), ((1966, 1994), 'numpy.mean', 'np.mean', (['q_landmarks'], {'axis': '(0)'}), '(q_landmarks, axis=0)\n', (1973, 1994), True, 'import numpy as np\n'), ((2150, 2266), 'skimage.transform.estimate_transform', 'tf.estimate_transform', (['"""similarity"""', 'smoothed_landmarks[stablePntsIDs, :]', 'mean_face_landmarks[stablePntsIDs, :]'], {}), "('similarity', smoothed_landmarks[stablePntsIDs, :],\n mean_face_landmarks[stablePntsIDs, :])\n", (2171, 2266), True, 'from skimage import transform as tf\n'), ((2290, 2358), 'skimage.transform.warp', 'tf.warp', (['cur_frame'], {'inverse_map': 'trans.inverse', 'output_shape': 'STD_SIZE'}), '(cur_frame, inverse_map=trans.inverse, output_shape=STD_SIZE)\n', (2297, 2358), True, 
'from skimage import transform as tf\n'), ((3373, 3391), 'numpy.array', 'np.array', (['sequence'], {}), '(sequence)\n', (3381, 3391), True, 'import numpy as np\n'), ((5708, 5745), 'cv2.cvtColor', 'cv.cvtColor', (['frame', 'cv.COLOR_BGR2GRAY'], {}), '(frame, cv.COLOR_BGR2GRAY)\n', (5719, 5745), True, 'import cv2 as cv\n'), ((1773, 1780), 'collections.deque', 'deque', ([], {}), '()\n', (1778, 1780), False, 'from collections import deque\n'), ((1782, 1789), 'collections.deque', 'deque', ([], {}), '()\n', (1787, 1789), False, 'from collections import deque\n'), ((2881, 2949), 'skimage.transform.warp', 'tf.warp', (['cur_frame'], {'inverse_map': 'trans.inverse', 'output_shape': 'STD_SIZE'}), '(cur_frame, inverse_map=trans.inverse, output_shape=STD_SIZE)\n', (2888, 2949), True, 'from skimage import transform as tf\n'), ((5810, 5838), 'cv2.resize', 'cv.resize', (['frame', '(224, 224)'], {}), '(frame, (224, 224))\n', (5819, 5838), True, 'import cv2 as cv\n'), ((6815, 6845), 'numpy.concatenate', 'np.concatenate', (['frames'], {'axis': '(1)'}), '(frames, axis=1)\n', (6829, 6845), True, 'import numpy as np\n')]
|