# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'IPSettings.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_IPSetting(object):
def setupUi(self, IPSetting):
    """Build the IPSetting form's widget hierarchy.

    Auto-generated by pyuic5 from 'IPSettings.ui' — edit the .ui file, not
    this code. Creates a 4x4 grid of group boxes, one per network device,
    each with IP address / port / SW version / date fields, plus SAVE and
    CANCEL buttons. The *_LE widgets are QTextEdit inputs and the *_LB
    widgets are QLabel captions; captions are assigned in retranslateUi().
    """
    IPSetting.setObjectName("IPSetting")
    IPSetting.resize(1428, 884)
    # Main container frame.
    # NOTE(review): the frame (1519x894) is larger than the window
    # (1428x884), so its right/bottom edges are clipped — confirm intended.
    self.frame = QtWidgets.QFrame(IPSetting)
    self.frame.setGeometry(QtCore.QRect(12, 11, 1519, 894))
    self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame.setObjectName("frame")
    # SAVE / CANCEL button row (bottom-right), laid out in a QFormLayout.
    self.layoutWidget_5 = QtWidgets.QWidget(self.frame)
    self.layoutWidget_5.setGeometry(QtCore.QRect(1150, 600, 158, 25))
    self.layoutWidget_5.setObjectName("layoutWidget_5")
    self.formLayout = QtWidgets.QFormLayout(self.layoutWidget_5)
    self.formLayout.setContentsMargins(0, 0, 0, 0)
    self.formLayout.setObjectName("formLayout")
    self.SA_BTN = QtWidgets.QPushButton(self.layoutWidget_5)
    self.SA_BTN.setObjectName("SA_BTN")
    self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.SA_BTN)
    self.CNCL_BTN = QtWidgets.QPushButton(self.layoutWidget_5)
    self.CNCL_BTN.setObjectName("CNCL_BTN")
    self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.CNCL_BTN)
    # Host widget for the 4-row grid of device group boxes.
    self.layoutWidget = QtWidgets.QWidget(self.frame)
    self.layoutWidget.setGeometry(QtCore.QRect(0, 0, 1141, 631))
    self.layoutWidget.setObjectName("layoutWidget")
    self.gridLayout_5 = QtWidgets.QGridLayout(self.layoutWidget)
    self.gridLayout_5.setContentsMargins(0, 0, 0, 0)
    self.gridLayout_5.setObjectName("gridLayout_5")
    # ---- Row 1 of group boxes (gridLayout) -------------------------------
    self.gridLayout = QtWidgets.QGridLayout()
    self.gridLayout.setObjectName("gridLayout")
    # groupBox_4: "CS COMPUTER" (CSC_* widgets).
    self.groupBox_4 = QtWidgets.QGroupBox(self.layoutWidget)
    font = QtGui.QFont()
    font.setFamily("Arial")
    self.groupBox_4.setFont(font)
    self.groupBox_4.setObjectName("groupBox_4")
    self.CSC_IP_LB = QtWidgets.QLabel(self.groupBox_4)
    self.CSC_IP_LB.setGeometry(QtCore.QRect(11, 21, 59, 16))
    self.CSC_IP_LB.setObjectName("CSC_IP_LB")
    self.CSC_P_LB = QtWidgets.QLabel(self.groupBox_4)
    self.CSC_P_LB.setGeometry(QtCore.QRect(11, 53, 48, 16))
    self.CSC_P_LB.setObjectName("CSC_P_LB")
    self.CSC_D_LB = QtWidgets.QLabel(self.groupBox_4)
    self.CSC_D_LB.setGeometry(QtCore.QRect(11, 115, 26, 16))
    self.CSC_D_LB.setObjectName("CSC_D_LB")
    self.CSC_SW_LE = QtWidgets.QTextEdit(self.groupBox_4)
    self.CSC_SW_LE.setGeometry(QtCore.QRect(76, 84, 194, 25))
    self.CSC_SW_LE.setObjectName("CSC_SW_LE")
    self.CSC_IP_LE = QtWidgets.QTextEdit(self.groupBox_4)
    self.CSC_IP_LE.setGeometry(QtCore.QRect(76, 21, 194, 26))
    self.CSC_IP_LE.setObjectName("CSC_IP_LE")
    self.CSC_P_LE = QtWidgets.QTextEdit(self.groupBox_4)
    self.CSC_P_LE.setGeometry(QtCore.QRect(76, 53, 194, 25))
    self.CSC_P_LE.setObjectName("CSC_P_LE")
    self.CSC_D_LE = QtWidgets.QTextEdit(self.groupBox_4)
    self.CSC_D_LE.setGeometry(QtCore.QRect(76, 115, 61, 25))
    self.CSC_D_LE.setObjectName("CSC_D_LE")
    self.CSC_SW_LB = QtWidgets.QLabel(self.groupBox_4)
    self.CSC_SW_LB.setGeometry(QtCore.QRect(11, 84, 51, 16))
    self.CSC_SW_LB.setObjectName("CSC_SW_LB")
    self.CSC_Y_LE = QtWidgets.QTextEdit(self.groupBox_4)
    self.CSC_Y_LE.setGeometry(QtCore.QRect(209, 115, 61, 25))
    self.CSC_Y_LE.setObjectName("CSC_Y_LE")
    self.CSC_M_LE = QtWidgets.QTextEdit(self.groupBox_4)
    self.CSC_M_LE.setGeometry(QtCore.QRect(143, 115, 60, 25))
    self.CSC_M_LE.setObjectName("CSC_M_LE")
    # Background frame for this group box; created last, so raise_() calls
    # below restore the intended z-order (frame behind the input widgets).
    self.frame_3 = QtWidgets.QFrame(self.groupBox_4)
    self.frame_3.setGeometry(QtCore.QRect(0, 0, 281, 151))
    self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_3.setObjectName("frame_3")
    self.frame_3.raise_()
    self.CSC_IP_LB.raise_()
    self.CSC_P_LB.raise_()
    self.CSC_D_LB.raise_()
    self.CSC_SW_LE.raise_()
    self.CSC_IP_LE.raise_()
    self.CSC_P_LE.raise_()
    self.CSC_D_LE.raise_()
    self.CSC_SW_LB.raise_()
    self.CSC_Y_LE.raise_()
    self.CSC_M_LE.raise_()
    self.gridLayout.addWidget(self.groupBox_4, 0, 0, 1, 1)
    # groupBox_7: "VOIP GATEWAY" (CSVG_* widgets).
    self.groupBox_7 = QtWidgets.QGroupBox(self.layoutWidget)
    font = QtGui.QFont()
    font.setFamily("Arial")
    self.groupBox_7.setFont(font)
    self.groupBox_7.setObjectName("groupBox_7")
    self.frame_4 = QtWidgets.QFrame(self.groupBox_7)
    self.frame_4.setGeometry(QtCore.QRect(0, 0, 281, 151))
    self.frame_4.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_4.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_4.setObjectName("frame_4")
    self.CSVG_D_LB = QtWidgets.QLabel(self.groupBox_7)
    self.CSVG_D_LB.setGeometry(QtCore.QRect(11, 115, 26, 16))
    self.CSVG_D_LB.setObjectName("CSVG_D_LB")
    self.CSVG_V_LE = QtWidgets.QTextEdit(self.groupBox_7)
    self.CSVG_V_LE.setGeometry(QtCore.QRect(76, 84, 194, 25))
    self.CSVG_V_LE.setObjectName("CSVG_V_LE")
    self.CSVG_IP_LE = QtWidgets.QTextEdit(self.groupBox_7)
    self.CSVG_IP_LE.setGeometry(QtCore.QRect(76, 21, 194, 26))
    self.CSVG_IP_LE.setObjectName("CSVG_IP_LE")
    self.CSVG_IP_LB = QtWidgets.QLabel(self.groupBox_7)
    self.CSVG_IP_LB.setGeometry(QtCore.QRect(11, 21, 59, 16))
    self.CSVG_IP_LB.setObjectName("CSVG_IP_LB")
    self.CSVG_P_LE = QtWidgets.QTextEdit(self.groupBox_7)
    self.CSVG_P_LE.setGeometry(QtCore.QRect(76, 53, 194, 25))
    self.CSVG_P_LE.setObjectName("CSVG_P_LE")
    self.CSVG_P_LB = QtWidgets.QLabel(self.groupBox_7)
    self.CSVG_P_LB.setGeometry(QtCore.QRect(11, 53, 48, 16))
    self.CSVG_P_LB.setObjectName("CSVG_P_LB")
    self.CSVG_V_LB = QtWidgets.QLabel(self.groupBox_7)
    self.CSVG_V_LB.setGeometry(QtCore.QRect(11, 84, 51, 16))
    self.CSVG_V_LB.setObjectName("CSVG_V_LB")
    self.CSVG_D_LE = QtWidgets.QTextEdit(self.groupBox_7)
    self.CSVG_D_LE.setGeometry(QtCore.QRect(76, 115, 61, 25))
    self.CSVG_D_LE.setObjectName("CSVG_D_LE")
    self.CSVG_Y_LE = QtWidgets.QTextEdit(self.groupBox_7)
    self.CSVG_Y_LE.setGeometry(QtCore.QRect(209, 115, 61, 25))
    self.CSVG_Y_LE.setObjectName("CSVG_Y_LE")
    self.CSVG_M_LE = QtWidgets.QTextEdit(self.groupBox_7)
    self.CSVG_M_LE.setGeometry(QtCore.QRect(143, 115, 60, 25))
    self.CSVG_M_LE.setObjectName("CSVG_M_LE")
    self.gridLayout.addWidget(self.groupBox_7, 0, 1, 1, 1)
    # groupBox_3: "IP ENCRYPTOR 1" (CSIE1_* widgets).
    self.groupBox_3 = QtWidgets.QGroupBox(self.layoutWidget)
    font = QtGui.QFont()
    font.setFamily("Arial")
    self.groupBox_3.setFont(font)
    self.groupBox_3.setObjectName("groupBox_3")
    self.frame_5 = QtWidgets.QFrame(self.groupBox_3)
    self.frame_5.setGeometry(QtCore.QRect(0, 0, 281, 151))
    self.frame_5.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_5.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_5.setObjectName("frame_5")
    self.CSIE1_D_LB = QtWidgets.QLabel(self.groupBox_3)
    self.CSIE1_D_LB.setGeometry(QtCore.QRect(11, 115, 26, 16))
    self.CSIE1_D_LB.setObjectName("CSIE1_D_LB")
    self.CSIE1_V_LE = QtWidgets.QTextEdit(self.groupBox_3)
    self.CSIE1_V_LE.setGeometry(QtCore.QRect(76, 84, 194, 25))
    self.CSIE1_V_LE.setObjectName("CSIE1_V_LE")
    self.CSIE1_IP_LE = QtWidgets.QTextEdit(self.groupBox_3)
    self.CSIE1_IP_LE.setGeometry(QtCore.QRect(76, 21, 194, 26))
    self.CSIE1_IP_LE.setObjectName("CSIE1_IP_LE")
    self.CSIE1_IP_LB = QtWidgets.QLabel(self.groupBox_3)
    self.CSIE1_IP_LB.setGeometry(QtCore.QRect(11, 21, 59, 16))
    self.CSIE1_IP_LB.setObjectName("CSIE1_IP_LB")
    self.CSIE1_P_LE = QtWidgets.QTextEdit(self.groupBox_3)
    self.CSIE1_P_LE.setGeometry(QtCore.QRect(76, 53, 194, 25))
    self.CSIE1_P_LE.setObjectName("CSIE1_P_LE")
    self.CSIE1_P_LB = QtWidgets.QLabel(self.groupBox_3)
    self.CSIE1_P_LB.setGeometry(QtCore.QRect(11, 53, 48, 16))
    self.CSIE1_P_LB.setObjectName("CSIE1_P_LB")
    self.CSIE1_D_LE = QtWidgets.QTextEdit(self.groupBox_3)
    self.CSIE1_D_LE.setGeometry(QtCore.QRect(76, 115, 61, 25))
    self.CSIE1_D_LE.setObjectName("CSIE1_D_LE")
    self.CSIE1_V_LB = QtWidgets.QLabel(self.groupBox_3)
    self.CSIE1_V_LB.setGeometry(QtCore.QRect(11, 84, 51, 16))
    self.CSIE1_V_LB.setObjectName("CSIE1_V_LB")
    self.CSIE1_Y_LE = QtWidgets.QTextEdit(self.groupBox_3)
    self.CSIE1_Y_LE.setGeometry(QtCore.QRect(209, 115, 61, 25))
    self.CSIE1_Y_LE.setObjectName("CSIE1_Y_LE")
    self.CSIE1_M_LE = QtWidgets.QTextEdit(self.groupBox_3)
    self.CSIE1_M_LE.setGeometry(QtCore.QRect(143, 115, 60, 25))
    self.CSIE1_M_LE.setObjectName("CSIE1_M_LE")
    self.gridLayout.addWidget(self.groupBox_3, 0, 2, 1, 1)
    # groupBox_6: "DV LAN SW" (DVLS_* widgets).
    self.groupBox_6 = QtWidgets.QGroupBox(self.layoutWidget)
    font = QtGui.QFont()
    font.setFamily("Arial")
    self.groupBox_6.setFont(font)
    self.groupBox_6.setObjectName("groupBox_6")
    self.frame_6 = QtWidgets.QFrame(self.groupBox_6)
    self.frame_6.setGeometry(QtCore.QRect(0, 0, 281, 151))
    self.frame_6.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_6.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_6.setObjectName("frame_6")
    self.DVLS_IP_LE = QtWidgets.QTextEdit(self.groupBox_6)
    self.DVLS_IP_LE.setGeometry(QtCore.QRect(76, 21, 194, 26))
    self.DVLS_IP_LE.setObjectName("DVLS_IP_LE")
    self.DVLS_P_LB = QtWidgets.QLabel(self.groupBox_6)
    self.DVLS_P_LB.setGeometry(QtCore.QRect(11, 53, 48, 16))
    self.DVLS_P_LB.setObjectName("DVLS_P_LB")
    self.DVLS_D_LE = QtWidgets.QTextEdit(self.groupBox_6)
    self.DVLS_D_LE.setGeometry(QtCore.QRect(76, 115, 61, 25))
    self.DVLS_D_LE.setObjectName("DVLS_D_LE")
    self.DVLS_P_LE = QtWidgets.QTextEdit(self.groupBox_6)
    self.DVLS_P_LE.setGeometry(QtCore.QRect(76, 53, 194, 25))
    self.DVLS_P_LE.setObjectName("DVLS_P_LE")
    self.DVLS_Y_LE = QtWidgets.QTextEdit(self.groupBox_6)
    self.DVLS_Y_LE.setGeometry(QtCore.QRect(209, 115, 61, 25))
    self.DVLS_Y_LE.setObjectName("DVLS_Y_LE")
    self.DVLS_V_LB = QtWidgets.QLabel(self.groupBox_6)
    self.DVLS_V_LB.setGeometry(QtCore.QRect(11, 84, 51, 16))
    self.DVLS_V_LB.setObjectName("DVLS_V_LB")
    self.DVLS_M_LE = QtWidgets.QTextEdit(self.groupBox_6)
    self.DVLS_M_LE.setGeometry(QtCore.QRect(143, 115, 60, 25))
    self.DVLS_M_LE.setObjectName("DVLS_M_LE")
    self.DVLS_V_LE = QtWidgets.QTextEdit(self.groupBox_6)
    self.DVLS_V_LE.setGeometry(QtCore.QRect(76, 84, 194, 25))
    self.DVLS_V_LE.setObjectName("DVLS_V_LE")
    self.DVLS_D_LB = QtWidgets.QLabel(self.groupBox_6)
    self.DVLS_D_LB.setGeometry(QtCore.QRect(11, 115, 26, 16))
    self.DVLS_D_LB.setObjectName("DVLS_D_LB")
    self.DVLS_IP_LB = QtWidgets.QLabel(self.groupBox_6)
    self.DVLS_IP_LB.setGeometry(QtCore.QRect(11, 21, 59, 16))
    self.DVLS_IP_LB.setObjectName("DVLS_IP_LB")
    self.gridLayout.addWidget(self.groupBox_6, 0, 3, 1, 1)
    self.gridLayout_5.addLayout(self.gridLayout, 0, 0, 1, 1)
    # ---- Row 2 of group boxes (gridLayout_2) -----------------------------
    self.gridLayout_2 = QtWidgets.QGridLayout()
    self.gridLayout_2.setObjectName("gridLayout_2")
    # groupBox_9: "CS LAN SW 1" (CSLS1_* widgets).
    self.groupBox_9 = QtWidgets.QGroupBox(self.layoutWidget)
    font = QtGui.QFont()
    font.setFamily("Arial")
    self.groupBox_9.setFont(font)
    self.groupBox_9.setObjectName("groupBox_9")
    self.frame_7 = QtWidgets.QFrame(self.groupBox_9)
    self.frame_7.setGeometry(QtCore.QRect(0, 0, 281, 151))
    self.frame_7.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_7.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_7.setObjectName("frame_7")
    self.CSLS1_Y_LE = QtWidgets.QTextEdit(self.groupBox_9)
    self.CSLS1_Y_LE.setGeometry(QtCore.QRect(209, 115, 61, 25))
    self.CSLS1_Y_LE.setObjectName("CSLS1_Y_LE")
    self.CSLS1_M_LE = QtWidgets.QTextEdit(self.groupBox_9)
    self.CSLS1_M_LE.setGeometry(QtCore.QRect(143, 115, 60, 25))
    self.CSLS1_M_LE.setObjectName("CSLS1_M_LE")
    self.CSLS1_D_LB = QtWidgets.QLabel(self.groupBox_9)
    self.CSLS1_D_LB.setGeometry(QtCore.QRect(11, 115, 26, 16))
    self.CSLS1_D_LB.setObjectName("CSLS1_D_LB")
    self.CSLS1_V_LE = QtWidgets.QTextEdit(self.groupBox_9)
    self.CSLS1_V_LE.setGeometry(QtCore.QRect(76, 84, 194, 25))
    self.CSLS1_V_LE.setObjectName("CSLS1_V_LE")
    self.CSLS1_IP_LB = QtWidgets.QLabel(self.groupBox_9)
    self.CSLS1_IP_LB.setGeometry(QtCore.QRect(11, 21, 59, 16))
    self.CSLS1_IP_LB.setObjectName("CSLS1_IP_LB")
    self.CSLS1_P_LB = QtWidgets.QLabel(self.groupBox_9)
    self.CSLS1_P_LB.setGeometry(QtCore.QRect(11, 53, 48, 16))
    self.CSLS1_P_LB.setObjectName("CSLS1_P_LB")
    self.CSLS1_IP_LE = QtWidgets.QTextEdit(self.groupBox_9)
    self.CSLS1_IP_LE.setGeometry(QtCore.QRect(76, 21, 194, 26))
    self.CSLS1_IP_LE.setObjectName("CSLS1_IP_LE")
    self.CSLS1_D_LE = QtWidgets.QTextEdit(self.groupBox_9)
    self.CSLS1_D_LE.setGeometry(QtCore.QRect(76, 115, 61, 25))
    self.CSLS1_D_LE.setObjectName("CSLS1_D_LE")
    self.CSLS1_V_LB = QtWidgets.QLabel(self.groupBox_9)
    self.CSLS1_V_LB.setGeometry(QtCore.QRect(11, 84, 51, 16))
    self.CSLS1_V_LB.setObjectName("CSLS1_V_LB")
    self.CSLS1_P_LE = QtWidgets.QTextEdit(self.groupBox_9)
    self.CSLS1_P_LE.setGeometry(QtCore.QRect(76, 53, 194, 25))
    self.CSLS1_P_LE.setObjectName("CSLS1_P_LE")
    self.gridLayout_2.addWidget(self.groupBox_9, 0, 0, 1, 1)
    # groupBox_10: "MINI IP LOGGER" (CSIL_* widgets).
    self.groupBox_10 = QtWidgets.QGroupBox(self.layoutWidget)
    font = QtGui.QFont()
    font.setFamily("Arial")
    self.groupBox_10.setFont(font)
    self.groupBox_10.setObjectName("groupBox_10")
    self.frame_8 = QtWidgets.QFrame(self.groupBox_10)
    self.frame_8.setGeometry(QtCore.QRect(0, 0, 281, 151))
    self.frame_8.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_8.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_8.setObjectName("frame_8")
    self.CSIL_D_LE = QtWidgets.QTextEdit(self.groupBox_10)
    self.CSIL_D_LE.setGeometry(QtCore.QRect(76, 115, 61, 25))
    self.CSIL_D_LE.setObjectName("CSIL_D_LE")
    self.CSIL_Y_LE = QtWidgets.QTextEdit(self.groupBox_10)
    self.CSIL_Y_LE.setGeometry(QtCore.QRect(209, 115, 61, 25))
    self.CSIL_Y_LE.setObjectName("CSIL_Y_LE")
    self.CSIL_M_LE = QtWidgets.QTextEdit(self.groupBox_10)
    self.CSIL_M_LE.setGeometry(QtCore.QRect(143, 115, 60, 25))
    self.CSIL_M_LE.setObjectName("CSIL_M_LE")
    self.CSIL_IP_LE = QtWidgets.QTextEdit(self.groupBox_10)
    self.CSIL_IP_LE.setGeometry(QtCore.QRect(76, 21, 194, 26))
    self.CSIL_IP_LE.setObjectName("CSIL_IP_LE")
    self.CSIL_D_LB = QtWidgets.QLabel(self.groupBox_10)
    self.CSIL_D_LB.setGeometry(QtCore.QRect(11, 115, 26, 16))
    self.CSIL_D_LB.setObjectName("CSIL_D_LB")
    self.CSIL_V_LE = QtWidgets.QTextEdit(self.groupBox_10)
    self.CSIL_V_LE.setGeometry(QtCore.QRect(76, 84, 194, 25))
    self.CSIL_V_LE.setObjectName("CSIL_V_LE")
    self.CSIL_IP_LB = QtWidgets.QLabel(self.groupBox_10)
    self.CSIL_IP_LB.setGeometry(QtCore.QRect(11, 21, 59, 16))
    self.CSIL_IP_LB.setObjectName("CSIL_IP_LB")
    self.CSIL_P_LE = QtWidgets.QTextEdit(self.groupBox_10)
    self.CSIL_P_LE.setGeometry(QtCore.QRect(76, 53, 194, 25))
    self.CSIL_P_LE.setObjectName("CSIL_P_LE")
    self.CSIL_V_LB = QtWidgets.QLabel(self.groupBox_10)
    self.CSIL_V_LB.setGeometry(QtCore.QRect(11, 84, 51, 16))
    self.CSIL_V_LB.setObjectName("CSIL_V_LB")
    self.CSIL_P_LB = QtWidgets.QLabel(self.groupBox_10)
    self.CSIL_P_LB.setGeometry(QtCore.QRect(11, 53, 48, 16))
    self.CSIL_P_LB.setObjectName("CSIL_P_LB")
    self.gridLayout_2.addWidget(self.groupBox_10, 0, 1, 1, 1)
    # groupBox_11: IP encryptor 2 (CSIE2_* widgets); title set in
    # retranslateUi (outside this view).
    self.groupBox_11 = QtWidgets.QGroupBox(self.layoutWidget)
    font = QtGui.QFont()
    font.setFamily("Arial")
    self.groupBox_11.setFont(font)
    self.groupBox_11.setObjectName("groupBox_11")
    self.frame_9 = QtWidgets.QFrame(self.groupBox_11)
    self.frame_9.setGeometry(QtCore.QRect(0, 0, 281, 151))
    self.frame_9.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_9.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_9.setObjectName("frame_9")
    self.CSIE2_Y_LE = QtWidgets.QTextEdit(self.groupBox_11)
    self.CSIE2_Y_LE.setGeometry(QtCore.QRect(209, 115, 61, 25))
    self.CSIE2_Y_LE.setObjectName("CSIE2_Y_LE")
    self.CSIE2_M_LE = QtWidgets.QTextEdit(self.groupBox_11)
    self.CSIE2_M_LE.setGeometry(QtCore.QRect(143, 115, 60, 25))
    self.CSIE2_M_LE.setObjectName("CSIE2_M_LE")
    self.CSIE2_V_LE = QtWidgets.QTextEdit(self.groupBox_11)
    self.CSIE2_V_LE.setGeometry(QtCore.QRect(76, 84, 194, 25))
    self.CSIE2_V_LE.setObjectName("CSIE2_V_LE")
    self.CSIE2_D_LB = QtWidgets.QLabel(self.groupBox_11)
    self.CSIE2_D_LB.setGeometry(QtCore.QRect(11, 115, 26, 16))
    self.CSIE2_D_LB.setObjectName("CSIE2_D_LB")
    self.CSIE2_IP_LE = QtWidgets.QTextEdit(self.groupBox_11)
    self.CSIE2_IP_LE.setGeometry(QtCore.QRect(76, 21, 194, 26))
    self.CSIE2_IP_LE.setObjectName("CSIE2_IP_LE")
    self.CSIE2_P_LB = QtWidgets.QLabel(self.groupBox_11)
    self.CSIE2_P_LB.setGeometry(QtCore.QRect(11, 53, 48, 16))
    self.CSIE2_P_LB.setObjectName("CSIE2_P_LB")
    self.CSIE2_IP_LB = QtWidgets.QLabel(self.groupBox_11)
    self.CSIE2_IP_LB.setGeometry(QtCore.QRect(11, 21, 59, 16))
    self.CSIE2_IP_LB.setObjectName("CSIE2_IP_LB")
    self.CSIE2_P_LE = QtWidgets.QTextEdit(self.groupBox_11)
    self.CSIE2_P_LE.setGeometry(QtCore.QRect(76, 53, 194, 25))
    self.CSIE2_P_LE.setObjectName("CSIE2_P_LE")
    self.CSIE2_V_LB = QtWidgets.QLabel(self.groupBox_11)
    self.CSIE2_V_LB.setGeometry(QtCore.QRect(11, 84, 51, 16))
    self.CSIE2_V_LB.setObjectName("CSIE2_V_LB")
    self.CSIE2_D_LE = QtWidgets.QTextEdit(self.groupBox_11)
    self.CSIE2_D_LE.setGeometry(QtCore.QRect(76, 115, 61, 25))
    self.CSIE2_D_LE.setObjectName("CSIE2_D_LE")
    self.gridLayout_2.addWidget(self.groupBox_11, 0, 2, 1, 1)
    # groupBox_12: DVVDR_* widgets.
    # NOTE(review): the *_LE_2 names here are QLabels (captions) despite the
    # "_LE" suffix — misnamed in the .ui file, kept as generated.
    self.groupBox_12 = QtWidgets.QGroupBox(self.layoutWidget)
    font = QtGui.QFont()
    font.setFamily("Arial")
    self.groupBox_12.setFont(font)
    self.groupBox_12.setObjectName("groupBox_12")
    self.frame_10 = QtWidgets.QFrame(self.groupBox_12)
    self.frame_10.setGeometry(QtCore.QRect(0, 0, 281, 151))
    self.frame_10.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_10.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_10.setObjectName("frame_10")
    self.DVVDR_D_LE_2 = QtWidgets.QLabel(self.groupBox_12)
    self.DVVDR_D_LE_2.setGeometry(QtCore.QRect(11, 115, 26, 16))
    self.DVVDR_D_LE_2.setObjectName("DVVDR_D_LE_2")
    self.DVVDR_V_LE = QtWidgets.QTextEdit(self.groupBox_12)
    self.DVVDR_V_LE.setGeometry(QtCore.QRect(76, 84, 194, 25))
    self.DVVDR_V_LE.setObjectName("DVVDR_V_LE")
    self.DVVDR_IP_LE = QtWidgets.QTextEdit(self.groupBox_12)
    self.DVVDR_IP_LE.setGeometry(QtCore.QRect(76, 21, 194, 26))
    self.DVVDR_IP_LE.setObjectName("DVVDR_IP_LE")
    self.DVVDR_IP_LE_2 = QtWidgets.QLabel(self.groupBox_12)
    self.DVVDR_IP_LE_2.setGeometry(QtCore.QRect(11, 21, 59, 16))
    self.DVVDR_IP_LE_2.setObjectName("DVVDR_IP_LE_2")
    self.DVVDR_P_LE = QtWidgets.QTextEdit(self.groupBox_12)
    self.DVVDR_P_LE.setGeometry(QtCore.QRect(76, 53, 194, 25))
    self.DVVDR_P_LE.setObjectName("DVVDR_P_LE")
    self.DVVDR_P_LE_2 = QtWidgets.QLabel(self.groupBox_12)
    self.DVVDR_P_LE_2.setGeometry(QtCore.QRect(11, 53, 48, 16))
    self.DVVDR_P_LE_2.setObjectName("DVVDR_P_LE_2")
    self.DVVDR_V_LE_2 = QtWidgets.QLabel(self.groupBox_12)
    self.DVVDR_V_LE_2.setGeometry(QtCore.QRect(11, 84, 51, 16))
    self.DVVDR_V_LE_2.setObjectName("DVVDR_V_LE_2")
    self.DVVDR_D_LE = QtWidgets.QTextEdit(self.groupBox_12)
    self.DVVDR_D_LE.setGeometry(QtCore.QRect(76, 115, 61, 25))
    self.DVVDR_D_LE.setObjectName("DVVDR_D_LE")
    self.DVVDR_Y_LE = QtWidgets.QTextEdit(self.groupBox_12)
    self.DVVDR_Y_LE.setGeometry(QtCore.QRect(209, 115, 61, 25))
    self.DVVDR_Y_LE.setObjectName("DVVDR_Y_LE")
    self.DVVDR_M_LE = QtWidgets.QTextEdit(self.groupBox_12)
    self.DVVDR_M_LE.setGeometry(QtCore.QRect(143, 115, 60, 25))
    self.DVVDR_M_LE.setObjectName("DVVDR_M_LE")
    self.gridLayout_2.addWidget(self.groupBox_12, 0, 3, 1, 1)
    self.gridLayout_5.addLayout(self.gridLayout_2, 1, 0, 1, 1)
    # ---- Row 3 of group boxes (gridLayout_3) -----------------------------
    self.gridLayout_3 = QtWidgets.QGridLayout()
    self.gridLayout_3.setObjectName("gridLayout_3")
    # groupBox_16: DVVP_* widgets (placed at column 3 of this row).
    self.groupBox_16 = QtWidgets.QGroupBox(self.layoutWidget)
    font = QtGui.QFont()
    font.setFamily("Arial")
    self.groupBox_16.setFont(font)
    self.groupBox_16.setObjectName("groupBox_16")
    self.frame_14 = QtWidgets.QFrame(self.groupBox_16)
    self.frame_14.setGeometry(QtCore.QRect(0, 0, 281, 151))
    self.frame_14.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_14.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_14.setObjectName("frame_14")
    self.DVVP_D_LB = QtWidgets.QLabel(self.groupBox_16)
    self.DVVP_D_LB.setGeometry(QtCore.QRect(11, 115, 26, 16))
    self.DVVP_D_LB.setObjectName("DVVP_D_LB")
    self.DVVP_V_LE = QtWidgets.QTextEdit(self.groupBox_16)
    self.DVVP_V_LE.setGeometry(QtCore.QRect(76, 84, 194, 25))
    self.DVVP_V_LE.setObjectName("DVVP_V_LE")
    self.DVVP_IP_LE = QtWidgets.QTextEdit(self.groupBox_16)
    self.DVVP_IP_LE.setGeometry(QtCore.QRect(76, 21, 194, 26))
    self.DVVP_IP_LE.setObjectName("DVVP_IP_LE")
    self.DVVP_IP_LB = QtWidgets.QLabel(self.groupBox_16)
    self.DVVP_IP_LB.setGeometry(QtCore.QRect(11, 21, 59, 16))
    self.DVVP_IP_LB.setObjectName("DVVP_IP_LB")
    self.DVVP_P_LE = QtWidgets.QTextEdit(self.groupBox_16)
    self.DVVP_P_LE.setGeometry(QtCore.QRect(76, 53, 194, 25))
    self.DVVP_P_LE.setObjectName("DVVP_P_LE")
    self.DVVP_P_LB = QtWidgets.QLabel(self.groupBox_16)
    self.DVVP_P_LB.setGeometry(QtCore.QRect(11, 53, 48, 16))
    self.DVVP_P_LB.setObjectName("DVVP_P_LB")
    self.DVVP_V_LB = QtWidgets.QLabel(self.groupBox_16)
    self.DVVP_V_LB.setGeometry(QtCore.QRect(11, 84, 51, 16))
    self.DVVP_V_LB.setObjectName("DVVP_V_LB")
    self.DVVP_D_LE = QtWidgets.QTextEdit(self.groupBox_16)
    self.DVVP_D_LE.setGeometry(QtCore.QRect(76, 115, 61, 25))
    self.DVVP_D_LE.setObjectName("DVVP_D_LE")
    self.DVVP_Y_LE = QtWidgets.QTextEdit(self.groupBox_16)
    self.DVVP_Y_LE.setGeometry(QtCore.QRect(209, 115, 61, 25))
    self.DVVP_Y_LE.setObjectName("DVVP_Y_LE")
    self.DVVP_M_LE = QtWidgets.QTextEdit(self.groupBox_16)
    self.DVVP_M_LE.setGeometry(QtCore.QRect(143, 115, 60, 25))
    self.DVVP_M_LE.setObjectName("DVVP_M_LE")
    self.gridLayout_3.addWidget(self.groupBox_16, 0, 3, 1, 1)
    # groupBox_15: CSLR_* widgets; its captions use generic label_8x names.
    self.groupBox_15 = QtWidgets.QGroupBox(self.layoutWidget)
    font = QtGui.QFont()
    font.setFamily("Arial")
    self.groupBox_15.setFont(font)
    self.groupBox_15.setObjectName("groupBox_15")
    self.frame_13 = QtWidgets.QFrame(self.groupBox_15)
    self.frame_13.setGeometry(QtCore.QRect(0, 0, 281, 151))
    self.frame_13.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_13.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_13.setObjectName("frame_13")
    self.CSLR_V_LE = QtWidgets.QTextEdit(self.groupBox_15)
    self.CSLR_V_LE.setGeometry(QtCore.QRect(76, 84, 194, 25))
    self.CSLR_V_LE.setObjectName("CSLR_V_LE")
    self.label_81 = QtWidgets.QLabel(self.groupBox_15)
    self.label_81.setGeometry(QtCore.QRect(11, 115, 26, 16))
    self.label_81.setObjectName("label_81")
    self.CSLR_IP_LE = QtWidgets.QTextEdit(self.groupBox_15)
    self.CSLR_IP_LE.setGeometry(QtCore.QRect(76, 21, 194, 26))
    self.CSLR_IP_LE.setObjectName("CSLR_IP_LE")
    self.label_83 = QtWidgets.QLabel(self.groupBox_15)
    self.label_83.setGeometry(QtCore.QRect(11, 53, 48, 16))
    self.label_83.setObjectName("label_83")
    self.label_82 = QtWidgets.QLabel(self.groupBox_15)
    self.label_82.setGeometry(QtCore.QRect(11, 21, 59, 16))
    self.label_82.setObjectName("label_82")
    self.CSLR_P_LE = QtWidgets.QTextEdit(self.groupBox_15)
    self.CSLR_P_LE.setGeometry(QtCore.QRect(76, 53, 194, 25))
    self.CSLR_P_LE.setObjectName("CSLR_P_LE")
    self.CSLR_D_LE = QtWidgets.QTextEdit(self.groupBox_15)
    self.CSLR_D_LE.setGeometry(QtCore.QRect(76, 115, 61, 25))
    self.CSLR_D_LE.setObjectName("CSLR_D_LE")
    self.label_84 = QtWidgets.QLabel(self.groupBox_15)
    self.label_84.setGeometry(QtCore.QRect(11, 84, 51, 16))
    self.label_84.setObjectName("label_84")
    self.CSLR_Y_LE = QtWidgets.QTextEdit(self.groupBox_15)
    self.CSLR_Y_LE.setGeometry(QtCore.QRect(209, 115, 61, 25))
    self.CSLR_Y_LE.setObjectName("CSLR_Y_LE")
    self.CSLR_M_LE = QtWidgets.QTextEdit(self.groupBox_15)
    self.CSLR_M_LE.setGeometry(QtCore.QRect(143, 115, 60, 25))
    self.CSLR_M_LE.setObjectName("CSLR_M_LE")
    self.gridLayout_3.addWidget(self.groupBox_15, 0, 2, 1, 1)
    # groupBox_13: CSLS2_* widgets.
    self.groupBox_13 = QtWidgets.QGroupBox(self.layoutWidget)
    font = QtGui.QFont()
    font.setFamily("Arial")
    self.groupBox_13.setFont(font)
    self.groupBox_13.setObjectName("groupBox_13")
    self.frame_11 = QtWidgets.QFrame(self.groupBox_13)
    self.frame_11.setGeometry(QtCore.QRect(0, 0, 281, 151))
    self.frame_11.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_11.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_11.setObjectName("frame_11")
    self.CSLS2_V_LE = QtWidgets.QTextEdit(self.groupBox_13)
    self.CSLS2_V_LE.setGeometry(QtCore.QRect(76, 84, 194, 25))
    self.CSLS2_V_LE.setObjectName("CSLS2_V_LE")
    self.CSLS2_IP_LE = QtWidgets.QTextEdit(self.groupBox_13)
    self.CSLS2_IP_LE.setGeometry(QtCore.QRect(76, 21, 194, 26))
    self.CSLS2_IP_LE.setObjectName("CSLS2_IP_LE")
    self.CSLS2_D_LB = QtWidgets.QLabel(self.groupBox_13)
    self.CSLS2_D_LB.setGeometry(QtCore.QRect(11, 115, 26, 16))
    self.CSLS2_D_LB.setObjectName("CSLS2_D_LB")
    self.CSLS2_IP_LB = QtWidgets.QLabel(self.groupBox_13)
    self.CSLS2_IP_LB.setGeometry(QtCore.QRect(11, 21, 59, 16))
    self.CSLS2_IP_LB.setObjectName("CSLS2_IP_LB")
    self.CSLS2_P_LE = QtWidgets.QTextEdit(self.groupBox_13)
    self.CSLS2_P_LE.setGeometry(QtCore.QRect(76, 53, 194, 25))
    self.CSLS2_P_LE.setObjectName("CSLS2_P_LE")
    self.CSLS2_P_LB = QtWidgets.QLabel(self.groupBox_13)
    self.CSLS2_P_LB.setGeometry(QtCore.QRect(11, 53, 48, 16))
    self.CSLS2_P_LB.setObjectName("CSLS2_P_LB")
    self.CSLS2_V_LB = QtWidgets.QLabel(self.groupBox_13)
    self.CSLS2_V_LB.setGeometry(QtCore.QRect(11, 84, 51, 16))
    self.CSLS2_V_LB.setObjectName("CSLS2_V_LB")
    self.CSLS2_Y_LE = QtWidgets.QTextEdit(self.groupBox_13)
    self.CSLS2_Y_LE.setGeometry(QtCore.QRect(209, 115, 61, 25))
    self.CSLS2_Y_LE.setObjectName("CSLS2_Y_LE")
    self.CSLS2_D_LE = QtWidgets.QTextEdit(self.groupBox_13)
    self.CSLS2_D_LE.setGeometry(QtCore.QRect(76, 115, 61, 25))
    self.CSLS2_D_LE.setObjectName("CSLS2_D_LE")
    self.CSLS2_M_LE = QtWidgets.QTextEdit(self.groupBox_13)
    self.CSLS2_M_LE.setGeometry(QtCore.QRect(143, 115, 60, 25))
    self.CSLS2_M_LE.setObjectName("CSLS2_M_LE")
    self.gridLayout_3.addWidget(self.groupBox_13, 0, 0, 1, 1)
    # groupBox_14: CSTID_* widgets (CSTID_V_LB_2 is the DATE caption label).
    self.groupBox_14 = QtWidgets.QGroupBox(self.layoutWidget)
    font = QtGui.QFont()
    font.setFamily("Arial")
    self.groupBox_14.setFont(font)
    self.groupBox_14.setObjectName("groupBox_14")
    self.frame_12 = QtWidgets.QFrame(self.groupBox_14)
    self.frame_12.setGeometry(QtCore.QRect(0, 0, 281, 151))
    self.frame_12.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_12.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_12.setObjectName("frame_12")
    self.CSTID_V_LB_2 = QtWidgets.QLabel(self.groupBox_14)
    self.CSTID_V_LB_2.setGeometry(QtCore.QRect(11, 115, 26, 16))
    self.CSTID_V_LB_2.setObjectName("CSTID_V_LB_2")
    self.CSTID_V_LE = QtWidgets.QTextEdit(self.groupBox_14)
    self.CSTID_V_LE.setGeometry(QtCore.QRect(76, 84, 194, 25))
    self.CSTID_V_LE.setObjectName("CSTID_V_LE")
    self.CSTID_IP_LE = QtWidgets.QTextEdit(self.groupBox_14)
    self.CSTID_IP_LE.setGeometry(QtCore.QRect(76, 21, 194, 26))
    self.CSTID_IP_LE.setObjectName("CSTID_IP_LE")
    self.CSTID_P_LE = QtWidgets.QTextEdit(self.groupBox_14)
    self.CSTID_P_LE.setGeometry(QtCore.QRect(76, 53, 194, 25))
    self.CSTID_P_LE.setObjectName("CSTID_P_LE")
    self.CSTID_IP_LB = QtWidgets.QLabel(self.groupBox_14)
    self.CSTID_IP_LB.setGeometry(QtCore.QRect(11, 21, 59, 16))
    self.CSTID_IP_LB.setObjectName("CSTID_IP_LB")
    self.CSTID_V_LB = QtWidgets.QLabel(self.groupBox_14)
    self.CSTID_V_LB.setGeometry(QtCore.QRect(11, 84, 51, 16))
    self.CSTID_V_LB.setObjectName("CSTID_V_LB")
    self.CSTID_D_LE = QtWidgets.QTextEdit(self.groupBox_14)
    self.CSTID_D_LE.setGeometry(QtCore.QRect(76, 115, 61, 25))
    self.CSTID_D_LE.setObjectName("CSTID_D_LE")
    self.CSTID_Y_LE = QtWidgets.QTextEdit(self.groupBox_14)
    self.CSTID_Y_LE.setGeometry(QtCore.QRect(209, 115, 61, 25))
    self.CSTID_Y_LE.setObjectName("CSTID_Y_LE")
    self.CSTID_M_LE = QtWidgets.QTextEdit(self.groupBox_14)
    self.CSTID_M_LE.setGeometry(QtCore.QRect(143, 115, 60, 25))
    self.CSTID_M_LE.setObjectName("CSTID_M_LE")
    self.CSTID_P_LB = QtWidgets.QLabel(self.groupBox_14)
    self.CSTID_P_LB.setGeometry(QtCore.QRect(11, 53, 48, 16))
    self.CSTID_P_LB.setObjectName("CSTID_P_LB")
    self.gridLayout_3.addWidget(self.groupBox_14, 0, 1, 1, 1)
    self.gridLayout_5.addLayout(self.gridLayout_3, 2, 0, 1, 1)
    # ---- Row 4 of group boxes (gridLayout_4) -----------------------------
    self.gridLayout_4 = QtWidgets.QGridLayout()
    self.gridLayout_4.setObjectName("gridLayout_4")
    # groupBox_18: CSVP_* widgets.
    self.groupBox_18 = QtWidgets.QGroupBox(self.layoutWidget)
    font = QtGui.QFont()
    font.setFamily("Arial")
    self.groupBox_18.setFont(font)
    self.groupBox_18.setObjectName("groupBox_18")
    self.frame_16 = QtWidgets.QFrame(self.groupBox_18)
    self.frame_16.setGeometry(QtCore.QRect(0, 0, 281, 151))
    self.frame_16.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_16.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_16.setObjectName("frame_16")
    self.CSVP_V_LE = QtWidgets.QTextEdit(self.groupBox_18)
    self.CSVP_V_LE.setGeometry(QtCore.QRect(76, 84, 194, 25))
    self.CSVP_V_LE.setObjectName("CSVP_V_LE")
    self.CSVP_D_LB = QtWidgets.QLabel(self.groupBox_18)
    self.CSVP_D_LB.setGeometry(QtCore.QRect(11, 115, 26, 16))
    self.CSVP_D_LB.setObjectName("CSVP_D_LB")
    self.CSVP_IP_LE = QtWidgets.QTextEdit(self.groupBox_18)
    self.CSVP_IP_LE.setGeometry(QtCore.QRect(76, 21, 194, 26))
    self.CSVP_IP_LE.setObjectName("CSVP_IP_LE")
    self.CSVP_P_LB = QtWidgets.QLabel(self.groupBox_18)
    self.CSVP_P_LB.setGeometry(QtCore.QRect(11, 53, 48, 16))
    self.CSVP_P_LB.setObjectName("CSVP_P_LB")
    self.CSVP_IP_LB = QtWidgets.QLabel(self.groupBox_18)
    self.CSVP_IP_LB.setGeometry(QtCore.QRect(11, 21, 59, 16))
    self.CSVP_IP_LB.setObjectName("CSVP_IP_LB")
    self.CSVP_P_LE = QtWidgets.QTextEdit(self.groupBox_18)
    self.CSVP_P_LE.setGeometry(QtCore.QRect(76, 53, 194, 25))
    self.CSVP_P_LE.setObjectName("CSVP_P_LE")
    self.CSVP_D_LE = QtWidgets.QTextEdit(self.groupBox_18)
    self.CSVP_D_LE.setGeometry(QtCore.QRect(76, 115, 61, 25))
    self.CSVP_D_LE.setObjectName("CSVP_D_LE")
    self.CSVP_V_LB = QtWidgets.QLabel(self.groupBox_18)
    self.CSVP_V_LB.setGeometry(QtCore.QRect(11, 84, 51, 16))
    self.CSVP_V_LB.setObjectName("CSVP_V_LB")
    self.CSVP_Y_LE = QtWidgets.QTextEdit(self.groupBox_18)
    self.CSVP_Y_LE.setGeometry(QtCore.QRect(209, 115, 61, 25))
    self.CSVP_Y_LE.setObjectName("CSVP_Y_LE")
    self.CSVP_M_LE = QtWidgets.QTextEdit(self.groupBox_18)
    self.CSVP_M_LE.setGeometry(QtCore.QRect(143, 115, 60, 25))
    self.CSVP_M_LE.setObjectName("CSVP_M_LE")
    self.gridLayout_4.addWidget(self.groupBox_18, 0, 1, 1, 1)
    # groupBox_19: DVC_* widgets; captions use generic label_9x/label_100.
    self.groupBox_19 = QtWidgets.QGroupBox(self.layoutWidget)
    font = QtGui.QFont()
    font.setFamily("Arial")
    self.groupBox_19.setFont(font)
    self.groupBox_19.setObjectName("groupBox_19")
    self.frame_17 = QtWidgets.QFrame(self.groupBox_19)
    self.frame_17.setGeometry(QtCore.QRect(0, 0, 281, 151))
    self.frame_17.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_17.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_17.setObjectName("frame_17")
    self.DVC_V_LE = QtWidgets.QTextEdit(self.groupBox_19)
    self.DVC_V_LE.setGeometry(QtCore.QRect(76, 84, 194, 25))
    self.DVC_V_LE.setObjectName("DVC_V_LE")
    self.label_97 = QtWidgets.QLabel(self.groupBox_19)
    self.label_97.setGeometry(QtCore.QRect(11, 115, 26, 16))
    self.label_97.setObjectName("label_97")
    self.DVC_IP_LE = QtWidgets.QTextEdit(self.groupBox_19)
    self.DVC_IP_LE.setGeometry(QtCore.QRect(76, 21, 194, 26))
    self.DVC_IP_LE.setObjectName("DVC_IP_LE")
    self.label_98 = QtWidgets.QLabel(self.groupBox_19)
    self.label_98.setGeometry(QtCore.QRect(11, 21, 59, 16))
    self.label_98.setObjectName("label_98")
    self.DVC_P_LE = QtWidgets.QTextEdit(self.groupBox_19)
    self.DVC_P_LE.setGeometry(QtCore.QRect(76, 53, 194, 25))
    self.DVC_P_LE.setObjectName("DVC_P_LE")
    self.label_99 = QtWidgets.QLabel(self.groupBox_19)
    self.label_99.setGeometry(QtCore.QRect(11, 53, 48, 16))
    self.label_99.setObjectName("label_99")
    self.DVC_D_LE = QtWidgets.QTextEdit(self.groupBox_19)
    self.DVC_D_LE.setGeometry(QtCore.QRect(76, 115, 61, 25))
    self.DVC_D_LE.setObjectName("DVC_D_LE")
    self.label_100 = QtWidgets.QLabel(self.groupBox_19)
    self.label_100.setGeometry(QtCore.QRect(11, 84, 51, 16))
    self.label_100.setObjectName("label_100")
    self.DVC_Y_LE = QtWidgets.QTextEdit(self.groupBox_19)
    self.DVC_Y_LE.setGeometry(QtCore.QRect(209, 115, 61, 25))
    self.DVC_Y_LE.setObjectName("DVC_Y_LE")
    self.DVC_M_LE = QtWidgets.QTextEdit(self.groupBox_19)
    self.DVC_M_LE.setGeometry(QtCore.QRect(143, 115, 60, 25))
    self.DVC_M_LE.setObjectName("DVC_M_LE")
    self.gridLayout_4.addWidget(self.groupBox_19, 0, 2, 1, 1)
    # groupBox_20: DVLR_* widgets (this box uses y-offsets one pixel lower
    # than its siblings: 20/52/83/114 vs 21/53/84/115).
    self.groupBox_20 = QtWidgets.QGroupBox(self.layoutWidget)
    font = QtGui.QFont()
    font.setFamily("Arial")
    self.groupBox_20.setFont(font)
    self.groupBox_20.setObjectName("groupBox_20")
    self.frame_18 = QtWidgets.QFrame(self.groupBox_20)
    self.frame_18.setGeometry(QtCore.QRect(0, 0, 281, 151))
    self.frame_18.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_18.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_18.setObjectName("frame_18")
    self.DVLR_V_LE = QtWidgets.QTextEdit(self.groupBox_20)
    self.DVLR_V_LE.setGeometry(QtCore.QRect(76, 83, 194, 25))
    self.DVLR_V_LE.setObjectName("DVLR_V_LE")
    self.DVLR_IP_LE = QtWidgets.QTextEdit(self.groupBox_20)
    self.DVLR_IP_LE.setGeometry(QtCore.QRect(76, 20, 194, 26))
    self.DVLR_IP_LE.setObjectName("DVLR_IP_LE")
    self.DVLR_D_LB = QtWidgets.QLabel(self.groupBox_20)
    self.DVLR_D_LB.setGeometry(QtCore.QRect(11, 114, 26, 16))
    self.DVLR_D_LB.setObjectName("DVLR_D_LB")
    self.DVLR_P_LE = QtWidgets.QTextEdit(self.groupBox_20)
    self.DVLR_P_LE.setGeometry(QtCore.QRect(76, 52, 194, 25))
    self.DVLR_P_LE.setObjectName("DVLR_P_LE")
    self.DVLR_IP_LB = QtWidgets.QLabel(self.groupBox_20)
    self.DVLR_IP_LB.setGeometry(QtCore.QRect(11, 20, 59, 16))
    self.DVLR_IP_LB.setObjectName("DVLR_IP_LB")
    self.DVLR_P_LB = QtWidgets.QLabel(self.groupBox_20)
    self.DVLR_P_LB.setGeometry(QtCore.QRect(11, 52, 48, 16))
    self.DVLR_P_LB.setObjectName("DVLR_P_LB")
    self.DVLR_D_LE = QtWidgets.QTextEdit(self.groupBox_20)
    self.DVLR_D_LE.setGeometry(QtCore.QRect(76, 114, 61, 25))
    self.DVLR_D_LE.setObjectName("DVLR_D_LE")
    self.DVLR_V_LB = QtWidgets.QLabel(self.groupBox_20)
    self.DVLR_V_LB.setGeometry(QtCore.QRect(11, 83, 51, 16))
    self.DVLR_V_LB.setObjectName("DVLR_V_LB")
    self.DVLR_Y_LE = QtWidgets.QTextEdit(self.groupBox_20)
    self.DVLR_Y_LE.setGeometry(QtCore.QRect(209, 114, 61, 25))
    self.DVLR_Y_LE.setObjectName("DVLR_Y_LE")
    self.DVLR_M_LE = QtWidgets.QTextEdit(self.groupBox_20)
    self.DVLR_M_LE.setGeometry(QtCore.QRect(143, 114, 60, 25))
    self.DVLR_M_LE.setObjectName("DVLR_M_LE")
    self.gridLayout_4.addWidget(self.groupBox_20, 0, 3, 1, 1)
    # groupBox_17: CSVD_* widgets (placed at column 0 of this row).
    self.groupBox_17 = QtWidgets.QGroupBox(self.layoutWidget)
    font = QtGui.QFont()
    font.setFamily("Arial")
    self.groupBox_17.setFont(font)
    self.groupBox_17.setObjectName("groupBox_17")
    self.frame_15 = QtWidgets.QFrame(self.groupBox_17)
    self.frame_15.setGeometry(QtCore.QRect(0, 0, 281, 151))
    self.frame_15.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_15.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_15.setObjectName("frame_15")
    self.CSVD_IP_LE = QtWidgets.QTextEdit(self.groupBox_17)
    self.CSVD_IP_LE.setGeometry(QtCore.QRect(76, 21, 194, 26))
    self.CSVD_IP_LE.setObjectName("CSVD_IP_LE")
    self.CSVD_P_LB = QtWidgets.QLabel(self.groupBox_17)
    self.CSVD_P_LB.setGeometry(QtCore.QRect(11, 53, 48, 16))
    self.CSVD_P_LB.setObjectName("CSVD_P_LB")
    self.CSVD_IP_LB = QtWidgets.QLabel(self.groupBox_17)
    self.CSVD_IP_LB.setGeometry(QtCore.QRect(11, 21, 59, 16))
    self.CSVD_IP_LB.setObjectName("CSVD_IP_LB")
    self.CSVD_P_LE = QtWidgets.QTextEdit(self.groupBox_17)
    self.CSVD_P_LE.setGeometry(QtCore.QRect(76, 53, 194, 25))
    self.CSVD_P_LE.setObjectName("CSVD_P_LE")
    self.CSVD_V_LB = QtWidgets.QLabel(self.groupBox_17)
    self.CSVD_V_LB.setGeometry(QtCore.QRect(11, 84, 51, 16))
    self.CSVD_V_LB.setObjectName("CSVD_V_LB")
    self.CSVD_D_LE = QtWidgets.QTextEdit(self.groupBox_17)
    self.CSVD_D_LE.setGeometry(QtCore.QRect(76, 115, 61, 25))
    self.CSVD_D_LE.setObjectName("CSVD_D_LE")
    self.CSVD_Y_LE = QtWidgets.QTextEdit(self.groupBox_17)
    self.CSVD_Y_LE.setGeometry(QtCore.QRect(209, 115, 61, 25))
    self.CSVD_Y_LE.setObjectName("CSVD_Y_LE")
    self.CSVD_M_LE = QtWidgets.QTextEdit(self.groupBox_17)
    self.CSVD_M_LE.setGeometry(QtCore.QRect(143, 115, 60, 25))
    self.CSVD_M_LE.setObjectName("CSVD_M_LE")
    self.CSVD_D_LB = QtWidgets.QLabel(self.groupBox_17)
    self.CSVD_D_LB.setGeometry(QtCore.QRect(11, 115, 26, 16))
    self.CSVD_D_LB.setObjectName("CSVD_D_LB")
    self.CSVD_V_LE = QtWidgets.QTextEdit(self.groupBox_17)
    self.CSVD_V_LE.setGeometry(QtCore.QRect(76, 84, 194, 25))
    self.CSVD_V_LE.setObjectName("CSVD_V_LE")
    self.gridLayout_4.addWidget(self.groupBox_17, 0, 0, 1, 1)
    self.gridLayout_5.addLayout(self.gridLayout_4, 3, 0, 1, 1)
    # Secondary frame behind the grid area; raised below self.frame so the
    # main frame stays on top.
    self.frame_2 = QtWidgets.QFrame(IPSetting)
    self.frame_2.setGeometry(QtCore.QRect(0, 0, 1161, 641))
    self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame_2.setObjectName("frame_2")
    self.frame_2.raise_()
    self.frame.raise_()
    # Apply translatable captions and auto-connect slots by object name.
    self.retranslateUi(IPSetting)
    QtCore.QMetaObject.connectSlotsByName(IPSetting)
def retranslateUi(self, IPSetting):
_translate = QtCore.QCoreApplication.translate
IPSetting.setWindowTitle(_translate("IPSetting", "Form"))
self.SA_BTN.setText(_translate("IPSetting", "SAVE"))
self.CNCL_BTN.setText(_translate("IPSetting", "CANCLE"))
self.groupBox_4.setTitle(_translate("IPSetting", "CS COMPUTER"))
self.CSC_IP_LB.setText(_translate("IPSetting", "IP ADDRESS"))
self.CSC_P_LB.setText(_translate("IPSetting", "PORT NO."))
self.CSC_D_LB.setText(_translate("IPSetting", "DATE"))
self.CSC_SW_LB.setText(_translate("IPSetting", "SW V. NO."))
self.groupBox_7.setTitle(_translate("IPSetting", "VOIP GATEWAY"))
self.CSVG_D_LB.setText(_translate("IPSetting", "DATE"))
self.CSVG_IP_LB.setText(_translate("IPSetting", "IP ADDRESS"))
self.CSVG_P_LB.setText(_translate("IPSetting", "PORT NO."))
self.CSVG_V_LB.setText(_translate("IPSetting", "SW V. NO."))
self.groupBox_3.setTitle(_translate("IPSetting", "IP ENCRYPTOR 1"))
self.CSIE1_D_LB.setText(_translate("IPSetting", "DATE"))
self.CSIE1_IP_LB.setText(_translate("IPSetting", "IP ADDRESS"))
self.CSIE1_P_LB.setText(_translate("IPSetting", "PORT NO."))
self.CSIE1_V_LB.setText(_translate("IPSetting", "SW V. NO."))
self.groupBox_6.setTitle(_translate("IPSetting", "DV LAN SW"))
self.DVLS_P_LB.setText(_translate("IPSetting", "PORT NO."))
self.DVLS_V_LB.setText(_translate("IPSetting", "SW V. NO."))
self.DVLS_D_LB.setText(_translate("IPSetting", "DATE"))
self.DVLS_IP_LB.setText(_translate("IPSetting", "IP ADDRESS"))
self.groupBox_9.setTitle(_translate("IPSetting", "CS LAN SW 1"))
self.CSLS1_D_LB.setText(_translate("IPSetting", "DATE"))
self.CSLS1_IP_LB.setText(_translate("IPSetting", "IP ADDRESS"))
self.CSLS1_P_LB.setText(_translate("IPSetting", "PORT NO."))
self.CSLS1_V_LB.setText(_translate("IPSetting", "SW V. NO."))
self.groupBox_10.setTitle(_translate("IPSetting", "MINI IP LOGGER"))
self.CSIL_D_LB.setText(_translate("IPSetting", "DATE"))
self.CSIL_IP_LB.setText(_translate("IPSetting", "IP ADDRESS"))
self.CSIL_V_LB.setText(_translate("IPSetting", "SW V. NO."))
self.CSIL_P_LB.setText(_translate("IPSetting", "PORT NO."))
self.groupBox_11.setTitle(_translate("IPSetting", "IP ENCRYPTION 2"))
self.CSIE2_D_LB.setText(_translate("IPSetting", "DATE"))
self.CSIE2_P_LB.setText(_translate("IPSetting", "PORT NO."))
self.CSIE2_IP_LB.setText(_translate("IPSetting", "IP ADDRESS"))
self.CSIE2_V_LB.setText(_translate("IPSetting", "SW V. NO."))
self.groupBox_12.setTitle(_translate("IPSetting", "DV ENCODER RECODER"))
self.DVVDR_D_LE_2.setText(_translate("IPSetting", "DATE"))
self.DVVDR_IP_LE_2.setText(_translate("IPSetting", "IP ADDRESS"))
self.DVVDR_P_LE_2.setText(_translate("IPSetting", "PORT NO."))
self.DVVDR_V_LE_2.setText(_translate("IPSetting", "SW V. NO."))
self.groupBox_16.setTitle(_translate("IPSetting", "DV VOIP PHONE"))
self.DVVP_D_LB.setText(_translate("IPSetting", "DATE"))
self.DVVP_IP_LB.setText(_translate("IPSetting", "IP ADDRESS"))
self.DVVP_P_LB.setText(_translate("IPSetting", "PORT NO."))
self.DVVP_V_LB.setText(_translate("IPSetting", "SW V. NO."))
self.groupBox_15.setTitle(_translate("IPSetting", "CS LOS RADIO"))
self.label_81.setText(_translate("IPSetting", "DATE"))
self.label_83.setText(_translate("IPSetting", "PORT NO."))
self.label_82.setText(_translate("IPSetting", "IP ADDRESS"))
self.label_84.setText(_translate("IPSetting", "SW V. NO."))
self.groupBox_13.setTitle(_translate("IPSetting", "CS LAN SW 2"))
self.CSLS2_D_LB.setText(_translate("IPSetting", "DATE"))
self.CSLS2_IP_LB.setText(_translate("IPSetting", "IP ADDRESS"))
self.CSLS2_P_LB.setText(_translate("IPSetting", "PORT NO."))
self.CSLS2_V_LB.setText(_translate("IPSetting", "SW V. NO."))
self.groupBox_14.setTitle(_translate("IPSetting", "TID"))
self.CSTID_V_LB_2.setText(_translate("IPSetting", "DATE"))
self.CSTID_IP_LB.setText(_translate("IPSetting", "IP ADDRESS"))
self.CSTID_V_LB.setText(_translate("IPSetting", "SW V. NO."))
self.CSTID_P_LB.setText(_translate("IPSetting", "PORT NO."))
self.groupBox_18.setTitle(_translate("IPSetting", "VOIP PHONE"))
self.CSVP_D_LB.setText(_translate("IPSetting", "DATE"))
self.CSVP_P_LB.setText(_translate("IPSetting", "PORT NO."))
self.CSVP_IP_LB.setText(_translate("IPSetting", "IP ADDRESS"))
self.CSVP_V_LB.setText(_translate("IPSetting", "SW V. NO."))
self.groupBox_19.setTitle(_translate("IPSetting", "DV COMPUTER"))
self.label_97.setText(_translate("IPSetting", "DATE"))
self.label_98.setText(_translate("IPSetting", "IP ADDRESS"))
self.label_99.setText(_translate("IPSetting", "PORT NO."))
self.label_100.setText(_translate("IPSetting", "SW V. NO."))
self.groupBox_20.setTitle(_translate("IPSetting", "DV LOS RADIO"))
self.DVLR_D_LB.setText(_translate("IPSetting", "DATE"))
self.DVLR_IP_LB.setText(_translate("IPSetting", "IP ADDRESS"))
self.DVLR_P_LB.setText(_translate("IPSetting", "PORT NO."))
self.DVLR_V_LB.setText(_translate("IPSetting", "SW V. NO."))
self.groupBox_17.setTitle(_translate("IPSetting", "VIDEO ENCODER"))
self.CSVD_P_LB.setText(_translate("IPSetting", "PORT NO."))
self.CSVD_IP_LB.setText(_translate("IPSetting", "IP ADDRESS"))
self.CSVD_V_LB.setText(_translate("IPSetting", "SW V. NO."))
self.CSVD_D_LB.setText(_translate("IPSetting", "DATE"))
if __name__ == "__main__":
    import sys

    # Standalone preview: instantiate the generated UI on a bare QWidget and
    # hand control to the Qt event loop until the window closes.
    qt_app = QtWidgets.QApplication(sys.argv)
    main_window = QtWidgets.QWidget()
    ui = Ui_IPSetting()
    ui.setupUi(main_window)
    main_window.show()
    sys.exit(qt_app.exec_())
|
987,401 | e2bf25ea37337ce47743fe2309aee074772eb02e | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import numpy
from six.moves import urllib
from six.moves import xrange
import tensorflow as tf
import Model_helper as helper
# Path where the trained weights are saved/restored.
modelName = "./Color/weights/depthwise3unpool.pd"
#logName ="./Color/weights/logs_deconv3"
isDropout = True  # master switch for dropout during training
LABEL_SIZE_C = 2  # number of output classes (binary segmentation)
NUM_CHANNELS_In= 3  # RGB input
keep_prop = 0.7  # dropout keep probability
pool_stride2 =[1, 2, 2, 1]  # 2x2 window and stride shared by all pool ops (NHWC)
# Experiment log (accuracy train%, val%, loss):
#depth 2, Aug x3~x4 : 83%, 80% loss 0.041x
#depth 2, deconv : 85%, 82% loss 0.118x
#depthwise 3, deconv : 83%, 79% loss 0.030
depth0 = 3  # feature depth of every intermediate conv layer
# Encoder/decoder conv kernels: 3x3, Xavier-initialized, zero-initialized biases.
# *_0 variables form the encoder path, *_2 the decoder path, l3 the classifier.
# NOTE(review): TF1.x graph-mode API (tf.get_variable / tf.contrib) -- these
# variables are created at import time; confirm the file is run under TF1.
conv_l0_weights = tf.get_variable("w1", shape=[3, 3, NUM_CHANNELS_In, depth0], initializer =tf.contrib.layers.xavier_initializer())
conv_l0_biases = tf.Variable(tf.zeros([depth0]))
conv_m0_weights = tf.get_variable("m0", shape=[3, 3, depth0, depth0], initializer =tf.contrib.layers.xavier_initializer())
conv_m0_biases = tf.Variable(tf.zeros([depth0]))
conv_s0_weights = tf.get_variable("s0", shape=[3, 3, depth0, depth0], initializer =tf.contrib.layers.xavier_initializer())
conv_s0_biases = tf.Variable(tf.zeros([depth0]))
conv_t0_weights = tf.get_variable("t0", shape=[3, 3, depth0, depth0], initializer =tf.contrib.layers.xavier_initializer())
conv_t0_biases = tf.Variable(tf.zeros([depth0]))
conv_p0_weights = tf.get_variable("p0", shape=[3, 3, depth0, depth0], initializer =tf.contrib.layers.xavier_initializer())
conv_p0_biases = tf.Variable(tf.zeros([depth0]))
conv_x0_weights = tf.get_variable("x0", shape=[3, 3, depth0, depth0], initializer =tf.contrib.layers.xavier_initializer())
conv_x0_biases = tf.Variable(tf.zeros([depth0]))
conv_xx0_weights = tf.get_variable("xx0", shape=[3, 3, depth0, depth0], initializer =tf.contrib.layers.xavier_initializer())
conv_xx0_biases = tf.Variable(tf.zeros([depth0]))
conv_x2_weights = tf.get_variable("x2", shape=[3, 3, depth0, depth0], initializer =tf.contrib.layers.xavier_initializer())
conv_x2_biases = tf.Variable(tf.zeros([depth0]))
conv_p2_weights = tf.get_variable("p2", shape=[3, 3, depth0, depth0], initializer =tf.contrib.layers.xavier_initializer())
conv_p2_biases = tf.Variable(tf.zeros([depth0]))
conv_t2_weights = tf.get_variable("t2", shape=[3, 3, depth0, depth0], initializer =tf.contrib.layers.xavier_initializer())
conv_t2_biases = tf.Variable(tf.zeros([depth0]))
conv_s2_weights = tf.get_variable("s2", shape=[3, 3, depth0, depth0], initializer =tf.contrib.layers.xavier_initializer())
conv_s2_biases = tf.Variable(tf.zeros([depth0]))
conv_m2_weights = tf.get_variable("m2", shape=[3, 3, depth0, depth0], initializer =tf.contrib.layers.xavier_initializer())
conv_m2_biases = tf.Variable(tf.zeros([depth0]))
conv_l2_weights = tf.get_variable("l2", shape=[3, 3, depth0, depth0], initializer =tf.contrib.layers.xavier_initializer())
conv_l2_biases = tf.Variable(tf.zeros([depth0]))
# Final classifier projecting depth0 features to per-class logits.
conv_l3_weights = tf.get_variable("l3", shape=[3, 3, depth0, LABEL_SIZE_C], initializer =tf.contrib.layers.xavier_initializer())
conv_l3_biases = tf.Variable(tf.zeros([LABEL_SIZE_C]))
# Fixed all-ones 2x2 kernel used by conv2d_transpose for decoder upsampling.
dconv_weights = tf.constant(1.0, shape = [2, 2, depth0, depth0])
def oneHot(max_pos, out_shape):
    """Build a float32 mask of shape `out_shape` with 1.0 at the flat indices
    given in `max_pos` (e.g. the argmax positions from max_pool_with_argmax)
    and 0.0 everywhere else.
    """
    flat_indices = tf.reshape(max_pos, [-1])
    dense_mask = tf.sparse_to_dense(sparse_indices=flat_indices,
                                    output_shape=out_shape,
                                    sparse_values=1,
                                    default_value=0)
    return tf.cast(dense_mask, tf.float32)
def inference(inData, train=False):
    """Encoder-decoder segmentation network with skip connections.

    Args:
        inData: input image batch; assumed NHWC float tensor with a static
            shape (decoder output shapes come from get_shape()) -- TODO confirm.
        train: when True, adds Gaussian input noise and enables dropout.

    Returns:
        Tensor of shape [N*H*W, LABEL_SIZE_C]: raw logits during training,
        softmax probabilities otherwise.
    """
    isDrop = train and isDropout
    #1/2  -- encoder: each stage halves spatial resolution
    inData = tf.multiply(inData ,1.0)
    if train: inData = helper.Gaussian_noise_layer(inData, 0.3)
    in2 = tf.nn.avg_pool(inData,pool_stride2,strides=pool_stride2,padding='SAME')
    feature1 = pool = helper.conv2dRelu(in2,conv_l0_weights,conv_l0_biases,isDrop,keep_prop)
    #1/4
    # pos* hold argmax positions for unpooling; the separate max_pool call
    # recomputes the same values (with_argmax returns both already).
    some,pos0 = tf.nn.max_pool_with_argmax(pool,pool_stride2,strides=pool_stride2,padding='SAME')
    pool= tf.nn.max_pool(pool,pool_stride2,strides=pool_stride2,padding='SAME')
    feature2 = pool = helper.conv2dRelu(pool,conv_m0_weights,conv_m0_biases,isDrop,keep_prop)
    #1/8
    some, pos1 = tf.nn.max_pool_with_argmax(pool,pool_stride2,strides=pool_stride2,padding='SAME')
    pool= tf.nn.max_pool(pool,pool_stride2,strides=pool_stride2,padding='SAME')
    feature3 = pool = helper.conv2dRelu(pool,conv_s0_weights,conv_s0_biases,isDrop,keep_prop)
    #1/16
    some, pos2 = tf.nn.max_pool_with_argmax(pool,pool_stride2,strides=pool_stride2,padding='SAME')
    pool= tf.nn.max_pool(pool,pool_stride2,strides=pool_stride2,padding='SAME')
    feature4 = pool = helper.conv2d(pool,conv_t0_weights,conv_t0_biases)
    some, pos3 = tf.nn.max_pool_with_argmax(pool,pool_stride2,strides=pool_stride2,padding='SAME')
    pool= tf.nn.max_pool(pool,pool_stride2,strides=pool_stride2,padding='SAME')
    feature5 = pool = helper.conv2d(pool,conv_p0_weights,conv_p0_biases)
    some, pos4 = tf.nn.max_pool_with_argmax(pool,pool_stride2,strides=pool_stride2,padding='SAME')
    pool= tf.nn.max_pool(pool,pool_stride2,strides=pool_stride2,padding='SAME')
    feature6 = pool = helper.conv2d(pool,conv_x0_weights,conv_x0_biases)
    some, pos5 = tf.nn.max_pool_with_argmax(pool,pool_stride2,strides=pool_stride2,padding='SAME')
    pool= tf.nn.max_pool(pool,pool_stride2,strides=pool_stride2,padding='SAME')
    pool = helper.conv2d(pool,conv_xx0_weights,conv_xx0_biases)
    # Must be done separately for each channel  (translated from Korean)
    # Decoder: conv2d_transpose with an all-ones kernel upsamples 2x, then the
    # matching encoder feature map is added as a skip connection.
    # NOTE(review): at every stage `unpool` (the argmax-masked upsample) is
    # computed but never used -- tf.add takes `pool`, not `unpool`. Confirm
    # whether the unpooling mask was meant to feed the addition.
    pool = tf.nn.conv2d_transpose(pool,dconv_weights,output_shape=feature6.get_shape().as_list() , strides=[1, 2, 2, 1],padding='SAME')
    oneHot_2d = oneHot(pos5, feature6.get_shape())
    unpool = tf.multiply(pool, oneHot_2d)
    pool = tf.nn.relu(tf.add(feature6, pool) )
    pool = helper.conv2d(pool,conv_x2_weights,conv_x2_biases)
    pool = tf.nn.conv2d_transpose(pool,dconv_weights,output_shape=feature5.get_shape().as_list() , strides=[1, 2, 2, 1],padding='SAME')
    oneHot_2d = oneHot(pos4, feature5.get_shape())
    unpool = tf.multiply(pool, oneHot_2d)
    pool = tf.nn.relu(tf.add(feature5, pool))
    pool = helper.conv2d(pool,conv_p2_weights,conv_p2_biases)
    pool = tf.nn.conv2d_transpose(pool,dconv_weights,output_shape=feature4.get_shape().as_list() , strides=[1, 2, 2, 1],padding='SAME')
    oneHot_2d = oneHot(pos3, feature4.get_shape())
    unpool = tf.multiply(pool, oneHot_2d)
    pool = tf.nn.relu(tf.add(feature4, pool) )
    pool = helper.conv2d(pool,conv_t2_weights,conv_t2_biases)
    pool = tf.nn.conv2d_transpose(pool,dconv_weights,output_shape=feature3.get_shape().as_list() , strides=[1, 2, 2, 1],padding='SAME')
    oneHot_2d = oneHot(pos2, feature3.get_shape())
    unpool = tf.multiply(pool, oneHot_2d)
    pool = tf.nn.relu(tf.add(feature3, pool) )
    pool = helper.conv2d(pool,conv_s2_weights,conv_s2_biases)
    pool = tf.nn.conv2d_transpose(pool,dconv_weights,output_shape=feature2.get_shape().as_list() , strides=[1, 2, 2, 1],padding='SAME')
    oneHot_2d = oneHot(pos1, feature2.get_shape())
    unpool = tf.multiply(pool, oneHot_2d)
    pool = tf.nn.relu(tf.add(feature2, pool))
    pool = helper.conv2d(pool,conv_m2_weights,conv_m2_biases)
    pool = tf.nn.conv2d_transpose(pool,dconv_weights,output_shape=feature1.get_shape().as_list() , strides=[1, 2, 2, 1],padding='SAME')
    oneHot_2d = oneHot(pos0, feature1.get_shape())
    unpool = tf.multiply(pool, oneHot_2d)
    pool = tf.nn.relu(tf.add(feature1, pool) )
    pool = helper.conv2dRelu(pool,conv_l2_weights,conv_l2_biases,isDrop,keep_prop)
    pool = helper.conv2dRelu(pool,conv_l3_weights,conv_l3_biases,isDrop,keep_prop)
    # Upsample logits back to the input resolution and flatten per pixel.
    input_shape = inData.get_shape().as_list()
    pool = helper.resize(pool,input_shape[1] ,input_shape[2])
    reshape = tf.reshape(pool, [-1,LABEL_SIZE_C])
    if not train: reshape = tf.nn.softmax(reshape)
    return reshape;
def regullarizer():
    """L2 weight-decay term summed over every conv kernel (biases excluded).

    NOTE: the misspelled name is kept because external callers may rely on it.
    """
    # First stage keeps the original grouping (l0 + l2 + l3), then each
    # encoder/decoder pair is accumulated, preserving the addition order.
    total = (tf.nn.l2_loss(conv_l0_weights)
             + tf.nn.l2_loss(conv_l2_weights)
             + tf.nn.l2_loss(conv_l3_weights))
    paired_kernels = (
        (conv_m0_weights, conv_m2_weights),
        (conv_s0_weights, conv_s2_weights),
        (conv_t0_weights, conv_t2_weights),
        (conv_p0_weights, conv_p2_weights),
        (conv_x0_weights, conv_x2_weights),
    )
    for encoder_w, decoder_w in paired_kernels:
        total += tf.nn.l2_loss(encoder_w) + tf.nn.l2_loss(decoder_w)
    total += tf.nn.l2_loss(conv_xx0_weights)
    return total
|
987,402 | 870db5a0d849f566410815eddf4291b204bbe85c | #!/usr/bin/env python3
#-*- coding:utf-8 -*-
import torch
import torch.nn as nn
from IPython import embed
class AnomalyLoss(nn.Module):
    """Joint loss for anomaly classification.

    Applies a class-weighted cross-entropy to the first two logit columns
    (the "occ" head) and a plain cross-entropy to the remaining columns
    (the "hos" head), combining them as `regular * occ_loss + hos_loss`.
    """

    def __init__(self, weights=(1.0, 1.0), regular=1.0, use_gpu=True):
        """
        Args:
            weights: per-class weights for the first head (length 2).
                BUG FIX: the default was a mutable list; an immutable tuple
                avoids the shared-mutable-default pitfall (torch.tensor
                accepts either, so callers are unaffected).
            regular: scale factor applied to the weighted head's loss.
            use_gpu: move the weight tensor to CUDA when True.
        """
        super(AnomalyLoss, self).__init__()
        weight_tensor = torch.tensor(weights, dtype=torch.float32)
        if use_gpu:
            weight_tensor = weight_tensor.cuda()
        self.reweiCEL = nn.CrossEntropyLoss(weight=weight_tensor)
        self.CrossEnt = nn.CrossEntropyLoss()
        self.regular = regular

    def forward(self, pred_score, gt_label):
        """
        Args:
            pred_score: (N, C) logits; columns [0:2] feed the weighted head,
                columns [2:] the unweighted head.
            gt_label: sequence of two (N,) long tensors of class indices.

        Returns:
            Scalar loss tensor.
        """
        occ_loss = self.reweiCEL(pred_score[:, :2], gt_label[0])
        hos_loss = self.CrossEnt(pred_score[:, 2:], gt_label[1])
        return self.regular * occ_loss + hos_loss
|
987,403 | fd7e0da06790657871d292a7b1e6c846b504c5a9 | # Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
import google.datalab.storage as storage
from google.datalab.contrib.pipeline.composer._api import Api
import re
class Composer(object):
    """Wraps a Cloud Composer environment.

    Encapsulates the operations we need against the Composer service --
    currently deploying generated Airflow DAG files into the environment's
    GCS dag folder.
    """

    # A valid dag location must be a GCS URL.
    gcs_file_regexp = re.compile('gs://.*')

    def __init__(self, zone, environment):
        """Initializes an instance of a Composer object.

        Args:
          zone: Zone in which Composer environment has been created.
          environment: Name of the Composer environment.
        """
        self._zone = zone
        self._environment = environment
        self._gcs_dag_location = None

    def deploy(self, name, dag_string):
        """Write `dag_string` as <name>.py into the environment's dag folder."""
        # 'gs://bucket/path/' split with maxsplit=3 -> ['gs:', '', bucket, path]
        _, _, bucket_name, file_path = self.gcs_dag_location.split('/', 3)
        dag_object = storage.Bucket(bucket_name).object(
            '{0}{1}.py'.format(file_path, name))
        dag_object.write_stream(dag_string, 'text/plain')

    @property
    def gcs_dag_location(self):
        """Lazily fetch, validate, normalize and cache the GCS dag folder."""
        if self._gcs_dag_location:
            return self._gcs_dag_location

        environment_details = Api.get_environment_details(self._zone, self._environment)
        if ('config' not in environment_details or
                'gcsDagLocation' not in environment_details.get('config')):
            raise ValueError('Dag location unavailable from Composer environment {0}'.format(
                self._environment))
        location = environment_details['config']['gcsDagLocation']
        if location is None or not self.gcs_file_regexp.match(location):
            raise ValueError(
                'Dag location {0} from Composer environment {1} is in incorrect format'.format(
                    location, self._environment))

        # Normalize with a trailing slash so deploy() can append file names.
        if not location.endswith('/'):
            location = location + '/'
        self._gcs_dag_location = location
        return self._gcs_dag_location
|
987,404 | fad1419d6e1b6a86d3051a0fa3e2ed64564d290d | from .AbstractSensorController import AbstractSensorController
|
def merge_temp_files(self):
    """Merge temporary files.

    Combines the GENESIS and NASTRAN temporary .dat bulk-data files into a
    single GENESIS input deck (self.datname): executive/solution control
    header first, then the GENESIS bulk data verbatim, then the NASTRAN bulk
    data (filtered and truncated to fixed-field width), then ENDDATA.

    NOTE(review): Python 2 code (print statements). The bare `raise` below
    has no active exception, so on missing data it fails with TypeError
    rather than a meaningful error -- consider raising IOError explicitly.
    """
    #merging NASTRAN and GENESIS .dat files
    self.genesisfile.close()
    self.genesisfile = open(self.genesisfile.name, 'r')
    genfile = self.genesisfile
    genesislines = genfile.readlines(1000000000)
    if not genesislines:
        print 'GENESIS BULK DATA NOT FOUND!'
        raise
    genfile.close()
    #
    self.nastranfile.close()
    nastranfile = open(self.nastranfile.name, 'r')
    nastranlines = nastranfile.readlines(1000000000)
    if not nastranlines:
        print 'NASTRAN BULK DATA NOT FOUND!'
        raise
    nastranfile.close()
    #
    mergedfile = open(self.datname, 'w')
    #writing GENESIS card initial data
    mergedfile.write('$ Executive Control\n')
    mergedfile.write('$\n')
    mergedfile.write('POST = PUNCH\n')
    mergedfile.write('SOL COMPAT1\n')
    mergedfile.write('THREADS = 8\n')
    mergedfile.write('LENVEC=16000M\n')
    mergedfile.write('CEND\n')
    mergedfile.write('$\n')
    mergedfile.write('$ Solution Control\n')
    mergedfile.write('$\n')
    mergedfile.write('LINE = 64,80\n')
    mergedfile.write('ECHO = UNSORT(PARAM,DOPT)\n')
    mergedfile.write('APRINT = LAST\n')
    mergedfile.write('DPRINT = LAST\n')
    mergedfile.write('UPRINT = LAST\n')
    mergedfile.write('SIZING = POST\n')
    mergedfile.write('GRAPH = YES\n')
    mergedfile.write('$\n')
    mergedfile.write('$ Output options\n')
    mergedfile.write('$\n')
    mergedfile.write('DISP(PLOT) = ALL\n')
    mergedfile.write('FORCE(PLOT) = ALL\n')
    mergedfile.write('OLOAD(PLOT) = ALL\n')
    mergedfile.write('SPCFORCE(PLOT) = ALL\n')
    mergedfile.write('STRESS(PLOT) = ALL\n')
    mergedfile.write('$\n')
    mergedfile.write('$ Loadcase definitions\n')
    mergedfile.write('$\n')
    # One SUBCASE per (load, spc) pair; loads_list/spcs_list assumed parallel.
    subcasecount=0
    for i in range(len(self.loads_list)):
        subcasecount += 1
        mergedfile.write('SUBCASE ' + str(subcasecount) + '\n')
        mergedfile.write(' LABEL = Loadcase ' + str(self.loads_list[i]) + '\n')
        mergedfile.write(' LOAD = ' + str(self.loads_list[i]) + '\n')
        mergedfile.write(' SPC = ' + str(self.spcs_list[i] ) + '\n')
    mergedfile.write('BEGIN BULK\n')
    mergedfile.write('DSCREEN,STRESS,-0.5,80\n')
    mergedfile.write('$\n')
    mergedfile.write('$ Parameters\n')
    mergedfile.write('$\n')
    mergedfile.write('PARAM AUTOSPC YES\n')
    mergedfile.write('PARAM GRDPNT 0\n')
    mergedfile.write('PARAM MAXRATIO1.0000+8\n')
    mergedfile.write('PARAM POST -1\n')
    mergedfile.write('$\n')
    mergedfile.write('$The following parameter activates BIGDOT\n')
    #mergedfile.write('DOPT,10\n')
    mergedfile.write('DOPT,' + str(self.num_cycles) + '\n')
    mergedfile.write('+,IREDCA,22222\n')
    mergedfile.write('+,OPTM,1\n')
    mergedfile.write('+,DELX,0.5\n')
    mergedfile.write('+,DXMIN,0.1\n')
    mergedfile.write('$\n')
    #copying GENESIS data
    mergedfile.write('$______________________________________________\n')
    mergedfile.write('$______________________________________________\n')
    mergedfile.write('$\n')
    mergedfile.write('$ BEGINNING GENESIS BULK DATA\n')
    mergedfile.write('$______________________________________________\n')
    mergedfile.write('$______________________________________________\n')
    for i in genesislines:
        mergedfile.write (i)
    startnext=False
    #copying NASTRAN data
    mergedfile.write('$______________________________________________\n')
    mergedfile.write('$______________________________________________\n')
    mergedfile.write('$\n')
    mergedfile.write('$ BEGINNING NASTRAN BULK DATA\n')
    mergedfile.write('$______________________________________________\n')
    mergedfile.write('$______________________________________________\n')
    # skip_flag: while True, drop '+' continuation lines of a property card
    # that was replaced by an entry in self.newprops.
    # startnext: only lines after 'BEGIN BULK' are copied.
    skip_flag = False
    for i in nastranlines:
        if i[:5]=='PARAM': continue
        if startnext==True:
            if i.find(',') > -1:
                # Free-field (comma) line: copied verbatim.
                # NOTE(review): this branch does not `continue`, so the line
                # appears to also reach the truncated write below -- confirm
                # whether comma lines can occur / are meant to be written twice.
                mergedfile.write(i)
            elif i.strip()[0:1] == '$':
                continue
            elif len(i.strip()) <= 1:
                continue
            elif i.find('ENDDATA') > -1:
                continue
            elif skip_flag:
                if i[:8].strip() == '+':
                    continue
                else:
                    skip_flag = False
            elif i[:4] == 'CBAR':
                # CBAR cards truncated to 8 fields (64 columns).
                mergedfile.write(i.strip()[:64] + '\n')
                continue
            # Drop property cards superseded by self.newprops (card, pid).
            if i[:8].strip() in self.newprops.keys():
                pcard = i[:8].strip()
                pid = int(i[8:16].strip())
                if pid in self.newprops[ pcard ].keys():
                    skip_flag = True
                    continue
            # Default: copy the card truncated to 9 fields (72 columns).
            mergedfile.write(i.strip()[:72] + '\n')
        if i.find('BEGIN BULK')>-1 and startnext==False:
            startnext = True
    mergedfile.write('ENDDATA\n')
    mergedfile.close()
|
987,406 | 517183437cbb24f669abee9fa64111c6cb9b0d32 | '''
Pradeepti Tandra
CS5001 Fall 2020
HW 1 - ATM
'''
def main():
    """Prompt for a withdrawal amount and print its bill breakdown."""
    # obtaining input from user
    money = int(input("Welcome to PDQ Bank! How much to withdraw?: "))
    # Greedy change-making, largest denomination first.
    FIFTIES, remainder = divmod(money, 50)
    TWENTIES, remainder = divmod(remainder, 20)
    TENS, remainder = divmod(remainder, 10)
    FIVES, ONES = divmod(remainder, 5)
    print("Chaching!\n",
          "You asked for $", money,
          "\n That breaks down to:\n",
          FIFTIES, "fifties,\n",
          TWENTIES, "twenties,\n",
          TENS, "tens,\n",
          FIVES, "fives,\n",
          ONES, "ones")


if __name__ == "__main__":
    main()
987,407 | eae5a8e91fb916a9c1922965d1b1025eef3258a8 | import numpy as np
import random
from numpy import random as rm
import matplotlib.pyplot as plt
def generate_arriving(num_people, top_floor):
    """Simulate the morning arrivals of `num_people` passengers.

    Arrival ticks fall in [0, 2700] (roughly 8:00-11:00) following an
    exponential model; arrivals at the same tick are grouped.

    Args:
        num_people: total number of arriving passengers.
        top_floor: exclusive upper bound for destination floors (floors 2..top_floor-1).

    Returns:
        (arriving_time, customer_floor): sorted unique arrival ticks, and for
        each tick the list of destination floors of the passengers arriving then.
    """
    # BUG FIX: 'rm.seed = 666' only overwrote the seed attribute and never
    # seeded the generator; it must be called.
    rm.seed(666)
    raw_times = [int(rm.exponential(500)) for _ in range(num_people)]

    arrivals_per_tick = {}
    for tick in raw_times:
        tick = min(tick, 2700)  # clamp outliers to the end of the window
        arrivals_per_tick[tick] = arrivals_per_tick.get(tick, 0) + 1

    arriving_time = []
    customer_floor = []
    # BUG FIX: dict.keys() has no .sort() on Python 3; iterate sorted() instead.
    for tick in sorted(arrivals_per_tick):
        arriving_time.append(tick)
        customer_floor.append([rm.randint(2, top_floor)
                               for _ in range(arrivals_per_tick[tick])])
    return arriving_time, customer_floor
def generate_normal(num_people, top_floor):
    """Simulate midday departures/returns (e.g. lunch breaks).

    Departure ticks are sampled in [2700, 6300) and each trip lasts between
    900 and 1800 ticks. Returns (back_times, floors, leaving_times, floors);
    the same floor list is deliberately returned twice, matching the caller's
    (arrival-floor, call-floor) expectation.
    """
    random.seed(666)
    # Sorted departure times; the trip duration draw happens afterwards so the
    # random stream matches the original implementation exactly.
    departure_times = sorted(random.sample(range(2700, 6300), num_people))
    return_times = [t + random.randint(900, 1800) for t in departure_times]

    floors = []
    for idx in range(num_people):
        random.seed(idx)
        floors.append([random.randint(2, top_floor)])

    return return_times, floors, departure_times, floors
def generate_leaving(num_people, top_floor):
    """Simulate the evening exodus (roughly 17:00-20:00).

    Returns (leaving_times, floors): sorted departure ticks sampled from
    [8100, 10800) and, per person, the floor the call originates from.
    """
    random.seed(666)
    departure_times = sorted(random.sample(range(8100, 10800), num_people))

    floors = []
    for idx in range(num_people):
        # Per-person reseed (idx*idx) kept to reproduce the original stream.
        random.seed(idx * idx)
        floors.append([random.randint(2, top_floor)])

    return departure_times, floors
# Define the function to combine the action in a day
def generate_day(num_people, top_floor):
    """Combine morning, midday and evening traffic into one day's event lists.

    Returns (arriving_time, customer_floor, calling_time, customer_current_floor)
    where arrivals come from the morning + midday returns, and calls come from
    midday departures + evening departures.
    """
    morning_times, morning_floors = generate_arriving(num_people, top_floor)
    midday_returns, midday_floors, midday_calls, midday_origins = \
        generate_normal(num_people, top_floor)
    evening_calls, evening_origins = generate_leaving(num_people, top_floor)

    arriving_time = list(morning_times) + list(midday_returns)
    customer_floor = list(morning_floors) + list(midday_floors)
    calling_time = list(midday_calls) + list(evening_calls)
    customer_current_floor = list(midday_origins) + list(evening_origins)
    return arriving_time, customer_floor, calling_time, customer_current_floor
def split_day(arriving_time, customer_floor, calling_time, customer_current_floor, ratio):
    """Randomly split a day's events into two groups of size ratio : 1-ratio.

    Arrivals (and their floors) are split together, as are calls (and their
    origin floors), preserving the original ordering within each group.

    Args:
        arriving_time / customer_floor: parallel lists of arrival events.
        calling_time / customer_current_floor: parallel lists of call events.
        ratio: fraction (0..1) of each event list assigned to the first group.

    Returns:
        (arriving_time_1, customer_floor_1, calling_time_1, customer_current_floor_1,
         arriving_time_2, customer_floor_2, calling_time_2, customer_current_floor_2)
    """
    random.seed(666)
    sub_size_1 = int(len(arriving_time) * ratio)
    sub_size_2 = int(len(calling_time) * ratio)
    # Sets instead of lists: the original tested `i in index_list` inside the
    # loops, which is O(n) per element (O(n^2) overall); membership in a set
    # is O(1) and the selected indices are unchanged.
    index_1 = set(random.sample(range(len(arriving_time)), sub_size_1))
    index_2 = set(random.sample(range(len(calling_time)), sub_size_2))

    arriving_time_1, customer_floor_1 = [], []
    arriving_time_2, customer_floor_2 = [], []
    for i, (t, fl) in enumerate(zip(arriving_time, customer_floor)):
        if i in index_1:
            arriving_time_1.append(t)
            customer_floor_1.append(fl)
        else:
            arriving_time_2.append(t)
            customer_floor_2.append(fl)

    calling_time_1, customer_current_floor_1 = [], []
    calling_time_2, customer_current_floor_2 = [], []
    for i, (t, fl) in enumerate(zip(calling_time, customer_current_floor)):
        if i in index_2:
            calling_time_1.append(t)
            customer_current_floor_1.append(fl)
        else:
            calling_time_2.append(t)
            customer_current_floor_2.append(fl)

    return (arriving_time_1, customer_floor_1, calling_time_1, customer_current_floor_1,
            arriving_time_2, customer_floor_2, calling_time_2, customer_current_floor_2)
|
987,408 | cd5e11a7b49a676a20bac2b31243d2fd91833a1a | me0 = "Utils"
import numpy as np
import os, time, subprocess
from datetime import datetime
"""
NAME
Utils.py
PURPOSE
Supporting functions for kinetic proofreding project.
EXECUTION
None
"""
##================================================
## PLOTTING
##================================================
## Default plot settings consumed by set_mplrc: fsa=axis labels, fsl=legend,
## fst=titles, fsn=tick numbers, plus figure size and save format.
fs = {"fsa":30,"fsl":26,"fst":20,"fsn":26,"figsize":(10,10),"saveext":"pdf"}
# fs = [14,12,14]
# figsize = (4,4)
def set_mplrc(fs):
    """
    Set MPL defaults

    Applies project-wide matplotlib rc settings (fonts, line widths, figure
    size, LaTeX text rendering) from a size dictionary shaped like the
    module-level `fs`. Imports matplotlib locally so merely importing Utils
    does not pull in a plotting backend.
    """
    import matplotlib as mpl
    from cycler import cycler
    ## Number format: switch to scientific notation outside 1e-3..1e3.
    mpl.rc("axes.formatter", limits=(-3,3))
    ## Lines
    mpl.rc("lines", linewidth=2, markersize=8)
    # mpl.rc("axes", prop_cycle=cycler('color',["348ABD","7A68A6","A60628","467821","CF4457","188487","E24A33"])) ## Not working
    ## Labels and legend
    mpl.rcParams["xtick.labelsize"] = fs["fsn"]
    mpl.rcParams["ytick.labelsize"] = fs["fsn"]
    mpl.rc("axes", labelsize=fs["fsa"])
    mpl.rc("legend", fontsize=fs["fsl"], fancybox=True)#framealpha=0.5,
    ## Font: Computer Modern via full LaTeX rendering.
    mpl.rcParams['font.family'] = 'serif'
    mpl.rcParams['font.serif'] = ['Computer Modern Roman']
    mpl.rcParams['text.usetex'] = True
    ## DFM
    # rcParams["font.family"] = "sans-serif"
    # rcParams["font.sans-serif"] = ["Computer Modern Sans"]
    # rcParams["text.usetex"] = True
    # rcParams["text.latex.preamble"] = r"\usepackage{cmbright}"
    ## Figure properties
    # NOTE(review): savefig format "jpg" here contradicts fs["saveext"]="pdf"
    # above -- confirm which is intended.
    mpl.rc("figure", figsize=fs["figsize"])
    mpl.rc("savefig", format="jpg", dpi=200)
    return
##==========================================
## INPUT / OUTPUT
##==========================================
def filename_par(filename, searchstr):
    """
    Scrape filename for parameter

    Returns, as a float, the number that immediately follows `searchstr`
    in `filename` (e.g. filename_par("hist_a2.5_b1", "a") -> 2.5).
    """
    # First character position after the search string.
    start = filename.find(searchstr) + len(searchstr)
    finish = start + 1
    # Widen the window while the scanned text (dots removed) is still numeric.
    # NOTE(review): relies on the Python 2 builtin `unicode`; this raises
    # NameError on Python 3 -- confirm the codebase is still Python 2.
    while unicode(filename[start:].replace(".",""))[:finish-start].isnumeric():
        finish += 1
    return float(filename[start:finish-1])
## ====================================================================
def check_path(histfile, vb):
    """
    Check whether directory exists; and if existing file will be overwritten.

    Raises IOError rather than overwrite an existing file; creates the parent
    directory (one level) when it is missing. `vb` enables verbose output.
    Python 2 code (print statement).
    """
    me = "Utils.check_path: "
    if os.path.isfile(histfile):
        # NOTE(review): IOError is given three args here, producing a tuple
        # message -- probably meant to be a single concatenated string.
        raise IOError(me+"file",histfile,"already exists. Not overwriting.")
    try:
        assert os.path.isdir(os.path.dirname(histfile))
    except AssertionError:
        os.mkdir(os.path.dirname(histfile))
        if vb: print me+"Created directory",os.path.dirname(histfile)
    return
def create_readme(histfile, vb):
    """
    If no readme exists, make one.
    NOTE commit is the LAST COMMIT -- maybe there have been changes since then.
    Assumes directory exists.

    Writes README.txt (timestamp + git HEAD hash) next to `histfile` unless
    one already exists. `vb` enables verbose output. Python 2 code (print
    statement; check_output returns str on Py2, bytes on Py3).
    """
    me = "Utils.create_readme: "
    readmefile = os.path.dirname(histfile)+"/README.txt"
    try:
        assert os.path.isfile(readmefile)
    except AssertionError:
        now = str(datetime.now().strftime("%Y-%m-%d %H.%M"))
        # Hash of the last commit; includes a trailing newline from git.
        commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
        header = "Time:\t"+now+"\nCommit hash:\t"+commit+"\n\n"
        with open(readmefile,"w") as f:
            f.write(header)
        if vb: print me+"Created readme file "+readmefile
    return
## ====================================================================
|
# Employee records keyed by employee id.
# NOTE(review): records carry no 'password' field, so authentication below can
# only ever report 'invalid password' for a known account -- confirm whether
# passwords were meant to be stored here.
users = {
    1000: {'empid': 1000, 'empname': 'ajay', 'job': 'developer', 'sal': 25000},
    1001: {'empid': 1001, 'empname': 'ram', 'job': 'developer', 'sal': 20000},
    1002: {'empid': 1002, 'empname': 'arun', 'job': 'qa', 'sal': 3000},
    1003: {'empid': 1003, 'empname': 'nithin', 'job': 'qa', 'sal': 15000}
}


def valid(**kwargs):
    """Validate an account number / password pair and print the outcome.

    Expects keyword arguments 'acno' (int key into `users`) and 'password'.
    Prints 'success', 'invalid password', or 'invalid account no'.
    """
    user = kwargs['acno']
    password = kwargs['password']
    if user in users:
        # BUG FIX: users[user]['password'] raised KeyError for every known
        # account because the records have no 'password' key; .get() makes a
        # missing stored password degrade to 'invalid password' instead.
        if password == users[user].get('password'):
            print('success')
        else:
            print('invalid password')
    else:
        print('invalid account no')


valid(acno=1000, password='user0')
987,410 | 0b2c2e57ba77e6a89de67eca6757957fbf540e31 | from typing import List
import discord
import yaml
from src.models.role import Role
from src.models.temp_ban import TempBan
from src.utils.api_manager import APIManager
from src.utils.embeds_manager import EmbedsManager
from src.utils.permissions import PermissionChecker
async def unbantemp_member(client: discord.Client, message: discord.Message, args: List[str], config):
    """Lift an active temporary ban from the member mentioned in `message`.

    Moderator-only. Removes the ban roles, deactivates the TempBan records in
    the API, resets channel permission overwrites, and notifies the channel,
    the log channel and (best-effort) the member.
    """
    api_manager = APIManager(config['api']['url'], config['api']['token'])

    if not PermissionChecker.is_moderator(message.author):
        return await message.channel.send(
            embed=EmbedsManager.error_embed("Vous n'avez pas les permissions nécessaires.")
        )

    # Display help
    if args and args[0] == '-h':
        return await message.channel.send(
            embed=EmbedsManager.information_embed("**Rappel de la commande de bannissement temporaire :**\n\n"
                                                  "`!eb <@user>`.")
        )

    # Check if target exist (exactly one mention required)
    target: discord.Member = message.mentions[0] if len(message.mentions) == 1 else False
    if not target:
        return await message.channel.send(
            embed=EmbedsManager.error_embed("Erreur dans la commande. Vous devez mentionner un utilisateur.")
        )

    # Fetch the member's active temporary bans from the API.
    state, res = api_manager.get_data(
        'temp-bans',
        user_id=str(target.id),
        is_active=True,
    )

    if not state:
        return await message.channel.send(
            embed=EmbedsManager.error_embed("Erreur dans l'api. Merci de contacter gast.")
        )

    if len(res) == 0:
        return await message.channel.send(
            embed=EmbedsManager.error_embed(f"Erreur dans la commande. {target.display_name} n'a pas de bantemp actif")
        )

    # Remove every role whose slug starts with 'ban'.
    with open("src/_data/roles.yml", 'r') as stream:
        roles = yaml.safe_load(stream)

    role = [Role(data=x) for x in roles if x['slug'].startswith('ban')]
    for r in role:
        await target.remove_roles(message.guild.get_role(r.role_id))

    # Send message
    await message.channel.send(
        embed=EmbedsManager.sanction_embed(
            f"Vous venez de retirer le bantemp de {target.display_name}"
        )
        .set_footer(icon_url=client.user.avatar_url, text='Made By Gastbob40')
    )

    await client.get_channel(config['channels']['log_reactions']).send(
        embed=EmbedsManager.sanction_embed(
            f"Le bantemp de {target.display_name} vient d'être retiré.",
            f"Auteur : {message.author.display_name}")
        .set_footer(icon_url=client.user.avatar_url, text='Made By Gastbob40')
    )

    # DM is best-effort: the member may have DMs disabled.
    try:
        await target.send(
            embed=EmbedsManager.sanction_embed(
                f"{message.author.display_name} vient de retirer votre bantemp."
            )
            .set_footer(icon_url=client.user.avatar_url, text='Made By Gastbob40')
        )
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and asyncio.CancelledError.
        pass

    # Update data: deactivate every matching TempBan record.
    temp_bans = [TempBan(data=x) for x in res]
    for bt in temp_bans:
        bt.is_active = False
        bt.update()

    # Reset permission overwrites the ban may have set on individual channels.
    for channel in message.guild.channels:
        try:
            if isinstance(channel, discord.TextChannel):
                if not target.permissions_in(channel).send_messages:
                    await channel.set_permissions(target,
                                                  overwrite=None)
            elif isinstance(channel, discord.VoiceChannel):
                if not target.permissions_in(channel).connect:
                    await channel.set_permissions(target,
                                                  overwrite=None)
        except Exception:
            # BUG FIX: was a bare `except:` (see note above); missing
            # permissions on a single channel should not abort the loop.
            pass
987,411 | 37196ee2140d9cded7c857910549c1600a7b47a1 | # 伪私有属性和私有方法
"""
在Python 中,并没有真正的私有
在级属性,方法命名时,实际是对名称做了一些特殊处理,使得外界无法访问到
处理方式:在名称前面加上: 定义__类名, 调用:_类名__名称
"""
class Women:
    """Demonstrates Python name mangling ("pseudo-private" members).

    Double-underscore names are rewritten by the compiler: ``__age`` becomes
    ``_Women__age`` and ``__secret`` becomes ``_Women__secret``, so they are
    still reachable from outside under the mangled names.
    """

    def __init__(self, name):
        self.name = name
        self.__age = 18  # "private" attribute (name-mangled)

    def __secret(self):
        # "private" method (name-mangled); returns the private age.
        return self.__age
if __name__ == '__main__':
    rose = Women('rose')
    # Access the "private" members through their name-mangled forms.
    print(rose._Women__age) # 18
    print(rose._Women__secret()) # 18
|
987,412 | 5b8aac3c806b30986204bf112a8891cf07804ddd | import math
def is_prime4(n=1013):
    """Primality test by trial division of odd candidates up to sqrt(n).

    Prints the divisor it finds (when any) and returns True/False.
    """
    if n < 2:
        return False
    if n == 2:
        return True  # 2 is prime
    if n % 2 == 0:
        print(n, "is divisable by 2")
        return False
    # Only odd divisors up to (and including) floor(sqrt(n)) need checking.
    limit = int(math.sqrt(n)) + 1
    for candidate in range(3, limit, 2):
        if n % candidate == 0:
            print(n, "is divisable by: ", candidate)
            return False
    return True
def is_prime3(n=1013):
    """Naive primality test dividing by every odd number below n//2 + 1.

    Deliberately slower than is_prime4 (which stops at sqrt(n)); both are
    kept so the timeit comparison at the bottom of the file is meaningful.

    Fixes: the ``n < 2`` guard now runs first, so 0 no longer takes the
    "divisable by 2" branch; the redundant ``prime`` flag (which made the
    loop return on the first divisor anyway) is replaced by direct returns.
    """
    if n < 2:
        return False
    if n == 2:
        return True  # 2 is prime
    if n % 2 == 0:
        print(n, " is divisable by 2")
        return False
    m = n // 2 + 1
    for x in range(3, m, 2):
        if n % x == 0:
            print(n, "is divisable by: ", x)
            return False
    return True
import timeit

# Benchmark both implementations on their default n=1013.
# NOTE: timeit runs each callable 1,000,000 times by default, so this
# comparison takes a long while to finish.
t1 = timeit.timeit(is_prime3)
t2 = timeit.timeit(is_prime4)
# t1/t2 = how many times faster the sqrt-bounded version is.
print("t1, prime3 : ",t1, "t2, prime4 : ",t2, t1/t2)
|
class Event:
    """Base class wrapping a webhook request mapping (Twilio-style keys).

    The required keys (``AccountSid``, ``From``, ``To``) raise ``KeyError``
    when missing, exactly as before; the geographic keys are optional and
    default to ``None``.
    """

    # Optional request keys mapped to the attribute that stores each one.
    _OPTIONAL_FIELDS = {
        'from_city': 'FromCity',
        'from_state': 'FromState',
        'from_zip': 'FromZip',
        'from_country': 'FromCountry',
        'to_city': 'ToCity',
        'to_state': 'ToState',
        'to_zip': 'ToZip',
        'to_country': 'ToCountry',
    }

    def __init__(self, request):
        self.account = request['AccountSid']
        self.from_ = request['From']
        self.to = request['To']
        # Collapses the original eight identical try/except blocks into one
        # loop; behavior is unchanged (missing key -> attribute set to None).
        for attr, key in self._OPTIONAL_FIELDS.items():
            try:
                value = request[key]
            except KeyError:
                value = None
            setattr(self, attr, value)

    def handle(self, flow):
        """Subclasses implement the event-specific handling."""
        raise NotImplementedError()
|
987,414 | e950e66a848d21d42476f839bd439950eeb8c1b2 | from django.urls import path
from . import views
# Report views; every route carries the caller's refresh token in the path.
# NOTE(review): tokens embedded in URLs end up in server/proxy logs and
# browser history -- confirm that is acceptable here.
urlpatterns = [
    path('submit/<str:refreshToken>/', views.submit, name='admin_submit'),
    path('officer_submit/<str:refreshToken>/', views.officer_submit, name='officer_submit'),
    path('officer/<str:refreshToken>/', views.officer, name='officer_reports'),
    path('admin/<str:refreshToken>/', views.admin, name='admin_reports'),
]
|
# Replace one grade for a named student in ex4.txt.
# The file alternates lines: a student's name, then their space-separated
# grades. The whole file is rewritten with the single grade replaced.
nome = input()
notaAntiga = float(input())
notaNova = float(input())
nomesTransc = []
notasTransc = []
with open('ex4.txt', 'r') as f:
    nomeFile = f.readline()
    notasArrFile = list(map(float, f.readline().split()))
    # Fix: the original also tested `notasArrFile != ''`, comparing a list
    # against a string -- always True, so it contributed nothing.
    while nomeFile != '':
        # rstrip('\n') also matches a final line without a trailing newline,
        # which the original slice [:len-1] chopped incorrectly.
        if nome == nomeFile.rstrip('\n'):
            for i in range(len(notasArrFile)):
                if notasArrFile[i] == notaAntiga:
                    notasArrFile[i] = notaNova
        notasTransc.append(notasArrFile)
        nomesTransc.append(nomeFile)
        nomeFile = f.readline()
        notasArrFile = list(map(float, f.readline().split()))
with open('ex4.txt', 'w') as f:
    for i in range(len(nomesTransc)):
        f.write(nomesTransc[i])
        # ' '.join replaces the original last-element test, which crashed on
        # both a typo (`notasTansc`) and a misplaced parenthesis
        # (`len(notasTransc[i]-1)` subtracted 1 from a list -> TypeError).
        f.write(' '.join(str(nota) for nota in notasTransc[i]) + '\n')
|
987,416 | 34a345d5cbdd566ea7a9e88db67383a5719208ed | from django.conf.urls.defaults import *
from django.core.urlresolvers import get_callable
from django.core.urlresolvers import RegexURLPattern
from lithium.conf import settings
from lithium.views.date_based import *
from lithium.blog.models import Post
from lithium.blog.feeds import LatestPosts, LatestPostsByTag, LatestPostsByAuthor
from lithium.blog.decorators import private_post_decorator
# Common kwargs for the date-based archive (list) views.
list_dict = {
    'queryset': Post.on_site.all(),
    'date_field': 'pub_date',
    'paginate_by': settings.BLOG_PAGINATE_BY,
    'template_object_name': 'post',
}

# Kwargs for the single-post detail view (no pagination).
detail_dict = {
    'queryset': Post.on_site.all(),
    'date_field': 'pub_date',
    'template_object_name': 'post',
}

# Every view is wrapped in private_post_decorator (lithium.blog.decorators);
# presumably it restricts access to private posts -- behavior lives there.
urlpatterns = patterns('',
    url(r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/(?P<slug>[-\w]+)/$', private_post_decorator(object_detail), detail_dict, 'blog.post_detail'),
    url(r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/$', private_post_decorator(archive_day), list_dict, 'blog.archive_day'),
    url(r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/$', private_post_decorator(archive_month), list_dict, 'blog.archive_month'),
    url(r'^(?P<year>\d{4})/$', private_post_decorator(archive_year), list_dict, 'blog.archive_year'),
    url(r'^$', private_post_decorator(archive_index), list_dict, 'blog.post_list'),
    url(r'^author/(?P<author>[-\w]+)/?$', private_post_decorator(archive_index), list_dict, 'blog.author_detail'),
    url(r'^tag/(?P<tag>[-\w]+)/$', private_post_decorator(archive_index), list_dict, 'blog.category_detail'),
)

# Syndication feeds keyed by URL slug (served under /feed/<slug>/).
feeds = {
    'latest': LatestPosts,
    'tag': LatestPostsByTag,
    'author': LatestPostsByAuthor,
}

urlpatterns += patterns('django.contrib.syndication.views',
    url(r'^feed/(?P<url>.*)/$', 'feed', {'feed_dict': feeds}, name='blog.feeds'),
)
|
987,417 | 17a2fa9d37f9287f9ac59bd51fb7bf08f0df55f5 | import math
import random
import sys
import time
import matplotlib.pyplot as plt
import numpy as np
from scene_entities import Scene
def normalize_vector(vector):
    """Return *vector* scaled to unit Euclidean length."""
    norm = np.linalg.norm(vector)
    return vector / norm
def length_vector(vector):
    """Return the Euclidean length of *vector*."""
    length = np.linalg.norm(vector)
    return length
def arbitrary_vector_in_plane(normal, D, xyz):
    """Unit vector from a point on the plane (normal . p + D = 0) toward *xyz*.

    The plane point is constructed on the first coordinate axis whose
    normal component is non-zero (solving normal[i] * p[i] + D = 0).
    """
    point_on_plane = np.zeros(3)
    for axis, component in enumerate(normal):
        if component != 0:
            point_on_plane[axis] = -D / component
            break
    # Normalization inlined (same math as the module's normalize_vector).
    direction = xyz - point_on_plane
    return direction / np.linalg.norm(direction)
def reflected_vector(V, N):
    """Reflect direction V about the (unit) normal N: R = V - 2(V.N)N."""
    projection = np.dot(V, N)
    return V - (2 * projection) * N
def get_reflection_color(V, intersect_object, scene, rec_depth):
    """Color contributed by mirror reflection at a hit point.

    V                -- incoming ray direction.
    intersect_object -- (t, point, surface, normal) tuple as produced by
                        find_intersections.
    rec_depth        -- remaining recursion budget, decremented per bounce.
    """
    intersect_surface = intersect_object[2]
    ref_color = intersect_surface.get_material(scene).reflection_color
    bg = scene.sett.background_color_3d
    eps = 1e-5
    # A (near-)black reflection color contributes nothing; skip the bounce.
    if np.sum(ref_color) < eps:
        return np.zeros(3)
    N = intersect_object[3]
    R = reflected_vector(V, N)
    intersect_point = intersect_object[1]
    # shifted_point = intersect_point + 1e-5 * N
    intersections = find_intersections(intersect_point, R, scene)
    if intersections == []:
        # Reflected ray escapes the scene: reflect the background color.
        return ref_color * bg
    nearest_object = intersections[0]
    if nearest_object[2] == intersect_surface: # not supposed to happen
        return np.zeros(3)
    return ref_color * get_color(R, intersections, scene, rec_depth - 1)
def get_diff_spec_color(V, intersect_object, scene):
    """Diffuse + specular (Phong) shading at a hit point, over all lights.

    Each light's contribution is attenuated by its soft-shadow intensity.
    NOTE(review): the specular comment mentions dot(H,N) (Blinn), but the
    code uses the mirror vector R and dot(-V,R), i.e. classic Phong --
    confirm which model was intended.
    """
    lig_intensity_list = soft_shadow(intersect_object, scene)
    N = intersect_object[3] # surface normal
    intersect_point = intersect_object[1]
    color = np.zeros(3)
    Kd = intersect_object[2].get_material(scene).difuse_color
    Ks = intersect_object[2].get_material(scene).spec_color
    n = intersect_object[2].get_material(scene).phong
    for i, light in enumerate(scene.lights):
        # Idiff = Kd * Ip * dot(N,P)
        L = normalize_vector(light.pos_3d - intersect_point)
        cos_theta = np.dot(N, L)
        if cos_theta > 0:
            color += Kd * cos_theta * lig_intensity_list[i] * light.color_3d
        # Ispec = Ks * Ip * dot(H,N) ** n
        R = 2 * np.dot(L, N) * N - L
        cos_phi = np.dot(-V, R)
        if cos_phi > 0:
            color += Ks * lig_intensity_list[i] * np.power(cos_phi, n) * light.color_3d * light.spec
    return color
# If the point is reached immediately, return 1.
# If an opaque object is on the way, return 0.
# If the point is reached through transparent objects, return a value in
# (0, 1] equal to the product of the transparency coefficients of all the
# transparent objects being crossed.
def shadow_reach_via_trans_objs(li_intersections, intersect_obj, scene):
    """How much light reaches intersect_obj along a shadow ray.

    li_intersections -- intersections of the light->point ray, nearest first.
    Returns a factor in [0, 1] as described in the comment above.
    """
    intersect_point = intersect_obj[1]
    intersect_surface = intersect_obj[2]
    eps = 1e-5
    trans_val = 1
    for i, li_intersect_obj in enumerate(li_intersections):
        li_intersect_obj = li_intersections[i]
        if li_intersect_obj[2] != intersect_surface:
            # A fully opaque occluder blocks the light entirely.
            if li_intersect_obj[2].get_material(scene).trans <= 0:
                return 0
        else:
            # Reached the target surface: it is lit only if this hit is the
            # same point we are shading (within eps).
            if length_vector(intersect_point - li_intersect_obj[1]) < eps:
                return trans_val
            else:
                return 0
        # Accumulate the transparency of each crossed object.
        trans_val *= li_intersect_obj[2].get_material(scene).trans
    return trans_val
def hard_shadow(intersect_obj, light, scene):
    """Light intensity at the hit point from a point light (no area sampling)."""
    eps = 1e-5
    intersect_point = intersect_obj[1]
    ray = normalize_vector(intersect_point - light.pos_3d)
    intersections = find_intersections(light.pos_3d, ray, scene)
    if intersections == []: # not supposed to happen
        return 1 - light.shadow
    nearest_object = intersections[0]
    if nearest_object[2] == intersect_obj[2]:
        if length_vector(intersect_point - nearest_object[1]) < eps:
            # Direct line of sight: (1 - shadow) + shadow == 1, fully lit.
            return (1 - light.shadow) * 1 + light.shadow * 1
    trans_value = shadow_reach_via_trans_objs(intersections, intersect_obj, scene)
    # NOTE(review): soft_shadow combines as (1-shadow) + shadow*ratio; here
    # trans_value is added without the light.shadow factor -- confirm this
    # asymmetry is intentional.
    return 1 - light.shadow + trans_value
def soft_shadow(intersect_object, scene):
    """Per-light intensity factors in [0, 1] for the hit point.

    Each area light is sampled on an N x N jittered grid on its square
    (N = scene.sett.shadow_rays); point lights (width <= 0) fall back to
    hard_shadow.
    """
    intersect_point = intersect_object[1]
    light_intensity_list = []
    N = scene.sett.shadow_rays
    for light in scene.lights:
        L = normalize_vector(intersect_point - light.pos_3d)
        # coefficients of the perpendicular plane to the ray
        # from the light to the intersection point
        D = -np.dot(light.pos_3d, L)
        if light.width <= 0:
            light_intensity = hard_shadow(intersect_object, light, scene)
            light_intensity_list.append(light_intensity)
            continue
        # Two in-plane axes spanning the light's square.
        V1 = arbitrary_vector_in_plane(L, D, light.pos_3d) / light.width
        V2 = np.cross(V1, L) / light.width
        # square origin point (corner of the light's square)
        P0 = light.pos_3d - (light.width / 2) * V1 - (light.width / 2) * V2
        num_hits = 0
        cell_edge = light.width / N
        for i in range(N):
            for j in range(N):
                # One jittered sample per grid cell.
                P = P0 + V2 * (i * cell_edge + random.uniform(0, cell_edge))
                P = P + V1 * (j * cell_edge + random.uniform(0, cell_edge))
                ray = normalize_vector(intersect_point - P)
                intersections = find_intersections(P, ray, scene)
                if intersections == []:
                    continue
                num_hits += shadow_reach_via_trans_objs(intersections, intersect_object, scene)
        hit_ratio = num_hits / (N * N)
        light_intensity = ((1 - light.shadow) + light.shadow * hit_ratio)
        light_intensity_list.append(light_intensity)
    return light_intensity_list
def get_plane_intersection(ray_origin, ray_direction, plane):
    """Intersect a ray with the plane N . p = offset.

    Returns (t, point, plane, normal) for a hit, or None when the hit is
    behind or too close to the origin (t <= 1e-4, avoids self-intersection).
    """
    normal = plane.normal_3d
    # Same algebra as -(dot(origin,N) - offset) / dot(dir,N).
    denominator = np.dot(ray_direction, normal)
    t = (plane.offset - np.dot(ray_origin, normal)) / denominator
    if t <= 1e-4:
        return None
    hit_point = ray_origin + t * ray_direction
    return t, hit_point, plane, normal
def is_intersected(box, ray_origin, ray_direction):
    """Ray / axis-aligned-box slab test.

    Returns (hit, t): hit is True when the ray crosses the box between
    box.min_bound and box.max_bound; t is the entry distance (or the exit
    distance when the origin is inside the box). Hits with t <= 1e-4 are
    rejected to avoid self-intersection.
    """
    invdir = 1 / ray_direction
    # Fix: np.int was removed in NumPy 1.24; the builtin int is the correct
    # dtype for the 0/1 sign indices.
    sign = (invdir < 0).astype(int)
    bounds = [box.min_bound, box.max_bound]
    # Slab distances per axis; sign selects near/far bound for the ray.
    tmin = (bounds[sign[0]][0] - ray_origin[0]) * invdir[0]
    tmax = (bounds[1 - sign[0]][0] - ray_origin[0]) * invdir[0]
    tymin = (bounds[sign[1]][1] - ray_origin[1]) * invdir[1]
    tymax = (bounds[1 - sign[1]][1] - ray_origin[1]) * invdir[1]
    if (tmin > tymax) or (tymin > tmax):
        return False, 0
    if tymin > tmin:
        tmin = tymin
    if tymax < tmax:
        tmax = tymax
    tzmin = (bounds[sign[2]][2] - ray_origin[2]) * invdir[2]
    tzmax = (bounds[1 - sign[2]][2] - ray_origin[2]) * invdir[2]
    if (tmin > tzmax) or (tzmin > tmax):
        return False, 0
    if tzmin > tmin:
        tmin = tzmin
    if tzmax < tmax:
        tmax = tzmax
    t = tmin
    if t < 0:
        # Origin inside the box: use the exit distance instead.
        t = tmax
    if t < 0:
        return False, 0
    if t <= 1e-4:
        return False, 0
    return True, t
def find_intersections(ray_origin, ray_direction, scene: Scene):
    """All ray/scene intersections, sorted by distance t (nearest first).

    Each entry is a (t, point, surface, normal) tuple; surfaces are the
    scene's boxes (via their face planes), planes, and spheres.
    """
    intersections = []
    for box in scene.boxes:
        return_value = is_intersected(box, ray_origin, ray_direction)
        if return_value[0]:
            intersection_point = ray_origin + return_value[1] * ray_direction
            # Record the box face plane whose hit coincides with the
            # slab-test hit point.
            for plane in box.planes:
                intersect_obj = get_plane_intersection(ray_origin, ray_direction, plane)
                if intersect_obj is not None:
                    if np.allclose(intersect_obj[1], intersection_point):
                        intersections.append(intersect_obj)
    for plane in scene.planes:
        intersect_obj = get_plane_intersection(ray_origin, ray_direction, plane)
        if intersect_obj is not None:
            intersections.append(intersect_obj)
    for sphere in scene.spheres:
        # geometric method
        L = sphere.center_3d - ray_origin
        t_ca = np.dot(L, ray_direction)
        if t_ca < 0:
            continue # no intersection
        d_power2 = np.dot(L, L) - t_ca ** 2
        r_power2 = sphere.radius ** 2
        if d_power2 > r_power2:
            continue # the intersection is outside of the sphere
        t_hc = math.sqrt(r_power2 - d_power2)
        # NOTE(review): for an origin inside the sphere, t_ca - t_hc is
        # negative; whether that case matters here is not visible -- confirm.
        t = min(t_ca - t_hc, t_ca + t_hc) # distance
        intersection_point = ray_origin + t * ray_direction
        N = normalize_vector(intersection_point - sphere.center_3d)
        intersections.append((t, intersection_point, sphere, N))
    return sorted(intersections, key=lambda t : t[0])
def get_color(trace_ray, intersections, scene, rec_depth):
    """Recursive shading of the nearest intersection.

    Blends: trans * (color seen through the surface, via the remaining
    intersections) + (1 - trans) * diffuse+specular + reflection.
    Falls back to the background color when nothing was hit or the
    recursion budget is exhausted.
    """
    if intersections == [] or rec_depth <= 0:
        return scene.sett.background_color_3d
    intersect_object = intersections[0]
    trans = intersect_object[2].get_material(scene).trans
    trans_color = np.zeros(3)
    if trans > 0:
        # Transparent surface: shade what lies behind it along the same ray.
        trans_color = get_color(trace_ray, intersections[1:], scene, rec_depth - 1)
    diff_spec = get_diff_spec_color(trace_ray, intersect_object, scene)
    ref_color = get_reflection_color(trace_ray, intersect_object, scene, rec_depth)
    return trans * trans_color + (1 - trans) * diff_spec + ref_color
def trace_ray_from_camera(intersections, scene):
    """Shade the nearest of *intersections* as seen from the camera.

    Returns the background color when the ray hit nothing; otherwise starts
    the recursive shading with the full recursion budget (rec_max).
    """
    if intersections == []:
        return scene.sett.background_color_3d
    # View direction from the camera to the nearest hit point.
    V = normalize_vector(intersections[0][1] - scene.camera.pos_3d)
    return get_color(V, intersections, scene, scene.sett.rec_max)
def ray_casting(scene: Scene, image_width=500, image_height=500):
    """Render the scene to an (image_height, image_width, 3) float array.

    Builds a camera-space screen, then shoots one ray per pixel. When
    camera.fisheye is set, the pixel is remapped through the fisheye model
    selected by camera.k_val before tracing.
    """
    camera = scene.camera
    Vz = normalize_vector(camera.look_at_3d - camera.pos_3d) # towards
    # set screen original point
    screen_center_point = camera.pos_3d + camera.sc_dist * Vz
    screen_aspect_ratio = image_width / image_height
    screen_width = camera.sc_width
    screen_height = screen_width / screen_aspect_ratio
    # Per-pixel steps along the screen's right/up axes.
    Vx = (normalize_vector(np.cross(camera.up_3d, Vz)) * screen_width) / image_width # right
    Vy = (normalize_vector(np.cross(Vx, Vz)) * screen_height) / image_height
    screen_orig_point = screen_center_point - (image_width / 2) * Vx - (image_height / 2) * Vy
    P0 = np.copy(screen_orig_point)
    screen = np.zeros((image_height, image_width, 3))
    for i in range(image_height):
        p = np.copy(P0)
        for j in range(image_width):
            if camera.fisheye:
                sensor_radius = length_vector(p - screen_center_point)
                f = camera.sc_dist
                k = camera.k_val
                # k selects the fisheye projection model.
                if 0 < k <= 1:
                    theta = np.arctan((k * sensor_radius) / f) / k
                elif k == 0:
                    theta = sensor_radius / f
                elif -1 <= k < 0:
                    theta = np.arcsin((k * sensor_radius) / f) / k
                else:
                    raise Exception("not supported k")
                # Pixels mapping beyond 90 degrees are left black (zeros).
                if theta < np.pi / 2:
                    image_radius = math.tan(theta) * camera.sc_dist
                    mid_to_point = normalize_vector(p - screen_center_point)
                    new_point = screen_center_point + mid_to_point * image_radius
                    ray_direction_fish = normalize_vector(new_point - camera.pos_3d)
                    intersections = find_intersections(camera.pos_3d, ray_direction_fish, scene)
                    color = trace_ray_from_camera(intersections, scene)
                    screen[i][j] = np.clip(color, 0, 1)
            else: # not fisheye
                ray_direction_straight = normalize_vector(p - camera.pos_3d)
                intersections = find_intersections(camera.pos_3d, ray_direction_straight, scene)
                color = trace_ray_from_camera(intersections, scene)
                screen[i][j] = np.clip(color, 0, 1)
            p += Vx
        P0 += Vy
    return screen
def main():
    """CLI entry point: render a scene file to an image.

    Usage: python <this file> <scene_file> <out_image> [width] [height]
    (width and height default to 500).
    """
    scene_file_path = sys.argv[1]
    out_path = sys.argv[2]
    if len(sys.argv) > 3:
        image_width = int(sys.argv[3])
    else:
        image_width = 500
    if len(sys.argv) > 4:
        image_height = int(sys.argv[4])
    else:
        image_height = 500
    scene = Scene(scene_file_path)
    screen = ray_casting(scene, image_width, image_height)
    plt.imsave(out_path, screen)


if __name__ == "__main__":
    main()
|
987,418 | ed4490848bb1a7eea3034cd65742fea505e9385d | # encoding: utf-8
import scrapy
from scrapy.http import Request
import csv
import re
import scrapy
from scrapy.http import Request
import csv
import time
# import MySQLdb as mdb
# con=mdb.connect("localhost","r","","xad_database")
from scrapy.http import FormRequest
import pprint
import MySQLdb
from random import randint
# from time import sleep
# Module-level MySQL connection (hard-coded localhost credentials).
con = MySQLdb.connect(host ="localhost", user="root",passwd="root",db="Test")
cursor =con.cursor()
# CSV sink; 'wb' is the Python 2 csv convention. The 'geodriud' filename
# typo is preserved as-is.
output_csv = csv.writer(open('geodriud_data.csv', 'wb'))
output_csv.writerow(['BrandName','StoreName' , 'RawAddress', 'Full_Street', 'City','State','Zipcode', 'PhoneNumber'])
class geogoidCraw(scrapy.Spider):
    """Python 2 Scrapy spider scraping store locations from geodruid.com
    brand pages and the fressnapf.de store finder, writing rows to
    MySQL and CSV."""
    name ='geo'
    # allowed_domains = ["http://easternusa.salvationarmy.org/"]
    # NOTE(review): this first start_urls list is dead -- it is overwritten
    # by the assignment a few lines below.
    start_urls = [
        # "http://www.geodruid.com/intl/en/brands/4995:promod/DE:germany",
        # "http://www.geodruid.com/intl/en/brands/9066:stefanel/DE:germany",
        # "http://www.geodruid.com/intl/en/brands/6419:only/DE:germany",
        # "http://www.geodruid.com/intl/en/brands/6943:engbers/DE:germany"
        # "http://www.geodruid.com/intl/en/brands/6599:edc-by-esprit/DE:germany"
    ]
    #"http://www.geodruid.com/intl/en/brands/6931:betty-barclay/DE:germany"]#"http://www.geodruid.com/intl/en/brands/1135:bosch-car-service/DE:germany"]
    # start_urls =[#'http://www.geodruid.com/intl/en/place/1749659-mezger-gmbh-co-kg-autoreparatur-bamberg-deutschland']
    # start_urls =["http://www.geodruid.com/intl/en/brands/268:volkswagen/DE:germany"]
    # (removed: commented-out start_requests() variant that read seed URLs
    # from a local CSV file)
    # http_handle_list =[302]
    start_urls =['https://www.fressnapf.de/marktfinder/']
def parse(self, response):
if 'geodriud' in response.url:
links = response.xpath('//div[@class="result-list-ctrl"]/ul/li/a/@href').extract()
if not links:
yield Request(url = response.url, callback = self.parse_next)
for link in links:
link = 'http://www.geodruid.com' + link
yield Request(url = link, callback = self.parse_next)
elif 'fressnapf.de' in response.url:
links = response.xpath('//div[@class="store-list"]/ul/li/a/@href').extract()
for link in links:
link = 'https://www.fressnapf.de' + link
yield Request(url = link, callback = self.parse_next)
def parse_next(self, response):
print response.url
if 'geodriud' in response.url:
linkss = response.xpath('//div[@class="poi-lstsq-info-name"]/a/@href').extract()
# print linkss, len(set(linkss))
BrandName ="".join(response.xpath('//li[@class="step_just_done"]/a/text()').extract())
time.sleep(randint(3,7))
lst = []
for link in linkss:
link ='http://www.geodruid.com' + link
# lst.append(link)
yield Request(url = link, callback = self.parse_last,meta ={'brand_name':BrandName})
elif 'fressnapf.de' in response.url:
links = response.xpath('//a[@class="store-item"]/@href').extract()
for link in links:
link = 'https://www.fressnapf.de' + link +'/markt'
print link
yield Request(url = link, callback = self.parse_last)
    def parse_last(self, response):
        """Extract one store's details and upsert them into MySQL.

        Two site-specific branches: geodruid detail pages (og: meta tags)
        and fressnapf.de store pages (address markup + reverse geocoding).
        """
        if 'geodruid' in response.url:
            try:
                BrandName = response.meta['brand_name']
                # All fields come from OpenGraph meta tags on the page.
                BussinessName = "".join(response.xpath('//meta[@property="og:title"]/@content').extract()).strip()
                Full_Street = "".join(response.xpath('//meta[@property="og:street-address"]/@content').extract()).strip()
                State = "".join(response.xpath('//meta[@property="og:region"]/@content').extract()).strip()
                Zipcode = "".join(response.xpath('//meta[@property="og:postal-code"]/@content').extract()).strip()
                PhoneNumber = "".join(response.xpath('//meta[@property="og:phone_number"]/@content').extract()).strip()
                Latitude = "".join(response.xpath('//meta[@property="og:latitude"]/@content').extract()).strip()
                Longitude = "".join(response.xpath('//meta[@property="og:longitude"]/@content').extract()).strip()
                City = "".join(response.xpath('//meta[@property="og:locality"]/@content').extract()).strip()
                Country = "".join(response.xpath('//meta[@property="og:country-name"]/@content').extract()).strip()
                Raw_Address = Full_Street + City + Zipcode + State
                url = response.url
                final_db = [BrandName, BussinessName,Full_Street, City, State, Zipcode,Country,PhoneNumber,Latitude,Longitude, Raw_Address, url]
                print final_db, len(final_db)
                '''Creating Table By Brand Name'''
                import MySQLdb
                con = MySQLdb.connect(host ="localhost", user="root",passwd="root",db="germany_data", use_unicode=True,charset="utf8")
                with con:
                    cur=con.cursor()
                    create_table = ('CREATE TABLE if NOT EXISTS ' + str('geodruid') +' LIKE GermanyStructure;')
                    cur.execute(create_table)
                    con.commit()
                    ''''''''''''''''INSERT into Table by Brand Name'''''''''''''''
                    cur.execute('''INSERT IGNORE INTO ''' + str('geodruid') + ''' values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)''', final_db) #### Number of coulms input
                    con.commit()
            except:
                # NOTE(review): bare except hides every failure, and the log
                # file is reopened in 'w' mode so each failure overwrites the
                # previous one -- confirm that is intended.
                text_file = open("geodruid.txt", "w")
                text_file.write("Failed Url: %s" % response.url)
                text_file.close()
        elif "fressnapf.de" in response.url:
            # try:
            print "responssnse>>>>", response.url
            BrandName = 'fressnapf'
            # Address block: line 0 = store name, 1 = street, 2 = zip + city.
            BussinessName = response.xpath('//div[@class="span3 address-description"]/address/text()').extract()
            BussinessName = BussinessName[0].strip() if BussinessName else None
            Full_Street = response.xpath('//div[@class="span3 address-description"]/address/text()').extract()
            Full_Street = Full_Street[1].strip() if Full_Street else None
            Second_block = response.xpath('//div[@class="span3 address-description"]/address/text()').extract()
            Second_block = Second_block[2].strip() if Second_block else None
            Second_block=Second_block.split()
            PhoneNumber = response.xpath('//span[@class="phone calling-link"]/text()').extract_first(default='None').strip()
            try:
                City = Second_block[1].strip()
            except:
                City = None
            try:
                Zipcode = Second_block[0].strip()
            except:
                Zipcode = None
            Latitude = response.xpath('//div[@class="module map row-fluid"]/div/@data-lat').extract_first(default='None')
            Longitude = response.xpath('//div[@class="module map row-fluid"]/div/@data-lng').extract_first(default='None')
            print BussinessName
            print Full_Street
            print City
            print Zipcode
            print Latitude
            print Longitude
            print Zipcode + " " + City
            # German state code -> full name, used to map the reverse-geocoded
            # state name back to its two-letter abbreviation.
            State_Lookup = {'BE': 'Berlin', 'RP': 'Rhineland-Palatinate (Rheinland-Pfalz)', 'BB': 'Brandenburg', 'MV': 'Mecklenburg-Western Pomerania',
            'SH': 'Schleswig-Holstein', 'ST': 'Saxony-Anhalt (Sachsen-Anhalt)', 'SN': 'Saxony (Freistaat Sachsen)', 'HH': 'Hamburg (Freie und Hansestadt Hamburg)',
            'BW': 'baden-württemberg', 'NI': 'Lower Saxony (Niedersachsen)', 'TH': 'Thuringia (freistaat thuringen)',
            'SL': 'Saarland', 'HB': 'Bremen (Freie Hansestadt Bremen)', 'NW': 'North Rhine-Westphalia (Nordrhein-Westfalen)', 'BY': 'Bavaria (Freistaat Bayern) ',
            'HE': 'Hesse (Hessen)','SN':'Sachsen'}
            import sys
            """LATITUDE LONGITUDE"""
            from geopy.geocoders import Nominatim
            from geopy.geocoders import GeocoderDotUS
            # NOTE(review): reload(sys)/setdefaultencoding is a Python 2
            # encoding hack with process-wide side effects -- confirm.
            reload(sys)
            sys.setdefaultencoding("utf-8")
            geolocator = Nominatim()
            latlon = '%s, %s'%(Latitude,Longitude)
            location = geolocator.reverse(latlon,timeout = 60)
            state_geo = location.raw['address']['state']
            # for-else: State stays 'N/A' when no lookup entry matched.
            for key, v in State_Lookup.iteritems():
                value = State_Lookup[key].encode('utf-8')
                if state_geo.lower() in value.lower():
                    State = key
                    break
            else:
                State = 'N/A'
            print State
            Country = 'Germany'
            Raw_Address = Full_Street + City + Zipcode + State
            url = response.url
            final_db = [BrandName, BussinessName,Full_Street, City, State, Zipcode,Country,PhoneNumber,Latitude,Longitude, Raw_Address, url]
            print final_db, len(final_db)
            '''Creating Table By Brand Name'''
            import MySQLdb
            con = MySQLdb.connect(host ="localhost", user="root",passwd="root",db="germany_data", use_unicode=True,charset="utf8")
            with con:
                cur=con.cursor()
                create_table = ('CREATE TABLE if NOT EXISTS ' + str(BrandName) +' LIKE GermanyStructure;')
                cur.execute(create_table)
                con.commit()
                ''''''''''''''''INSERT into Table by Brand Name'''''''''''''''
                cur.execute('''INSERT IGNORE INTO ''' + str(BrandName) + ''' values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)''', final_db) #### Number of coulms input
                con.commit()
            # (removed: large commented-out legacy block -- an alternative
            # geodruid parsing path and old CSV/DB write code)
|
def palindrome(iniStr):  # initial string
    """Return True if *iniStr* reads the same forwards and backwards,
    ignoring case, spaces and tab characters.

    Fixes the original, which returned None (not a bool) for the empty
    string and shadowed the builtin name ``str``.
    """
    cleaned = iniStr.lower().replace(" ", "").replace("\t", "")
    return cleaned == cleaned[::-1]
987,420 | 2e8414d8d3f36d3fc765b9101f70413b838fe884 | import requests
import soldier
import json
import sys
from getpass import getpass
# GitHub REST API endpoint for creating a repo for the authenticated user.
URL = 'https://api.github.com/user/repos'
# True when running under Python 2 (selects raw_input over input below).
py2 = True if sys.version_info[0] == 2 else False
def create_readme():
    """Placeholder: intended to generate a README for the new repo (not implemented)."""
    pass
def create_repo(remote_ssh, description, name, is_private):
    """Create a GitHub repository and add it as the local 'origin' remote.

    remote_ssh  -- use the SSH clone URL when truthy, HTTPS otherwise.
    description -- repository description sent to the API.
    name        -- repository name; falls back to the current directory's
                   basename when falsy.
    is_private  -- create the repo as private when truthy.
    Returns the HTTP status code from the GitHub API (201 on success).
    Raises RuntimeError when the API call fails.
    """
    if not name:
        # Default the repo name to the current working directory's basename.
        dir_name = soldier.run('pwd').output
        repo_name = dir_name.strip().split('/')[-1]
    else:
        repo_name = name
    body = {
        'name': repo_name,
        'description': description,
        'private': is_private,
    }
    if py2:
        username = raw_input('Username: ')
    else:
        username = input('Username: ')
    # NOTE(review): GitHub has retired basic password authentication; a
    # personal access token must be supplied as the password -- confirm.
    password = getpass('Password: ')
    req = requests.post(URL, data=json.dumps(body), auth=(username, password))
    if req.status_code != 201:
        # Fail loudly instead of crashing with a KeyError on 'ssh_url'.
        raise RuntimeError('GitHub repo creation failed ({}): {}'.format(req.status_code, req.text))
    payload = req.json()
    remote_url = payload['ssh_url'] if remote_ssh else payload['clone_url']
    soldier.run('git init')
    soldier.run('git remote add origin {}'.format(remote_url))
    return req.status_code
|
987,421 | 93ce14d57d53f5caeecd7881647c5e4e5f2abbbe | # write your schemas in this files. Use pydantic
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional
from uuid import UUID
import asyncpg.pgproto.pgproto
import pydantic.json
from app.data.models import USER_TYPES
from core.factories import settings
from pydantic import BaseModel, validator
# Teach pydantic's JSON encoder to serialize asyncpg's UUID type as a string.
pydantic.json.ENCODERS_BY_TYPE[asyncpg.pgproto.pgproto.UUID] = str


class UserSchema(BaseModel):
    # Payload for creating a user.
    identity: str
    claim: Optional[Dict[Any, Any]]


class UpdateUserSchema(BaseModel):
    # Partial update: every field is optional.
    identity: Optional[str]
    claim: Optional[Dict[Any, Any]]


class UserSchemaDB(UserSchema):
    # Database row shape: adds server-managed columns to UserSchema.
    id: UUID
    created: datetime
    updated: Optional[datetime]

    class Config:
        orm_mode = True
class GroupSchema(BaseModel):
    # Group (role) payload; name is validated against the USER_TYPES enum.
    name: USER_TYPES

    class Config:
        use_enum_values = True

    @validator("name")
    def validate_name(cls, v):
        # Normalize group names to upper case.
        return v.upper()


class GroupSchemaDB(GroupSchema):
    # Database row shape for a group.
    id: UUID
    created: datetime
    updated: Optional[datetime]

    class Config:
        orm_mode = True


class UserGroupSchema(BaseModel):
    # Membership link between a user and a group.
    user_id: UUID
    group_id: UUID


class UpdateUserGroupSchema(BaseModel):
    # Partial update of a membership link.
    user_id: Optional[UUID]
    group_id: Optional[UUID]


class UserGroupSchemaDB(UserGroupSchema):
    # Database row shape for a membership link.
    id: UUID
    created: datetime
    updated: Optional[datetime]

    class Config:
        orm_mode = True
class ServiceSchema(BaseModel):
    # Registered service; names are normalized to upper case.
    name: str

    @validator("name")
    def validate_name(cls, v):
        return v.upper()


class ServiceSchemaDB(ServiceSchema):
    # Database row shape for a service.
    id: UUID
    created: datetime
    updated: Optional[datetime]

    class Config:
        orm_mode = True


class EndpointSchema(BaseModel):
    # Endpoint belonging to a service; prefix is the optional path prefix.
    service_id: UUID
    prefix: Optional[str]


class UpdateEndpointSchema(BaseModel):
    # Partial update of an endpoint.
    service_id: Optional[UUID]
    prefix: Optional[str]


class EndpointSchemaDB(EndpointSchema):
    # Database row shape for an endpoint.
    id: UUID
    created: datetime
    updated: Optional[datetime]

    class Config:
        orm_mode = True


class MethodSchema(BaseModel):
    # HTTP method; normalized to upper case (e.g. "get" -> "GET").
    name: str

    @validator("name")
    def validate_name(cls, v):
        return v.upper()


class MethodSchemaDB(MethodSchema):
    # Database row shape for a method.
    id: UUID
    created: datetime
    updated: Optional[datetime]

    class Config:
        orm_mode = True
class PermissionSchema(BaseModel):
    # Grants *entity* (of *entity_type*) access to one service/endpoint/method.
    entity: str
    entity_type: str
    service_id: UUID
    method_id: UUID
    endpoint_id: UUID


class UpdatePermissionSchema(BaseModel):
    # Partial update of a permission grant.
    entity: Optional[str]
    method_id: Optional[UUID]
    endpoint_id: Optional[UUID]


class PermissionSchemaDB(PermissionSchema):
    # Database row shape for a permission grant.
    id: UUID
    created: datetime
    updated: Optional[datetime]

    class Config:
        orm_mode = True


class PermissionCheckSchema(BaseModel):
    # Query payload for an authorization check; all fields optional.
    entity: Optional[str]
    entity_type: Optional[str]
    service: Optional[str]
    endpoint: Optional[str]
    method: Optional[str]

    @validator("method", "entity_type", "service")
    def _upper(cls, v):
        # Normalize to upper case, leaving None/empty values untouched.
        if v:
            return v.upper()
        return v
class JWTPayload(BaseModel):
    # Claims carried by an issued JWT.
    iss: str = settings.JWT_ISSUER
    # NOTE(review): datetime.now() is evaluated once at import time, so every
    # token issued by a long-lived process shares the same iat. A
    # Field(default_factory=datetime.now) would compute it per instance --
    # confirm which behavior is intended.
    iat: datetime = datetime.now()
    exp: datetime
    identity: str
    role: str
    type: str = "ACCESS"


class JWTHeaders(BaseModel):
    # Fixed JOSE header for RS256-signed tokens.
    alg: str = "RS256"
    typ: str = "JWT"
|
987,422 | 866822d8545dd79c5b889cbb5eccd025f1bf8a24 | import re
from Tokens import *
# Token categories:
#   Keyword    - reserved word
#   ID         - identifier
#   Delimiter  - delimiter
#   Arithmetic - arithmetic operation
#   Assigment  - assignment
#   EOF        - end of file
#   Error      - invalid word
# Ordered token table: matching is attempted top to bottom, so keywords
# are tried before the catch-all Identifier regex.
TokensLst=[
    Tokens("EOF","\#","EOF"),
    Tokens("EOL","\\"+chr(10),"EOL"),
    Tokens("function","function","keyword"),
    Tokens("let","let","keyword"),
    Tokens("return","return","keyword"),
    Tokens("LParent","\(","Delimiter"),
    Tokens("RParent","\)","Delimiter"),
    Tokens("LBraket","\{","Delimiter"),
    Tokens("RBraket","\}","Delimiter"),
    Tokens("Comma","\,","Delimiter"),
    Tokens("+","\+","Arithmetic"),
    Tokens("-","\-","Arithmetic"),
    Tokens("*","\*","Arithmetic"),
    Tokens("/","\/","Arithmetic"),
    Tokens("=","\=","Assigment"),
    Tokens("Identifier","[^0-9\+\-\:\*\/\(\)\%\&\|]([^\+\-\:\*\/\(\)\%\&\|\{\}]*)","ID"),
    Tokens("ERROR","","Error"),
]
# Receives a word and returns its token type.
def TokenType(word: str) -> Tokens:
    """Return the first entry of TokensLst whose regex matches *word*.

    NOTE(review): when nothing matches (e.g. a word starting with a digit --
    there is no number token), this calls exit() and kills the process; the
    ERROR entry in TokensLst is deliberately skipped and never returned.
    Confirm whether a graceful error token was intended instead.
    """
    for Token in TokensLst:
        if Token.Name != "ERROR":
            find=re.match(Token.Regex,word)
            if find is not None:
                return Token
    exit()
def SeparatesWords(Text: str):
    """Split source *Text* into lexer words.

    Spaces and end-of-line flush the accumulated word; single-character
    delimiter/arithmetic/assignment tokens are emitted as their own words.
    NOTE(review): a space following another separator appends an empty
    string to the result, which TokenType cannot match -- confirm the input
    never produces that case.
    """
    words=[]
    auxWord=""
    i=0
    while i<len(Text):
        if Text[i]==" ":
            # Space terminates the current word.
            words.append(auxWord)
            auxWord=""
        elif TokenType(Text[i]).Type=="Delimiter" or TokenType(Text[i]).Type=="Arithmetic" or TokenType(Text[i]).Type=="Assigment":
            # One-character token: flush the pending word, then emit it.
            if auxWord!="":
                words.append(auxWord)
            words.append(Text[i])
            auxWord=""
        elif TokenType(Text[i]).Type=="EOL":
            # Newline: flush the pending word and skip the indentation that
            # follows (the inner loop rewinds one position before breaking).
            if auxWord!="":
                words.append(auxWord)
            auxWord=""
            while True:
                i+=1
                if Text[i]!=" ":
                    i-=1
                    break
        elif TokenType(Text[i]).Type=="EOF":
            words.append(Text[i])
            auxWord=""
        else:
            # Any other character extends the current word.
            auxWord+=Text[i]
        i+=1
    return words
def CodeToTokens(code: str) -> str:
    """Tokenize *code* and render the token stream as a space-separated string.

    Keywords render as themselves, identifiers as "ID(<word>)", delimiters
    by their token name; every other category renders as its Type string.
    """
    TokensTxt=""
    words = SeparatesWords(code)
    print(words)
    for word in words:
        _TokenType=TokenType(word).Type
        if _TokenType=="keyword":
            _TokenType=word
        elif _TokenType=="ID":
            _TokenType="ID("+word+")"
        elif _TokenType=="Delimiter":
            _TokenType=TokenType(word).Name
        TokensTxt+=_TokenType+" "
    return TokensTxt
# Script entry: read the JS source file, append the '#' EOF marker the
# tokenizer expects, and print the resulting token stream.
codigoFuente=""
with open("codigoFuente.js") as texto:
    codigoFuente=texto.read()
codigoFuente+="#"
print(CodeToTokens(codigoFuente))
987,423 | 1b9c52d82e6fca390a1b81d646172e52cfc37152 | # _*_ coding: utf-8 _*_
import plotly.express as px
import plotly.graph_objects as go
import dash_core_components as dcc
import pandas as pd
def tb_priceData(data_source):
    """Build the price KPI dict for the dashboard table.

    *data_source* carries the views dataframe plus the active selections
    (period/country/company/year/last_year). Returns a dict with:
      - "price":      average unit price of the selected company, as "$x.y"
      - "price_diff": company price minus whole-market price
      - "price_YoY":  company price minus its prior-year price
    """
    df_views = data_source["df_views"]
    period = data_source["period"]
    country = data_source["country"]
    company = data_source["company"]
    year = data_source["year"]
    last_year = data_source["last_year"]

    def _avg_price(yr, restrict_company):
        # Average unit price (total value / total volume), rounded to cents,
        # over one year, optionally restricted to the selected company.
        mask = (
            (df_views["yr"] == yr)
            & (df_views["month"].isin(period))
            & (df_views["cn"].isin(country))
        )
        if restrict_company:
            mask = mask & (df_views["company"].isin(company))
        subset = df_views[mask]
        return round(subset["val"].sum() / subset["vol"].sum(), 2)

    price = _avg_price(year, False)                       # whole market
    price_company = _avg_price(year, True)                # selected company
    price_company_lastYear = _avg_price(last_year, True)  # company, prior year

    return {
        "price": "${price_company}".format(price_company=price_company),
        "price_diff": round(price_company - price, 2),
        "price_YoY": round(price_company - price_company_lastYear, 2),
    }
|
def calcula_velocidade_media(distância, tempo):
    """Return the average speed for *distância* travelled over *tempo*."""
    return distância / tempo
987,425 | a296f187c8ea51dbbb98123a2d9831b15a090781 | from .tensorflow_backend import *
def rnn(*args, **kwargs):
    """Delegate to ``K.rnn`` and append an empty list to its result tuple.

    Behaviour is identical to the previous ``rnn = lambda ...`` binding;
    PEP 8 (E731) prefers ``def`` over assigning a lambda to a name, and the
    function now has a proper ``__name__`` for tracebacks.
    """
    return K.rnn(*args, **kwargs) + ([],)
|
987,426 | 3a30820b88d5282dfc71b4c7ed404a3ef2291419 | # EXAMPLE 1:- To get the source of a website in python....
# EXAMPLE 1: get the HTML source of a website with requests.
import requests
r = requests.get('https://xkcd.com/353/')
print(dir(r))  # every attribute/method available on the Response object
print(r.text)  # the decoded response body, i.e. the page's HTML
# EXAMPLE 2: download an image from the web.
import requests
r = requests.get('https://imgs.xkcd.com/comics/python.png')
print(dir())  # names currently defined in this module's namespace
print(r.content)  # raw bytes of the response (the encoded image data, not a picture)
r = requests.get('https://imgs.xkcd.com/comics/python.png')
with open('csimage.png', 'wb') as f: # save the image bytes to a local file
    f.write(r.content)
# EXAMPLE 3: check the HTTP status of a response.
# Common HTTP status-code ranges:
#   2xx = success
#   3xx = redirects
#   4xx = client errors (e.g. accessing a page without permission)
#   5xx = server errors (the site itself failed)
import requests
r = requests.get('https://imgs.xkcd.com/comics/python.png')
print(r.status_code)
print(r.ok)       # True when the status code signals no error
print(r.headers)  # response headers sent by the server
# EXAMPLE 4: pass URL query parameters with requests.
import requests
payload = {'page': 2, 'count': 25}
r = requests.get('https://httpbin.org/get', params=payload)
print(r.text)  # httpbin echoes the request back as JSON
print(r.url)   # the final URL including the encoded query string
# EXAMPLE 5: POST form data to a URL.
import requests
payload = {'username': 'nonso', 'password': 'testing'}
r = requests.post('https://httpbin.org/post', data=payload)
print(r.text)
print(r.json())  # parse the JSON response into a Python dict
r_dict = r.json()
print(r_dict['form'])  # the form fields httpbin received
# EXAMPLE 6: pass credentials for HTTP basic authentication.
import requests
r = requests.get('http://httpbin.org/basic-auth/nonso/testing', auth=('nonso', 'testing'))
print(r)
print(r.text)
# EXAMPLE 7: set a timeout so a slow/unresponsive site doesn't hang the program.
import requests
r = requests.get('http://httpbin.org/delay/6', timeout=4)  # errors out after 4 seconds
print(r)
|
987,427 | b3000f1925390a13e1cf5705b02a6e13b7d3e018 | from abc import ABC, abstractmethod
from functools import singledispatch
from typing import TYPE_CHECKING
from order.models import Order, Cart, Item, OrderItem
from order.visitor.visitable import Visitable
if TYPE_CHECKING:
from order.models import Order, Cart, Item
from order.visitor.visitable import Visitable
class AbstractVisitor(ABC):
    """Base visitor for the order domain objects.

    ``visit`` is rebound per instance as a ``functools.singledispatch``
    callable in ``__init__`` so each concrete model type is routed to its
    own ``_visit_*`` hook. Subclasses implement the abstract hooks.
    """

    def __init__(self) -> None:
        super().__init__()
        # Imported locally to avoid import cycles at module load time.
        from order.models import Order, Cart, Item, OrderItem

        self.visit = singledispatch(self.visit)
        self.visit.register(Order, self._visit_order)
        self.visit.register(Cart, self._visit_cart)
        self.visit.register(Item, self._visit_item)
        # BUG FIX: this previously registered Item a second time, which
        # silently replaced _visit_item and left OrderItem with no handler.
        self.visit.register(OrderItem, self._visit_order_item)

    @abstractmethod
    def visit(self, element: Visitable):
        """Dispatch *element* to the matching ``_visit_*`` hook."""
        raise NotImplementedError

    @abstractmethod
    def _visit_order(self, element: Order):
        raise NotImplementedError

    @abstractmethod
    def _visit_cart(self, element: Cart):
        raise NotImplementedError

    @abstractmethod
    def _visit_item(self, element: Item):
        raise NotImplementedError

    @abstractmethod
    def _visit_order_item(self, element: OrderItem):
        raise NotImplementedError
|
987,428 | 821e64f0e25371b726a93bba1aa0263b2cd0d405 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the query cache implementation.
"""
from distutils.util import strtobool
from functools import lru_cache
from hashlib import sha256
import json
import logging
import os
import sqlite3
from .path import GEN_FOLDER, QUERY_CACHE_DB_PATH
from .core import ProcessedQuery
logger = logging.getLogger(__name__)
class QueryCache:
    """
    QueryCache stores ProcessedQuerys and associated metadata in a sqlite3 backed
    cache to save processing time on reloading the examples later.
    """

    @staticmethod
    def copy_db(source, dest):
        """Replay *source*'s full SQL dump into *dest* and commit."""
        cursor = dest.cursor()
        for statement in source.iterdump():
            cursor.execute(statement)
        dest.commit()

    def __init__(self, app_path, schema_version_hash):
        # make generated directory if necessary
        self.schema_version_hash = schema_version_hash
        gen_folder = GEN_FOLDER.format(app_path=app_path)
        if not os.path.isdir(gen_folder):
            os.makedirs(gen_folder)

        db_file_location = QUERY_CACHE_DB_PATH.format(app_path=app_path)
        self.disk_connection = sqlite3.connect(db_file_location)
        self.batch_write_size = int(os.environ.get("MM_QUERY_CACHE_WRITE_SIZE", "1000"))
        # One-element memo for get(). FIX: get() was previously decorated with
        # @lru_cache(maxsize=1), which keys on `self` and therefore keeps every
        # QueryCache instance alive for the life of the process (flake8-bugbear
        # B019). A per-instance memo gives the same single-entry caching
        # behaviour without the leak.
        self._last_get = None

        cursor = self.disk_connection.cursor()
        # Drop incompatible tables so they are rebuilt with the current schema.
        if not self.compatible_version():
            cursor.execute("""
                DROP TABLE IF EXISTS queries;
            """)
            cursor.execute("""
                DROP TABLE IF EXISTS version;
            """)

        # Create table to store queries
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS queries
            (hash_id TEXT PRIMARY KEY, query TEXT, raw_query TEXT, domain TEXT, intent TEXT);
        """)
        # Create table to store the data version
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS version
            (schema_version_hash TEXT PRIMARY KEY);
        """)
        cursor.execute("""
            INSERT OR IGNORE INTO version values (?);
        """, (self.schema_version_hash,))
        self.disk_connection.commit()

        in_memory = bool(strtobool(os.environ.get("MM_QUERY_CACHE_IN_MEMORY", "1").lower()))
        if in_memory:
            logger.info("Loading query cache into memory")
            self.memory_connection = sqlite3.connect(":memory:")
            self.copy_db(self.disk_connection, self.memory_connection)
            self.batch_writes = []
        else:
            self.memory_connection = None
            self.batch_writes = None

    def flush_to_disk(self):
        """
        Flushes data from the in-memory cache into the disk-backed cache
        """
        logger.info("Flushing %s queries from in-memory cache to disk", len(self.batch_writes))
        # batch_writes holds stringified sqlite rowids (internal integers),
        # so interpolating them into the IN (...) list is injection-safe.
        rows = self.memory_connection.execute(f"""
            SELECT hash_id, query, raw_query, domain, intent FROM queries
            WHERE rowid IN ({",".join(self.batch_writes)});
        """)
        self.disk_connection.executemany("""
            INSERT OR IGNORE into queries values (?, ?, ?, ?, ?);
        """, rows)
        self.disk_connection.commit()
        self.batch_writes = []

    def __del__(self):
        # Best-effort: persist anything still pending before the connections go away.
        if self.memory_connection and self.batch_writes:
            self.flush_to_disk()
            self.memory_connection.close()
        self.disk_connection.close()

    def compatible_version(self):
        """
        Checks to see if the cache db file exists and that the data version
        matches the current data version.
        """
        cursor = self.disk_connection.cursor()
        try:
            row = cursor.execute("""
                SELECT COUNT(schema_version_hash) FROM version WHERE schema_version_hash=(?);
            """, (self.schema_version_hash,)).fetchone()
            return row[0] > 0
        except sqlite3.Error:  # version table missing => incompatible/new cache
            return False

    @staticmethod
    def get_key(domain, intent, query_text):
        """
        Calculates a hash key for the domain, intent and text of an example.
        This key is required for further interactions with the query cache.

        Args:
            domain(str): The domain of the example
            intent(str): The intent of the example
            query_text(str): The raw text of the example

        Returns:
            str: Hash id representing this query
        """
        h = sha256(domain.encode())
        h.update(b"###")
        h.update(intent.encode())
        h.update(b"###")
        h.update(query_text.encode())
        return h.hexdigest()

    @property
    def connection(self):
        # Prefer the in-memory mirror when it exists.
        return self.memory_connection or self.disk_connection

    def key_to_row_id(self, key):
        """
        Args:
            key(str): A key generated by the QueryCache.get_key() function

        Returns:
            Optional(Integer): Unique id of the query in the cache if it exists
        """
        cursor = self.connection.cursor()
        cursor.execute("""
            SELECT rowid FROM queries where hash_id=(?);
        """, (key,))
        row = cursor.fetchone()
        return row[0] if row else None

    def put(self, key, processed_query):
        """
        Adds a ProcessedQuery to the cache

        Args:
            key(str): A key generated by QueryCache.get_key() for this example
            processed_query(ProcessedQuery): The ProcessedQuery generated for this example

        Returns:
            integer: The unique id of the query in the cache.
        """
        data = json.dumps(processed_query.to_cache())

        def commit_to_db(connection):
            cursor = connection.cursor()
            cursor.execute("""
                INSERT OR IGNORE into queries values (?, ?, ?, ?, ?);
            """, (key,
                  data,
                  processed_query.query.text,
                  processed_query.domain,
                  processed_query.intent,
                  ))
            connection.commit()

        if self.memory_connection:
            commit_to_db(self.memory_connection)
            rowid = self.key_to_row_id(key)
            self.batch_writes.append(str(rowid))
            if len(self.batch_writes) == self.batch_write_size:
                self.flush_to_disk()
        else:
            commit_to_db(self.disk_connection)
        return self.key_to_row_id(key)

    def get(self, row_id):
        """
        Get a cached ProcessedQuery by id. Note: this call should never fail as
        it is required that the row_id exist in the database before this method
        is called. Exceptions may be thrown in the case of database corruption.
        The method memoizes the previously retrieved element because it is common
        for a set of iterators (examples and labels) to retrieve the same row_id
        in sequence. The memo prevents extra db lookups in this case.

        Args:
            row_id(integer): The unique id returned by QueryCache.key_to_row_id() or
                             QueryCache.put().

        Returns:
            ProcessedQuery: The ProcessedQuery associated with the identifier.
        """
        if self._last_get is not None and self._last_get[0] == row_id:
            return self._last_get[1]
        cursor = self.connection.cursor()
        cursor.execute("""
            SELECT query FROM queries WHERE rowid=(?);
        """, (row_id,))
        row = cursor.fetchone()
        result = ProcessedQuery.from_cache(json.loads(row[0]))
        self._last_get = (row_id, result)
        return result

    def get_value(self, domain, intent, query_text):
        # Convenience: look up by (domain, intent, text) directly; None if absent.
        row_id = self.key_to_row_id(self.get_key(domain, intent, query_text))
        return None if row_id is None else self.get(row_id)

    def get_raw_query(self, row_id):
        """
        Get the raw text only from a cached example. See notes on get().
        """
        cursor = self.connection.cursor()
        cursor.execute("""
            SELECT raw_query FROM queries WHERE rowid=(?);
        """, (row_id,))
        return cursor.fetchone()[0]

    def get_query(self, row_id):
        """
        Get the Query from a cached example. See notes on get().
        """
        return self.get(row_id).query

    def get_entities(self, row_id):
        """
        Get entities from a cached example. See notes on get().
        """
        return self.get(row_id).entities

    def get_domain(self, row_id):
        """
        Get the domain only from a cached example. See notes on get().
        """
        cursor = self.connection.cursor()
        cursor.execute("""
            SELECT domain FROM queries WHERE rowid=(?);
        """, (row_id,))
        return cursor.fetchone()[0]

    def get_intent(self, row_id):
        """
        Get the intent only from a cached example. See notes on get().
        """
        cursor = self.connection.cursor()
        cursor.execute("""
            SELECT intent FROM queries WHERE rowid=(?);
        """, (row_id,))
        return cursor.fetchone()[0]
|
987,429 | 4e9c327054a2befe9bb2ca892fd5e4febf332dfe | from src.data.data import Data
|
987,430 | 82d310c5e718f9fd79d3df9767e24ee9ff88c7f6 | ##-----------------------------------
##re-identification
##
## model:
## intel/person-reidentification-retail-0288
## intel/facial-landmarks-35-adas-0002
##-----------------------------------
import iewrap
import cv2
import numpy as np
import time
import os
import yaml
import re #正規表現
from scipy.spatial import distance
from munkres import Munkres # Hungarian algorithm for ID assignment
import pprint
## Inference wrappers (OpenVINO models, CPU device).
## person-reidentification-retail-0288: 256-d appearance feature vector
ie_reid = iewrap.ieWrapper(r'intel/person-reidentification-retail-0288/FP16/person-reidentification-retail-0288.xml', 'CPU')
## facial-landmarks-35-adas-0002: 35 facial landmark points
ie_faceLM = iewrap.ieWrapper(r'intel/facial-landmarks-35-adas-0002/FP16/facial-landmarks-35-adas-0002.xml', 'CPU')
# Root folder holding one sub-folder of face images per employee ID.
face_dir = r'./rsc/face/'
# Features detected in the current frame (cleared after each draw call).
curr_feature = []
# Persistent gallery of known features/IDs built by fncReid().
feature_db = []
# When True, feature_db entries never expire (see time_out in fncReid).
bolDisableTimeOut = True
def PreloadImage():
    """Seed curr_feature from the member images on disk, then build feature_db."""
    global curr_feature,feature_db
    print(f"feature_db : {len(feature_db)}")
    ## read members' information from the yml file
    with open(r'meeting_member.yml') as file:
        data = yaml.safe_load(file)
    ## Load each member's images listed in the yml and pack them into curr_feature.
    ## Expected directory layout:
    ##   \face_dir
    ##     \(EmployeeID)
    ##       \(file)   <- every file in the folder is read; names don't matter
    for yaml_out in data['member']:
        print(yaml_out["EmployeeID"],yaml_out["LastName"],yaml_out["Section"],yaml_out["Position"]) # for debug
        each_face = face_dir + yaml_out["EmployeeID"]
        print(each_face)
        if (os.path.exists(each_face)):
            for fname in os.listdir(each_face):
                img = cv2.imread(each_face + "/" + fname)
                # 256-d appearance vector for this reference image.
                featVec = ie_reid.blockInfer(img).reshape((256))
                pos = [0,0,0,0]
                curr_feature.append({'pos': pos,'feature': featVec, 'id': -1,'img': img, 'name': yaml_out["LastName"], 'employeeID': yaml_out["EmployeeID"], 'section': yaml_out["Section"], 'position': yaml_out["Position"], 'mouth_val': 0})
        else:
            print(f"path \"{each_face}\" is not found.")
    ## Once curr_feature is filled, call fncReid to populate feature_db.
    print(f"curr_feature : {len(curr_feature)}")
    if (len(curr_feature) > 0):
        fncReid()
    print(f"feature_db : {len(feature_db)}")
    return
def fncDrawLM(face):
    """Draw all 35 facial landmarks on *face*; highlight the outer-lip centres."""
    _X=0
    _Y=1
    # Fetch every landmark (model emits 70 floats = 35 (x, y) pairs,
    # presumably normalised to 0..1 — scaled by the image size below).
    landmark = ie_faceLM.blockInfer(face).reshape((70,)) # [1,70]
    lm=landmark[:70].reshape(35,2) # [[x0, y0], [x1, y1], ...]
    #print(lm)
    # landmark: small yellow dot for every point
    for count in range(35):
        cv2.circle(face, (abs(int(lm[count][_X] * face.shape[1])),abs(int(lm[count][_Y] * face.shape[0]))), 2, (0,255,255), -1)
    # Points 10/11 (outer-lip centre points, per the model docs) in green.
    face = cv2.circle(face,(int(lm[10][_X] * face.shape[1]),int(lm[10][_Y] * face.shape[0])), 3, (0,255,0), -1)
    face = cv2.circle(face,(int(lm[11][_X] * face.shape[1]),int(lm[11][_Y] * face.shape[0])), 3, (0,255,0), -1)
    return(face)
def fncMouthValue(person,face_ratio,face):
    ## Gap between the upper and lower lip.
    ##
    ## facial-landmarks-35-adas-0002
    ## [Mouth] p8, p9: mouth corners on the outer boundary of the lip; p10, p11: center points along the outer boundary of the lip.
    ##
    ## person     : person image (not used for inference here; kept for the variant below)
    ## face_ratio : ratio (reciprocal) between the ROI and the face-detection rect.
    ##              Needed when inferring on the whole image; unnecessary when the
    ##              face-detection rect is used, as it is here.
    ## face       : face image
    ##
    ## The distance must be divided by the face height (face.shape[0]) because
    ## the face size would otherwise skew the value.
    _X=0
    _Y=1
    ## fetch the landmarks
    # landmark = ie_faceLM.blockInfer(person).reshape((70,)) # [1,70]
    landmark = ie_faceLM.blockInfer(face).reshape((70,)) # [1,70]
    lm=landmark[:70].reshape(35,2) # [[left0x, left0y], [left1x, left1y], [right0x, right0y], [right1x, right1y]... ]
    ##for debug
    ## face = cv2.circle(face,(int(lm[10][_X] * face.shape[1]),int(lm[10][_Y] * face.shape[0])), 5, (0,0,255), -1)
    ## face = cv2.circle(face,(int(lm[11][_X] * face.shape[1]),int(lm[11][_Y] * face.shape[0])), 5, (0,0,255), -1)
    ## face = cv2.resize(face,(200,320))
    ## cv2.imshow('face', face)
    ## key = cv2.waitKey(2)
    # Scaled absolute vertical distance between the two outer-lip centre points.
    return(abs(int((lm[10][_Y] - lm[11][_Y]) / face.shape[0] * 1000000)))
    #return(abs(int((lm[10][_Y] - lm[11][_Y]) * 1000 * face_ratio))) # variant to use when inferring on `person`
def CurrFeatureAppend(person,pos,face_ratio,face):
    """Append one detected person's feature vector (plus metadata) to curr_feature."""
    ## person : detected person (cropped)
    ## pos : original position of person (for drawing the rect)
    global curr_feature
    mouth_val = fncMouthValue(person,face_ratio,face)
    # 256-d appearance vector used later for cosine-distance matching.
    featVec = ie_reid.blockInfer(person).reshape((256))
    # id -1 / name "unknown" mark the entry as not yet matched by fncReid().
    curr_feature.append({'pos': pos, 'feature': featVec, 'id': -1,'img': person, 'name':'unknown','employeeID':'unknown', 'section':'unknown' , 'position': -1 , 'mouth_val': mouth_val })
def fncReid():
    """Match curr_feature against feature_db and propagate identities.

    Builds a cosine-distance matrix between current and stored feature
    vectors, solves the assignment with the Hungarian algorithm, copies
    IDs/metadata across matched pairs, and registers unmatched features
    as new entries in feature_db.
    """
    global curr_feature, feature_db
    objid = 0
    time_out = 5 ## how long to retain a feature vector (seconds)
    now = time.monotonic()
    if bolDisableTimeOut == False:
        # NOTE(review): removing from feature_db while iterating it can skip
        # the element after each removal — confirm this is acceptable here.
        for feature in feature_db:
            if feature['time'] + time_out < now:
                feature_db.remove(feature) ## discard feature vector from DB
                #print("Discarded : id {}".format(feature['id']))
    ## If any object is registered in the db, assign the registered ID to the most similar object in the current image
    if len(feature_db)>0:
        #print(f"fncReid_1 feature_db : {len(feature_db)}")
        #print(feature_db)
        ## Create a matrix of cosine distances (rows: db entries, cols: current)
        cos_sim_matrix=[ [ distance.cosine(curr_feature[j]["feature"], feature_db[i]["feature"])
                           for j in range(len(curr_feature))] for i in range(len(feature_db)) ]
        ## solve the feature matching problem with the Hungarian assignment algorithm
        hangarian = Munkres()
        combination = hangarian.compute(cos_sim_matrix)
        ## assign IDs to the object pairs based on the assignment matrix
        for dbIdx, currIdx in combination:
            curr_feature[currIdx]['id'] = feature_db[dbIdx]['id'] ## assign an ID
            feature_db[dbIdx]['feature'] = curr_feature[currIdx]['feature'] ## update the feature vector in DB with the latest vector
            feature_db[dbIdx]['time'] = now ## update last found time
            feature_db[dbIdx]['img'] = curr_feature[currIdx]['img'] ## cropped image
            feature_db[dbIdx]['mouth_val'] = curr_feature[currIdx]['mouth_val'] ## mouth_val
            # Copy the identity metadata from the gallery onto the detection.
            curr_feature[currIdx]['name'] = feature_db[dbIdx]['name']
            curr_feature[currIdx]['employeeID'] = feature_db[dbIdx]['employeeID']
            curr_feature[currIdx]['section'] = feature_db[dbIdx]['section']
            curr_feature[currIdx]['position'] = feature_db[dbIdx]['position']
    ## Register the new objects which have no ID yet
    #print("# Register the new objects which has no ID yet")
    for feature in curr_feature:
        #print(str(feature['id']))
        #if feature['id']==-1: ## no similar objects is registred in feature_db
        if feature['id'] < 1: ## no similar object is registered in feature_db
            #print("# no similar objects is registred in feature_db")
            feature['id'] = objid
            feature_db.append(feature) ## register a new feature to the db
            feature_db[-1]['time'] = now
            ## save image and info for preload data
            ## (would need an auto-grow option or similar)
            #cv2.imwrite(face_dir + str(feature_db[-1]['time']) + '.jpg', feature_db[-1]['img'])
            objid+=1
    # print(f"curr_feature : {len(curr_feature)}")
    # print(f"feature_db : {len(feature_db)}")
    return
def showPersonInfo(img):
    """Overlay name/section and mouth value for the tracked people, then clear curr_feature."""
    ## img : original image
    global curr_feature
    ## numbering
    for obj in curr_feature:
        id = obj['id']
        ## top-banner variants kept for reference
        # cv2.rectangle(img, (0, 0), (320, 40),(128,0,0), -1) # 192 dark blue for frame
        # if obj['name'] != "unknown":
        #     cv2.putText(img, obj['name']+"("+obj['section']+')', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2)
        color = (128,0,0)
        if obj['name'] != "unknown":
            cv2.putText(img, obj['name']+"("+obj['section']+')', (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, color , 2)
        cv2.putText(img,'mouth: '+str(obj['mouth_val']), (450, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2)
        #print (f"{obj['position']} - {obj['name']}")
        ## bottom-banner variants kept for reference
        ## cv2.rectangle(img, (0, (cont_height - 20) * 2), (cont_width * 2, cont_height * 2),(128,0,0), -1) # 192 dark blue for frame
        # cv2.rectangle(img, (0, 360), (640, 400),(128,0,0), -1) # 192 dark blue for frame
        #
        # if obj['name'] != "unknown":
        ##     cv2.putText(img, obj['name']+"(id:"+obj['employeeID']+')', (10, 195), cv2.FONT_HERSHEY_PLAIN, 1, color, 1)
        #     cv2.putText(img, obj['name']+"("+obj['section']+')', (10, cont_height * 2 - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2)
    curr_feature.clear()
    # NOTE(review): `obj` is the loop variable — if curr_feature was empty this
    # raises NameError, and otherwise only the LAST person's position/mouth_val
    # is returned. Confirm this single-person assumption is intended.
    return img,obj['position'],obj['mouth_val']
def draw_one_person(img):
    """Overlay the mouth value of the first tracked person only, then clear curr_feature."""
    ## img : original image
    # global curr_feature,feature_db
    global curr_feature
    color = (255,255,255)
    ## check mouth val (slice [:1] limits the overlay to the first entry)
    for obj in curr_feature[:1]:
        id = obj['id']
        cv2.rectangle(img, (0, 180), (320, 200),(128,0,0), -1) # 192 dark blue
        cv2.putText(img,'mouth: '+str(obj['mouth_val']), (10, 100), cv2.FONT_HERSHEY_PLAIN, 1, color, 1)
    curr_feature.clear()
    return img
|
987,431 | c55c6712c16ed1f8fc61ade13deea717b418f973 | # import random
#
# class CardRank(): # Создаем колоду"
#
# def __init__ (self, deck = None):
# self.deck = list()
# rank = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King', 'Ace']
# suit = ['Pi', 'Che', 'Bub', 'Kre', ]
# for x in rank:
# for y in suit:
# self.deck.append(x+y)
# random.shuffle(self.deck) # сразу её мешаем
#
# koloda_1 = CardRank() # Создаем экземпляр колоды
#
# class Card(): #создаем класс карта
#
# def __init__(self):
# self.card = koloda_1.deck.pop(0) #из колоды достаем карту и удаляем её
#
# class Hand(): #создаем класс рука
#
# def __init__ (self,kolvo = 3, hand = None,):
# self.kolvo = kolvo
# self.hand = list()
# for count in list(range(kolvo)): # немного магии
# x = Card()
# self.hand.append(x.card)
#
# def show(self):
# return self.hand ##возвращаем открытые карты хотя можно было бы и через
# while True:
# first_bank = 100
# second_bank = 100
# first_stavka = int(input('Ставка первого игрока: '))
# first_bank -= first_stavka
# second_stavka = int(input('Ставка второго игрока: '))
# second_bank -= second_stavka
# first_hand = Hand(3)
# print('Первый игрок',first_hand.show())
# second_hand = Hand(3)
# print("Второй игрок",second_hand.show())
#
# first = input('Первый игрок: Фолд | Чек | Рей: ')
# first = first.lower()
# if first == 'фолд':
# second_bank += first_stavka + second_stavka
# print('Банк первого игрока', first_bank)
# print('Банк второго игрока', second_bank)
# print('the end')
# elif first == 'чек':
# pass
# elif first == 'рей':
# first_stavka += int(input('Ставка первого игрока: '))
# print(first_stavka)
#
# second = input('Второй игрок: Фолд | Чек | Рей: ')
# second = second.lower()
# if second == 'фолд':
# first_bank += first_stavka + second_stavka
# print('Банк первого игрока', first_bank)
# print('Банк второго игрока', second_bank)
# print('the end')
# elif second == 'чек':
# pass
# elif second == 'рей':
# second_stavka += int(input('Ставка второго игрока: '))
# print(second_stavka)
#
#
#
#
#
#
#
#
#
#
#
#
#
#
|
987,432 | 10c1185a29a2e549e7ac2d1bc51c93d6bb2c3ec6 | #import APP_SETTINGS
import os # Access to Environment Variables
from twilio.rest import Client # Client object from Twilio

# NOTE(review): these look like placeholders — the names suggest the values
# should be read via os.environ (os is imported but never used). Confirm, and
# avoid committing real credentials to source control.
account_sid = "TWILIO_ACCOUNT_SID" # Twilio stuff
auth_token = "TWILIO_AUTH_TOKEN" # Twilio stuff
my_number = "MY_PHONE_NUMBER"
twilio_number = "TWILIO_PHONE_NUMBER"
message_send = "Hello, world!"
client = Client(account_sid, auth_token) # Create Twilio Rest client object
# Send one SMS from the Twilio number to my_number.
client.messages.create(
    to= my_number,
    from_= twilio_number,
    body= message_send
)
|
def rendre_monnaie(prix, x20, x10, x5, x2, x1):
    """Return the change for *prix*, given the inserted coins.

    x20..x1 are the counts of inserted 20/10/5/2/1 coins. The change is
    computed greedily, largest denomination first, and returned as a
    (n20, n10, n5, n2, n1) tuple. If the payment does not cover the price,
    five Nones are returned instead.
    """
    paiement = x20 * 20 + x10 * 10 + x5 * 5 + x2 * 2 + x1
    if paiement < prix:
        return None, None, None, None, None
    reste = paiement - prix
    res20, reste = divmod(reste, 20)
    res10, reste = divmod(reste, 10)
    res5, reste = divmod(reste, 5)
    res2, res1 = divmod(reste, 2)
    return res20, res10, res5, res2, res1
# Smoke checks; the expected tuples are shown in the trailing comments.
print(rendre_monnaie(38, 1, 1, 1, 1, 1)) # (0, 0, 0, 0, 0)
print(rendre_monnaie(56, 5, 0, 0, 0, 0)) # (2, 0, 0, 2, 0)
print(rendre_monnaie(80, 2, 2, 2, 3, 3)) # (None, None, None, None, None)
|
987,434 | 2de174344453b98f2eb3a8d6084ac895531f3f22 | import re
def clean(input):
    """Strip cancellations and garbage, keeping only the group braces.

    Processing order matters: first every "!x" pair is removed (cancelled
    characters), then each "<...>" garbage run (non-greedy so runs never
    merge), and finally every character other than '{' / '}' is dropped —
    the doctests expect commas and whitespace gone, and score() only reads
    braces anyway.

    BUG FIX: the previous version skipped the final step, so commas and
    newlines survived and most of the doctests below failed (e.g.
    clean("{<a>,<a>,<a>,<a>}") returned '{,,,}').

    >>> clean("{}")
    '{}'
    >>> clean("{{}}")
    '{{}}'
    >>> clean("{{<>}}")
    '{{}}'
    >>> clean("{<{}>}")
    '{}'
    >>> clean("{<a>,<a>,<a>,<a>}")
    '{}'
    >>> clean("{<{>}>}")
    '{}}'
    >>> clean("{{<a>},{<a>},{<a>},{<a>}}")
    '{{}{}{}{}}'
    >>> clean("{{<!>},{<!>},{<!>},{<a>}}")
    '{{}}'
    >>> clean("{{<!!>},{<!!>},{<!!>},{<!!>}}")
    '{{}{}{}{}}'
    >>> clean("{{<a!>},{<a!>},{<a!>},{<ab>}}")
    '{{}}'
    >>> clean("{{{},{},{{}}}}")
    '{{{}{}{{}}}}'
    >>> clean("{{{},{},{{}}}}\n")
    '{{{}{}{{}}}}'
    """
    no_cancel = re.sub(r"!.", "", input)
    no_garbage = re.sub(r"<.*?>", "", no_cancel)
    return re.sub(r"[^{}]", "", no_garbage)
def score(input):
    """Sum the nesting depth of every group: a '{' opened at depth d scores d.

    >>> score("{}")
    1
    >>> score("{{}}")
    3
    >>> score("{{}{}}")
    5
    >>> score("{{}{}}")
    5
    """
    total = 0
    depth = 0
    for ch in input:
        if ch == '{':
            depth += 1
            total += depth   # each group is worth its own depth
        elif ch == '}':
            depth -= 1
        # any other character contributes nothing
    return total
input = """{{{{{},{<!!!>},<}<!e!>,<<!>!>},<>,{<"u!!!>,!!'!!!>!>,<}{!>,<u!!!>e>}},{{{<!!!>,<!>u!!,>},{}}}},{{<,!>},<!>},<ae!!<''"!>},<"}ii!>},<!>!!!>>,<o!!}!!!>o!>},<{,<<!!{{u{!!!>!>},<!>,<>},{},{<!!,i!!i'!>},<ea!!!>{!}!>!!i!!>}}},{{{{<i!!!>o'!!{'>},{<!!!>ao,!!!>!u!!!>{<'oui!!"!>>}},{<}{!>},<a!>},<!!<'o,!!o!!!>'}!a>}}},{{{<!>>},{{<!!!!!>!!!>o!"!!!>!!!>},<i>}},{{<'!!<!!!>!>i!>,<a'>}}},{{{{{{<e!!!>{>},{{{<!!>},{}},{}}}},{{{<}ai,!,i!>,<,aii}!>},<!!,!>},<!!!>},<}>}},<"a!i'{>}},{{<a!!,!<{e!!!>},<u!>,<!!!>"<{oe>,<"ie!>!!!><!>},<!!!"!!"'{u>},{{<}!a!>},<!!a<!>},<!>!!!>}!!{o!>},<ia}o!>>}},{{<!>},<!>!!e!!a{!!i!!u{{!ao>}}},{{},{}}},{{{<!a}>},<e!>,<!>},<!>},<{},,!>,<>},{{<!!!>u!!"}!>},<i!>,<!!}>}},{<!>a{<i'!>!!'!!}!>","o!>},<!!"<!!>,<<!!!>!>},<!>},<>}},{{{<!!}!>'!>},<<!>},<"!>},<!>,i,,!>,<!">},{{{<!}}<o!>,<}!!!>!'e}!>},<i!!o}!>},<u!!e!>>}},{<>}}},{{}}}},{{},{{{{<!>,<i,e!!"!>,<'!!!>u!>!>'}>},<o>},{{{{<''!!{!!iu}}!>uoi!><!!{!>!<u>},<o!>,<a!>},<!>,<{,!>u>},<}!!'>}}},{{<a{,!!!!!>i!!,<!>},<!!!><>,<!>'"i!!">},{{<!>,<{i!>},<!>,<>,{}}}},{{},{{<}ei}!!'e",i!>,<o!!<'e}!!!<>}}}}},{{{},{{{{<!!'u!!!>{o!!a!!!>!!!"o>},{{<a},<<!>,<!>ii!!!>!!!>!>,<'u!>},<o,!>,<!!!>>}}},{<,>},{{},<<"{!}'{!!!>}'!>,'ae!>},<>}},{{<!>,<e!!!!!>o!!!>a!>,<}}}e!>!>e!>},<!!<u!e>,<ea'!!!>,eao<eoo>},{{<e'ioi!!e{u{e<e!!!>},<i'!!!>'>},{{{<!>!>,<!!o!!,{a"o<!>,<e!!a<u!',a>},<!!!>''!>,<<!>,<u!>,<uo!e!>,<a!>,<>}},{<!>io!!<!!!>,!>,<,!>},<!!,oi!>>,{<!!"!!}!!,>}}},{{}}},{{{<!!!>'!e!!!>o{>},{<'e!!i<i!!}!!!!!>},<{>}},{<a!}ia>},{<!!o>,<aa}!!!!!>},<''!"!!!>>}},{{{<!>},<'!>,<{!"!>},<!!!>>,<!!!>ii!!eoo!>,<i!!e>},{{{<<i!>},<!!>},<'"}<}!>!!!>,<{!>},<!<!!i'}u>}}},{{<i!!!>!e}!><!!!!<e!>!!u!!!!ie!!!!!>aa{>},{}},{{<''!!o!!'!>},<!!!>!!!!!i!o'a!>},<oeu!'>},{{<,u!!,!>},<!!}!>!!}""aa>},{<a<iu!>},<u>}}}}},{},{{{{{<>,{<u!!!>!u{,}u!,!!!!!>!!",!>!>},<!!{>}},{<!>},<!<a!!!>,<{!!!><,e!<e!>},<a!uu{>}},{{},{}},{{{},{<a<!>u!!!>'!>},<e>,<!!"ea!<u!>,<,i!!>}},<!'!!!>}">}},{{<!>},<ou}a}>},{<}!>,<{uo!!!>!a!><!!!>e'!>},<aai>}}},{{<!!!!{"o!!!>},<{}e!>!>"!>},<{!!!>,<
!!!>!!!>!!i{>}},{<'e>,{{<'!!!>a{!!"ouo{>},{<!!!>"!<!>},<"!}u!><o'!!ii{}!!"o>}}}}},{{{},{<!,!>,<!!u!>o}!>!>,<!>,<!a!e!!>,<!!"{,!>,<!>i<,!!!>>},{{{<!>},<i!!!i>,<!>{!i!e!>,<!>,<>},{<!!!<!!a!!>,{<o>}},{}},{<!!'!!!!"!e,!!!!'e!!!!,!!!>ua"!e!>u!u>}}},{{{},{{},{{<!!!!,!!!!!!!!!!!>,<e!!!!}}{a!>,<!!!!{>,{<!!i"},u!>},<o!>},<a"ie">}},<!>},<!!!>!>!>},<<'"'>}},{{}}},{{<i>,{{<}'!!!>!>,<u,!!{!!!>!"!!!>},<'o>}}},{<!!!>!>},<!>,<!o>}},{{{<oo!!!!oo!!!!!!o>},<"<!!!>>},{{}}}},{{},{<u!>},<!!!><,i!>,<!!'{'>}}},{{<ae},a>},{<{"!>i,"!!!>a!!!>>},{<!!!>!'{<<<o>,{<"e",a>}}},{{{<{!!e<!!!>u,!>,<u>,{<<"<oee!>,<!!,<'!!!>!!!>{!,">,{<<!!!>ai!!!>},<u>}}}}}}}},{{{{{}},{<e,"u!!!!!!oa'oe}a<e,u>}},{{{<'{!'!!!>,o{{!i'u>,{<!!i!!!!!>ae!!!e!e!}{u>}},{},{{<,u''}>},{<{a'!>,<,a!eou!o!!',"'}u,>}}},{{{<!!!!'>,{}},{{<}<{a!>},<}<!!!>u!<a,!>!!a!>,<!!{'>}},{<!!a!!!>,iaou!>},<oia!"!!!>},<!!!><!!!>u!>},<>,<!!e}{i,,u>}},{<!!!>uu!!eea!>},<'}!e!>},<!!o!!!!,>}},{{{<io!>,<}}e"!!!!!>,<!>''!>,<ii!!!>,<!>},<!>!>},<>},{<!!!>!>!{''!{u!>,<a,!!a>}},{<e'"<!!!>o!!,i}!{<,!!!>!"o!!"u'>}}},{{{{{<!!}!>!>,<u!!ae""o!!!!{!>>}},{{{{<i!>,"!>,<{,!!',!>!{!>!!!>,<o'!!!>>},{<!!!>,!!u>,{<"!>},<o!>,"!"!>,<u',!!!!o>}}}}}},{<eo!!u!>,!>,<!!!!}}>},{{},{<!{<>}}},{<!!{!!!!!'"<!!!>'!!"u>,{<"ao!>},<!!!!'oe!!o!!u!>},<o}!>,<o>}}},{{{<!>!>,<!,a!>!a"}!!{<!>!>!>>},<ou>},{{}},{{{<!>!>},<!>{!!!>!>},<i,,!>},<o>},{{},{{<a!!!><"'!>!>},<!e>},{}}}},{{}}}}},{{{{{{}}},{<!>,<}<,"a!,!a!!!>"!!'>}},{{{{{<'u{a>}}}},<{!!!>!>,<!!!>!!<'u!>},<>},{{<">},{}}},{{{<'!}''"!{,}<ui}e!!!>!>{}">,{}},{{<!!i!!!>!!!>,<u!!!>}!!!>eoio>},{{<!>},<!e!>,<"!>,<!>,<!oo!!o"aiu!>},<>}}},{<u!!!>!>,<>,<o!u}>}},{},{{},{{{},{<a!!!>!!,!!<"'!!!>,!>},<<"!!!!!!!!!!!>,>}},{<a>,{{<!>},<!!!e{!>},<,>}}}},{{{<<<i'"!!!!!!}i!!!>,<'!>!"!!!>e!!!!e}!!!!>},{<",{!>o!>},<,,!!>}},{{<>},<>}}}},{{{{{<u{!!!>!!!>}}}i>,{{<"u<!!!>!!!>!!!!ea{!!!!!!!>u!!!>ie>}}},{<!!!>'!!!>,<i{!!!>>}}},{{{{{{}}},{{{{<{,{a!!!>!o!>,<'{>,{}}},{{{{<!!!!,>},{<e!>,<!!!>!!!e{"',}!,>}},{<!>}!>},<,!oo!>>},{{<!!<'!!!>!!,'!>},<i>},<!>!>},<!>,<ae!>e>}},{{<!!!
>!!u!><o!>!!!>!!!>,<>},{<eii!i}!!!!u!{{>}},{{<,uaa!>},<e'!>,<'u!>,<!>!>,<!!>},{{<{}!>},<>}}}},{{},{}}},{<oo!>,<!!i>,<!>},<"}"}!'!!!!u!>},<!>,<!!!>},<eu!!!>>},{{<a!!!>,<!>},<!!}!!!!{!>},<}o!>,<!>},<!!!>},<'a{!!!!!o>,{}},{{{<!>'!>!>"!!!!!>,>}},{<!!<!u!!,>,{}}}}},{{{{{<!!!!u!!!>!>,<!{!!<'""i!!,!>},<,>,<!>,<o!>,<}a!!!>}a!!!!!>'!>a<!!>}},{{<!ea!!!!u!>!>,<!e!!o!>},<'>},{<!{>}}},{{{<}",'!!o}>}},{},{<a!!!!!>!!!>!>,<a!!o!!!>,<'u>,<{!i!!!>},<!!!>{<!>,<'!!!>{ii'iu!"!u!!!>>}},{{{<'!e!!{>}},{{{},{<i!!!!!!}!!!>u!!!>"!>a>}},{{{<"'!!!!!>"uee!!!!!!o!>,<"'!!",>},<{>},{{<!!!!iu}!!!{}!!"!>,<"!>!"oiuo>,<e">},{<!>},<!"!<{a!>!<>,<e'aaa!!!!!>a"!!!>,<!>!!}<>},{<!<}}!>},<a{!!!>},<!>},<!!!!e'i!>u>,<}!!!>!>,<a!<"!>>}},{{{{<}!>,<!>},<!>},<'e}},!!!>},<}!!!><"}<u!!!u>},<"!>},<'a!!!>>},{<,!!!><!>>}}}}},{}}},{<<!!i>,{<'!>!!i{u!}a!!!>,<!<!}!>},<!>e"!!!!!>,<!!o<>}},{{}}}},{{{<!!!>'!>,<!!!>!>,<u!>},<eaa>},{{<<!!!!!!u!>},<}a!<!>,<>}}},{<},"i!!!>!u!>},<<!>},<>,{<<<!>,<!ou!e!>},<>}}}},{{{{},<"io!}'!>},<!u>},{}},{{<!>},<,!>},<>,{{<!!!!ae!>},<!!"!!u!!u">},{<>}}}}},{{{{{{<"!>},<!!!io>},<!>,<'iiaoo<<'!>,<,!!!>e''>}},{{}},{}},{{<'{!>,<!>},<,"u!>},<o!>>},{<!>},<!>},<!!a!,!>},<"!!!>a!!,uu!>e'!>,<"!!!>>,<!>!>,"}!!!>{!>,<e!u!'o!!!!},ui,>},{{<"u!eo{!!!>!>},<"",!'a!!!!o}{e>},{<,<a{!>!o!'{>}}},{{{{<"!>,<{i!!!>},<!>,<o!>o>},{<ie!!!>'<,!!!>,!u"!!!!!!>}},<e{,!!!>!>,!!>},{{{},<{!!!>i>},{<!!<e>,{}},{{{{{<!>,<a{!>o!i{o!>},<!>{oo!!!!{>},{<a{!!!>u!!!>!>,<!>a'!!<i!>,<>}},{<i{!!!>!>aa}e{u!>,<'>}}}}},{}},{{{{<{,<!u!>{!>,<>},<}u!,a{!ei{{o!i!><oo!>},<ea>},{<!}!!!>a!!"!>,<a,!!!>oeuo!!!>">}},{{{<!>,<>,{<u!"!>,<!>,<!!!!}a!!!>!!u"!>,<',"!!!!e!!i!>,<!'>}},{}},{}},{{{},<o,!!'e{eue!>!>!!!!i!>},<'>},{{<u!>},<!o'!!<!>},<i!oui>,{{<",!!!>!!<!!"<a{e!>},<'!!!!,,""u>}}},{<!"!!,<u!>,<!!u,<i!!!!}>}},{<!>},<!!!!}u"!>,<{u!>!!<<a!!'<!ie}!>>,{}}},{{{{{<"!!ou""!>{,!>u"!>!!uo{{a>}}},{<!!u{!,{!i>}},{{{<ia!{!>},<!!!>>}},{{<!!!>!!e}}}{<!>},<!>!<a!!<!{{u'''>},<ui!,o!"i}>},{{{<o!!!>!>!!!>"}!!!!,!!!>'a!>a!{!!>,{{<!a!>,<i{!>},<"!>!!!>}!>,<'>}}},{<}!>,<>,<{!
!!>,<}!!!!!>'!!!>!!!>!>,<!>,<uui!!,>}},{{<}"''>},{}}}},{{<!!!>ouo!!!>'o}}"{!!,e!!o>}}}}},{{},{<u,>,<"!!!!!!}!!!>!!!!,uoe>}}}},{{{<!,'}o<u!><<">},{<a!!{!!i!!ou!>,<o"!!!>>}},{{<!>},<!>,<{!!!>!>"oaoa">},<!,,!!{!{<!>},<"u{au>}},{{<>},{<!!!><<!!!>!!!>!!!>!>,<!>{!oaa!!!!e>,<"!!!>!!u!!,!>},<i!>,<>},{<>}}},{{{<!!>,<!o!!ao!>},<!!!>!!i!!>},{<a!<}o',,!!!>,<!>,<<!>},<>,{<!!!!ao!>ua<!>ea,"aa!!!!!>>}},{{<a!}<{!!!>o!">},<"oia!>,<"<'{!i!!ea!!'!>,<!!ui"!!!>,<u>}},{{}},{{<}!!"ieo!!e!"e}!!!>>},<!>,<>}},{{{}}},{}}}},{{{{{{<ua"a'<!>'"a!iu"!{!>},<!!>,{<!>>}},{{},<!>,<}!>},<aaiuu!>,<u!!!>!!!>i!>',>}},{{{}},{{<!!!>'!>a!!!>},<}!!e}!!!!}>},{<,ue,!>},<!ua}!!>}},{{<!!!>>},<a!!!!>}},{{},{{<!a!>a{!!!>}}>},{<!!!i!!!!!>!'!>},<!!!>},<!!ao!!!!<e>}},{{{<i!>!>!>},<!!oa!!!!}!!!>},<oi'<!>!>,<!!">},<'}!{!!!>{>},{<}!>,<o>}}},{{<<!>},<!>,<o>,{{}}},{},{{{<!>i{!!!>o!!!>!>!!aa!>!u!!o!>,<>},<!!oi}a}!!!>},<!!u>},{}}}},{{<au!!!>"u}!>},<"!!!>>},{{}}},{{<!>},<{e!!!><!>!i<>,{{<o!!"i{!}o<!>,<!!"!>!!!>!>},<!!<oo!!>},{{<!!!>!>},<ea'{",ua!>,<!>,<",{!!!>!!!>,<io>}}}},{<!!o!>},<!i!!}!!!>!>,<!!!!!!!!!aa!>!!!!!>>,{{<!>'!!!>a{}!!>}}},{{{<,!!!>!!u},!>},<!!!>,<!<}">},{<!!!,!!'ae!>i!>,<e}e"!>},<"">}}}}},{{{{{<'a,eo<<!>!!!u'u!>eo<o">,{{{<}">}}}},{<!>},<}!!!!{!!>}},{{<}<!>},<}<}i<!!!>!!!>!!!!!>u>}}},{{{{{{<!!>}}},{{<!!ai<!!!e!!!>}!!!>{!e>},{}}},{<{e!'>},{{{<a>},<!!!>''!>!>,<eo!!>},{<!!!>,<!!e!!!!!>}!!!{e!>},<u>,{{<!!",>}}},{<<!!,!>},<!!e!>,<au{!>>,{<"<u{!>},<!!!!i!>},<{,!!!!!>!<<>}}}},{{<!!}!>},<<!!{!!eo!>,<{a!!!!!>},<>},{{<!!!>!!,!!i!!!!!>{!!!!!i!>ee!>},<}!>a'"iu!!i>}},{<!>!>,<<!>,<>,{<}!>,<!!!!!>">}}}},{{{<'!!!>!e!!!>,<!>i!>o!!!>o!!u,!!!!!!!!!>oa"a>},{<{u>},{<!>,<,!!!>!!!>{o!!!>!!ioi!!!!!!}a{u>}}}},{{{{}},{<,!!u'!!!>a<ii"<!>!!<!!!!!>!>,<'!!e{>}},{{},{},{}}},{{}}},{{{{{{},{},{<u!!!>!!e!!!!,!!!>ui>,{<ou{'!>,<,eeoei"!!!>>}}},{{<!>},<{!!,!,eo'!!<!!<!>,<a>,{{<!>},<a,!>},<!>!!i!!}{o<>}}},{}},{{{<>},{}},<ooa<o!!!>!!!><"!>},<!oi"i!!>}}},{{<i"!!!>{!!!>!!!>,>,<!>,<<{!!'e!!!>!!u!>},}"ei!!!><"i>},{{<ia!>!!o}'!>},<!>!"a!>},<u"!!!>},<i{u'>,
<!!!>},<a!>},<!>!!!>!>},<'!o,!>,<e<!!{!>,<!!>},{<o<!>>}},{}},{},{{<!!!>!!}oo<"oo!><i!>e,o!!a,a>,{<!>},<>}},{{}},{{{<ie!>},<!e!!{!><e!>},<'!!!>"!{<>},{{{<!>,<e!>!>,<<}{e!!!!!!iu{!!!>!!<<'>},<!!!>!!a<>},{}}},{{<o!>},<<!>},<'!!a{!!">,{}},{{<io!!ioi!>,<!!"ii!>},<o>,{}},<e!>eee!>},<>}},{{<!>,<{!!i>},{{{}},{{{<>}},<<'ueu!<!!!>!>,<>}}}}}},{{{{<!!<o!!!!!!<>},{<{,i}o!!}a!>,<!!iaa!>},<'<{e>,{{<">},<ae<,'!!!!!>u!!>}},{{},<,>}},{{<!>,<!>,!!!>u!!!>ie!>"!"!!!>!!!!!!!!!>>,<'o!!u}!>,<">},{{}}}}},{{{{<<!>!!!>u'!i!>},<a<o!>>}},{{{<}!!>},<aio"!!!>},<>},{<!!!!"{!!{!!'}!{i',o<!"!>'!>,<!u!>},<>,<,e!!'a""o>}},{<"!e<{<e!!i!>{>,<},ai!>},<>}},{{{<!>!>},<!!!>{ie!!!!">,{{},{<!>}o}ai}>}}},{{{},<i!>},<{ui!!!>u"!>,<oa>},{{<,!u!i<'>,{<i!>},<u!{!!!}'!>,<,'!>},<!!!!!>!!a'!}!}e!!!>,<>}},{<,!i}i!!ue>}}},{{{},{{{<!>,<!>,,!}>}},{{<!>,<oui<i{!!uu!'o!>!!!>,!!!i!!>},{}},{{{{{<!>,<!!ie!<ia!>},<a'!!">}}},<i{i!>,i>},{<!!,a'!>,<!>},<ii<i!>,<!!o!"},!!au!!!>!!!>">,{}}}}}}},{{{<!>,<!>},<!!o!!<!!!>!>,<!!!>!>},<a>},{}},{<u<!!eee!!>,<a!>{!>},<!>},<!!!>i">}}},{{{},{<e>}}},{{{{{},{<!!o!>},<o!!uouao!!!>aa!>},<!>},<>}}},{{{}},{<!!!>e!!!>!!!>,<{!!!>{{'ae!>},<e>,<!!!>},<!'a!>,<i{>},{{{<!>},<}!'e<!!!>,<<}uuu>}},{{{<!>,<,}e!!!>>}}}}},{<!!"u">}},{{{<<"">},{<u<"!!{!!,i!>,<!>},<a!!!>!>},<!!!!!>>}},{<u!!!>,<e"e"!{!o<}!>!u!>,<<!>>,{{<!!oa{<a"!!"<!>!>,<a!!"!>},<>}}}},{{{{<"a"e,a,!ao{!'!>},<!>,<!!}!>},<i>},{<!!!>},<o"o>}},{{<o!!ai!!!>{">,<!<,{i!>},<!!aeo!!!>!"o!>{!>,<<>}}},{},{{{<{!!!i!>},<!>},<,>}},{{{<!!!!e}a!!!>},<!>!,u',{<aa!!e>}},{{}}},{{<u}a!>},<>,<!!{}'!!<>},{<!!!>},<!!!!"!a!>},<<>}}}},{{{<}e!!{"!!'!!!>!!i!>!>!>},<!{i>},{{{<i!>,<a!>,<!>},<a<'!!uu!>},<>}}}},{}}}},{{{<!>},<>,{<aue>,{<'>}}},{<!>,<i!<!>,<e!!<i!!!>{io!!o!!!>ea>,{<oiu!},!!{}!"u"!>},<>}},{{<}uoi{'!!"a!>},<!!!>,<a!>,<,u"!a!!!>>,<!!!>oeoe!>,<!>},<>},{{<uo!>,<",<"e!!}!!!!'!>},<!!a!<>}},{<!!!!a!>u!>},<!>},<a'e!>,<o'!!!>!!!!!>>}}},{{{<!>},<!!!>},<i'!oo,<}a!!e!><!a!i!{>}},{{<oo{>}},{{{<!>},<oo{e!!!>,<>}},{<"!{u<i{i!!!>}a"u!e{!>},<>}}}}}},{{},{{{{},{{<u}<{'!!!!!>!!!>o!!!
>,<!!o,"i>}},{{{{<}!"}!!ao!!ua,oe!!!>>},{<!!!,!>!>},<>}},{{}},{{{<>}}}},{}},{{<!<<!!!>u!>,<'a{o,u>,{<!>o!>},<!a<!!'!!!!!!!>!>,<o!>,<!>!>>}},{{},{<!u>,{<!!!>ue<ae}!!"e}!>}!>!!!>>}},{<eu"!>!{!!<>,<oo!!',ea!>},<i!>,<!>"!>!>,<{!>},<{!!>}}}},{{{<'!>>,{<!}!!!>},<"e'!!!>},<"o"!><u!>},<u<>}}},{{{{{<}!!!>a!>,<a!>}{!o!{>},{{}}},{}},{{}}},{<u!!!!!>}!!<!!,}ei!>},<!>!!!o!!!!!>!!,e>},{<u{{'!!"<!!,!'oiii>}}},{{{},{}},{{{<!>",e!!!>o{o!>,<!>}!!!>},<!>,<!>,<'o>}},{}}}},{{{{<u!!!!!}}a{!!!><}!a>,{<ou!<!><">}}},{<!i!!<,<,i<'{>,{<aeo<!!'!!!{!!{'!!!>!!!>,<!!!>!!!!!}e!>},<{>}},{{<<a!!"{,a!>,<!!!>!>,<!!!!!>oui!>},<{u,}>},{<e!!oou!!!>u!!!>!>,!>!!!!,!>,<!!!>},<">}}},{{{{<!!o!>,<"'!>,<!<!!!>{>,{<'!!!!!>eu!>},<!>u!!}>}},{{<i!!!a'{oi!}{!!!>"u,!!!>},<>}}},{{{<!!{!!!>ea"!!"!>!!!>!!!>!>,<!a{i>,{{<!>,<!!}'io!>},<i}!>},<!!!e'{!>,<!!!>,<!o!>,<!>,<>,{<!!i!!>}},<aa!!!!'{>}}},{<o!>},<!!i!>,<}!<!!!>!}{}i,,!!o>,<!!!!'!!!>>},{{<!!!>'a!<"!>},<i!>,!>ea,!e{uue>},{<{!>,<i!>o<,i{}u,"<}>,{{<!>,<!!o,!>},<}u>}}}}},{{{}},{{<!>i>},{{<!>,<!>},<!>},<!>},<!>,ee!>,<!!ee!!!!!>!!!!{!>},<<!!!>e>},{<"!>},<u!'!>},<>}}},{}},{{{{},{{<,"a!,ai{"!!!uo!>!!}{>,<o}e!!!!!>'!!!!!>>}}},{{<!>},<!>,{}i"<>}}},{{{<!!<>},{{<!>,<{!!!!!>i!!}o{!!!!i>},<"!!"u!"}",{<ai<e!o<>}}},{{{{{}},{<!'<<!>},<>,<a!>},<}!>,<>}},{{<ui!iu,<!>},<!>},<,!<!a!>"a!!"!>,<!>>,{{<a!!!>,""}!!!>'>,<{!,!>},<"!!!><!!!>,,!>},<">},{}}},{<,i!>},<u"iuu!!"!!>,{<!{!o!ae{!!!'!>'!'!!e!!!>!!<e!!oao!!!!o!">}},{{{<'!>i>,{<!>{''>}},{<<"ui<!>}}!!io<!!a!!!>{!!"a!>},<>,<u>}},{{{<!!!<'>}},<<i!>},<'e!>},<>},{{<!!!>!>i,e>},{{},<!ao!>>},{{<!!}a{!o!>!!!!e{'!!ia!>},<o!>!>!>,<!,u>}}}}},{{<{!!!>!>},<!>},<>,<o!!!>},<!!<!!!e>},{{},{<o>,{{<{!!}!>}!o!>,<!!!>a!>,<!>,<u{!!!!o{!>},<!>,<u>,<<'ii"!!!!!>a,!!!>>},{<!!!>,<<!!!!!>>,{<!!<e<o"!!!>!}}<u!>},<ou"!>!!,}o>}}}}},{<"!>},<!!!>!!!>i!e!><o{!!o!!>,<!>},<>}},{{<{!!!>,<o!>{o!>},<o!>},<>,{}},{}}},{{{{<!!!>ue!!i{!!eu>},{{<u!>!!!>,<<eo!>,<">},{<e,!!<'au{!!"{,!>>}}},{{},{<!>{ue!>,<!{!!ei!>},<!>!>},<!!!>iiie!<>}},{{},<!!!>,<!!!>!<!>!!!><u,!}!>'{!>},<!!!!!>!!>}},
{{{},{}},{<!!!!!!!>,<{<i!>u}>}},{{<a!i'!!o!>},<,!!!>!>au!>>},{<a!>,a!'!}a!!a>}}},{{{{},<!>>},{{{{<!oiaa<i'oe!!}a'e,iuaa>}}}}},{{{}},{<!>},<!>!>,<i!>},<,!>,<,e!!!!!>'o!!!>">,<a!!!!!>{"!>,<!!!><u!!!!!>>}},{{{{<o!!u}!>,<au}}o>}},{<!!!>a!>},<!>,<!ao!!'u!!}!!!>,<">}},{<i,'}!!!!eo}!!!>a!>,<<"!!e>,<!<"!!!>>}},{{{<u"!>,<"!!!i!>},<>}},{<"!i!!o{i!i!!!>>,{<!!{!>},iao!>},<o!!!>}{<!!!>},<>}}}}}}},{{{{<!>o!!!>"i!!!">},{{{{<!!!>u!!<a!!!>!>,<'!!uu">}}},{{{<{oo{{!>>}}}}},{{},{<o!!!>,!o!>,<!>},<!>},<!!{}!!!!<!!!>}o!>},<e!>">}},{{{},{<!>},<a}!>},<{e!>o!!<'!>},<!!>}},<'",!i!!!!{!!}"i!!ui>}},{{{<a"o!!!>!{!>},<<!!ua'!>!!<<>}},{{<!!a!!!>!!e,!!{!!!>!>},<a>},{<<!!u,!!!>},<'u!!!!"!>!!>}}},{{{{{{}},{<>}},{}},{{{<!!!>o!'ueo<!>},<i}!>,<e!!!>!!!>},<">,<"!>},<!>>},{{},{}},{{<!>,<!!<,!!!!{aei<>,{}}}},{{<!!!>{!!!!!>},<!!}{!>!!!>>,{<}!>!>"u>}}},{{}}}},{{{<i<!e<>},<!u!!!>,<,!!io!!i}'!!!>e<!>},<{"a{!!!u<>},{<!!aa!>},<e!>ou>}},{{<!""!>}!!{o!>,<!>,<"!>!!<>},{<{!!!>},<!>,<a!>},<!>,<i!!<!>},<!>,<!!"}!!u<!u!!i}!!>},{}}}},{{{{<!>!!!>>}}}},{{{<"!!!!!!"{o<!>'i>,{<a>}},{{<o!i>},{{},{{},<{!>,<}>}}}},{{<!!oae!>,<!!!>!!!>u!!o!>,<u!!<u!!<"<}>},{<!!!>i,!!!!e{!!,{{!!!>},<u<u!!a!!o>,<>}},{{{<o!!!>!"oa!<!!!>'!!!>>}},{{{{{}},<!!"u!>!o!>},<!<!a>},{{<"e!!!>,}e!!!>ii!!"!!!!,i>}}},{}},{{}}},{{{{{<!>},<!>,<eoi!>,<uauu}e!>>},{<e!>,<"a<{,!!!!!!e!e'e"!!!!!>!!!!!!!!,>}},{<}u">},{<i!!o!!u>,{<a,e<!>},<!!!oooo<'!!!>>}}}},{{},{{{{<i>,{<{o!!'<e!!!>'{!,"!!!>},<!>>}}}},{{{<i!!!!u!!>}},{}}},{{<e!'!!!>!>i{!>,"!!!!<'!!!>!!}<>,{}},{<{!>>}},{{{<{,!ui!!!,>,{<ua!>}},{{>}},{{<,'!>!>,}u>},{{{<>,{<ai>,<!!!">}},<!!!!!>!!,,!>},<>},<"}oe'i},!!e!>,<">}},{}},{<!>!!!>},<o,!>},<>}}}}}},{{<!!!>i!!e>},{{<!>,<!!!>}"!!}>},<{!!!>,<{i!{{!'o'}}>}},{{{<!!!>!>,<!!!!!>!>{,'!>},<>},{<!,!!!>!!!!!!!>>}},{<!!!>,<{{"!!!>!>,<!!{}!>,<!>!>!>,<e>},{{<!!u>},{<}!!<i!!!!!!i!!a!!!>u!{!!{!>!!!!!>,<!!<>},{{},{{}}}}}},{{{<!!"!>,<!!!>},<!>,<!>,<{!!!!<!>,<!!!>'>},{{<!>e>},{<!>},<}o{!>'i'!!!!">}},{{<o}i'!!!>ee>},<}e!>,<u<\"""!!!e{!>!>,<{'<i>}},{{<{uii!o>,<!>},<!>},<!}{!!!>e,!!ai!
>!!!>!>{!!u!!,''>},{{<!!!>a<!!!eu!}o>,<!!,"o"a!>},<a{a!!u>},{}},{{{<u!>},<!!!>'!!!>e!>!>!>,<o>,{}},{{{<'"e!>!!!>"}o!>},<uee!!'o'}'>},<!!!>}!!e!!!!ou!>},<{{}!!!>!!!>,<a}"<>},{<!!!>!!{,,!a!!},'ui{>},{{<u>},{<aa{},o{oo!>},<<e{!!o>}}},{<!>,<}aa,>,{{<'!!!>!!}!!!>,<e!>,<!>},<{!'!!!!o"'"eui>},<!!!i{!!!!!>}{!'{'!>!>!}'<!>,<>}}},{{<a"a!!iau}"!>},<e">,<a!",{!!<!>,<,i>},{{},{}},{{<>},{<ua!,{>}}}}},{{{<!>,<e>},{{}}},{{{<'!!,<!>},<!}ea!>}!>},<!!!"ii,a}{i!>,<>},{<a!>},<ea"{u!!!>!!!>>}},{{{{}},{<!>},<!!!>>}}},{{{}}}},{{<!!!>},<u!><!<!>,!>},<,}!>a,a!!!>"!>,<!!!>!>>},{<{,,,uo>}}},{{<i!>},<!!}ueu,uu!>,<>,{{{{{}}},<!>},<!ioi<!!oee!!e!!>},{<ei!!!!}!>,<"}!u{,!!">,<u!!!>{!'!,!{!!oo!!!>>}}},{{<}!eue,!}!!!>"}!!!!{e!'>},<!>!!!><!!>}}},{{{{<,'!!<>,<<!>},<!!!>!!}!>!!!>}!i{i>},{{<!!!>i,,!!'aa!i''!!!!{<<,!!{!!!eo>,{}}},{<,u!'ouo>}},{{<}!>},<u>,{<}}a!!!!!>},<!>,<!>},<,!>,<!!!>,'i>}},{},{{<!!o>}}},{{<uoou!u!>},<!!!!{!{!>,<o}>,<!a!!!>a{"ei!!!>">},{{<!>,<!!e<!!!!!>!!!>,<u!!o!!!>}!!!<}>,<!>,<}"i}ie!!o{!>>}},{<<!!!!!!!>eee!>,<<>,<,,i!!!!!>!!!>>}}},{{{<a!>},<!!!>>,<,!>!!!i!!!!!><a!>},<">}},{{<!>,<i!!!!!>},<!"!i{io!!!!!>!!'!!!!,!>},<"">}}},{{},{{},{{<!e!>,<!!e}!!!>>},<u"!!!a!>,<o!!u,eea!!"!!'>}}},{{{},<!>},<io!>,<>},{{<a'e!>!!!>ie}e<ao}!!!}!<>}},{{<'o!>},<!>!>>}}}},{{{<a!>!>,<e!>,<!!!>,<{!>},<!>!!u!!>}},{{{<u!>uuu}>}},{{<{!!!!!>!!!!,{!!!!!>!!!!>},<!!!!!>{ua!!,e!}io<i!>},<u!>{,',!!>}},{{},{<a!!,e<ou{oao!!!!}>,{<u!>,<<a!{{,!>,<}!>,<!!u{o!o<!!"!>,<>,<{!!u!!!>u!!<!>,<!!!>},<!!!!e!>}!'!!!>,!!,>}}}}}},{{{{{<ui!!o!!!>,<"o!a!>,<u}>},{<i"'>}},{{{{{{<}!>}!>,<'e!!''}!!!>,<u>}},<!>},<!>},<!>},<!!!}!!!><!>,<,!>},<ao>},{<!>,<!!""!""oe},o!><,!!>,{}},{{{},{{{<!><<!!!ia{e!!!!!>'!>},<<>},{<"!!!>>}},<!!!!o!!!!!a!!i!!!>a">},{<uo!o!>,<{}{!!!>!>>,{<<ioa{a"'!>},<!!!>!uu">}}}}},{{<au!>},<!i!>i,''',!o<!>'>}},{}},{{<"!>,e"'!!!>!!!>,<>,{<o{e{"ee!o!!a!!,!!o!!!>!>,<!!a>}}}},{{<!!!>!>'}!,eo,!!!,!!!>u!!!>!<!<ia>}}},{{{{{<!!!!!>>}}}}},{{},{{{<!!!!!>ae!!ue">},{{{{<}}!!>}},{{},<eo!!!!ee!}'!!!>,{o{!>!>,<{u!!""'>}},<!!!{"eo},!!"oa!}ua>}}
,<a<,!i!!!>>},{{{}}}}},{{{{{<'!>!!!>!!ee!!u">}},{<i!!!>!!uu}e!>,<!>o!>},<}>,{{<!,!!>}}}},{<!!!!!>,<u}>,{<>}},{{<!!e!!!><!>,<<!!!>''!>},<>},{{<i!!!>{i'!!!>a!!!>'{>},<"<,,e}>}}},{},{{<<",'!>,<a,!!i'!>},<u'"!!ao!!!>,<>},{{<!>},<u!!!!}!}!!!e>},{<!>,<<>}},{}}}},{{{{{<}!>,<!}u!io>,{<!>},<!!!>u>}},{<"!>>}},{<,"i<!>,<'o!>},<!!{}>}},{{<eu!>},<<o}''iu!!!!!>!,,<!!eo!!!!!>>},{},{<!>,<}u!!<!<!a,ea,>,<!!"ou,"a!>!>}a{o>}},{{{<!>,<'!!!>i<!>!<!!',{,!>,<e!>}'!{o'>,{<}!>,<!!!>!>,<!>u!!!>>}},{<!!!!!><oia!!!>},<!a!!!!!>{!!!>!>!ao!!,!>!>,<>}},{{{<!>,<!!oa!}!>},<ue!>},<!<{e!>o!!!!,!!!!!>a!<>,<o!>},<!!oo'a!!!o<!>>},{{}}},<!>},<!!"!>,<{!>!!!>,u!e!u'!>,<e<!}!>,<!!!>o>},{{}}},{{<!!i!>!>},<,!ia!!u!!!!!>},<aa!!!>o!!{{{>,<,e!>'!},!!a!>,<>},{{},{<<a'!!!!!!u<!!!u<a{!!{io!!!>u{i>}},{<,o!!!>},<!>},<iae!!!>!!o'!!!>!!!>,<<<{ao>,{<>}}}}},{{{{{<!{!>,<!>},<!!a!>,<>}}},{{{<!!o!>a!>},<""ei!>,<!!!>!!!!'a>},{<}u,a'!!u!!!>!>,<u>}},{<"!>},<!>,<!!!!!o!>!>},<!!!>i!!i!!u>,<!>,<>}},{{{{{{<!!'"!!!>o!!{o"e!>},<o!!!!e}!>,<<,,>},<!!!>!!!>u,!>},<!>},<!>,<o<!!"u}!!!!u>},{<a!"<{a,!>,<!!!>},<u"o>,<!!'"!!!!!>!>},<>},{{<<{,!>e!aoi!>!!o{!>,<!>},<a'!>,>},{<,i!!'{>}}},{<>,<<i!!!>i!!ii<i{!!'}!!>},{{{<,!>},<eo!"i>},<i<!!{<!!!><'ei!>,<>},<ui}>}},{<!!!!!!,i}!!!>'o{!>,<!!!>},<>,<{!o'e',!!"{u!}e,!>!>,}}>}},{{{<o!!>},{<!ouo!!e'u{>}},{{{{<!>,<!i,{!!!>'!!<""!>!>,<!{"!!!>{!!'"{>,{<!<!!!>,<u,a<{!!!>"!!,!!e!!!!"!<!>!>},<!>>}},{{<!>!>u'!u!!!!!>},<{!>,<}}!!!>a!!e!'{i!!!!"{>}}}},{{}}},{{<!!!>!>},<i!!<!!,!!!<!>,<{!,}!!!!!>a"a'!!>},{<ae"<!>,<i!!!o}'a!>!!!!"ua!!'>}}},{{{{{<!!!>>},<'!!!>},<a!!'e>},{<i!!!>eoe}e!!!>!!,'!!!>a{>}},{{<>,<<>},{<!!}e!!,eo},<o}>,{<{!>,<<{>}},{}}},{{<!>!!<,}e}u,!>,<<!!",,!!!>>},<"!>},<!>,<u!>,<o!!!>!>,<eai!>},<uu!>{,i>}}}},{{{<eea!!!>",a!>},<>,<!!!!!>ue<<!>!!!>!!!>>},{{},{<i!>},<!>,<'!!!!a,'!}>}},{<,u{!>},<ae,!>'u!!!!!>ai!!o>,{{<'e!!i!!!>}<'u!>,<!>,<eo>}}}},{{<u{,o!!!>!,!>},<!!e!!!>o!>'o!>>,<!>,<!!o>}},{{{<a!>,<{,u!>},<!>},<!,!e!!!>,!>,<!<>},<!!uo'o'!eia!!!>,!>,<a!>,<!!!>>},{<,!!!><<i!>,<!>!>},<>,{{}}}}},{{{<!!!!!>aau{<!
,ii},!>u{"'>}}},{},{{{{{<{u'!>,<!,!>},<<<!!!>!>,<>}}},{<,!o!!!{>}},{{<!>,<!!!>a"eu!!!>''!i>,{{{<!!i<ui>}}}},{{{},<!!!!!>!>,<!!!>e!!a>},<!>,<!!!!{!>"<!!!>e>},{{<ia{",!!!>,!!!>!>},<>},{<{e<!i{}!!io!!!!e!><i>,<!>},<}!>,<!><!!!{!>},<!}>}}},{{{},{<!>},<,e!>},<i,,'{a!">}},{<!"}{a!!i>,{<!>},<,u!!{a!>},<!!,,!>>}},{}},{{{<!>,<!>},<}!>,{}!>},<a"{,!!i!>,<!!!>,<}o>},<!!<a<!>,<a!!u,u>},{{},{{},{}}},{<!>,<!!!!!>!>},<{u!>,<'!!}!'u>}}}},{{{{},{{{{{{<<"a"!>,<>},<!>i,e!!!!{!<!>e}i!o"!!!!e!>},<!>e>},{}},{{}}}},{{{{{<<>}},{{{{{{{}}},<o!>},<a''{a!}!!!>u!!,!!o!,i!>},<!>},<!!!>>}},{{<'<!>a!>,<,!>{{"a!!!>"!!!>e!!!>>,{<a,,!>!!!>a!>},<!'a>}},{<a!>,<!!o!>,<!'!!!!!>},<}e!o,!>!!!>,<ia!!!>,<e>}},{<,!!!>!!"!!!>!>},<!!!!!>,<,'!!!!!>,<!>i!>a>,{{<!>'a'u!}a!>},<i!>},<!!!>!!!>},<<>}}}},{<!>},<"!>},<>,{{<e">,<!>},<!}u!>},<!!<!!i,!>},<!!!>,<!!!>!>,<"!!!>>},{<!!!>!>!!!>a'>}}},{{<!>o'>},{<!>"a"!!!>!!oeeo!>!>!>},<>},{{{<>},{{<o!i!!!!!>}e{!>},<!>},<i>}}}}}},{{},<>}},{{<!!oo<}"!>'!!!>u},!>},<>},{{<a,ee<!>'!ee!!!>u!>,<o!>},<!<}!{>,{{<!>ea!>,<{'!!!>a<!!"u"!>,<i!!!>!>},<>}}}},{}},{}}},{{<!>!>},<e!!a!>},<!>,<'e!!,u}!!oe>,{<!o!!!!!>},<!!<!>,<oao!!u<o{{!>},<<e<}>,<!a<oeu!>},<e,!!}a>}},{{}}},{{{<!>,!!''!>!!!>o!>"!>,<!!a"!>,<!!o{!!e>},{<!!!><,}o!!!!!>!>,<!a'o<"{!>,'!>!>,<>}},{{<{!>},<!!!!!!{!>,<"'!>},<"!!!>!!!>!!e!!!!!>,<u!>,<'i{>},{}}}},{{{<o!!!>!!,!>,<u}!>},<!!!>},<!!!!!',eo!>},<,'<<>},<!>!!!>!!},!>>}},{{<!',!!oou'}!>},<!>,<!>!>},<}!>},<o!!!!a!>i>,{<<!!a<!>},<,{<!'!>},<>}}}},{{{{<!>,<>,{}}},{{{{{<!>},<<!e!!!!!>!>},<<,'!>,<a!!e>},{<!!!!!>"!!{{<>}}}},{{<!>},<!!!>"<!>,<{{!!!>,<!!}{!!!>!!!>"!eu!!<!!i>},{{<!><!>i!!!!}!!u!>},<<!!u!>,<i!!!!!>,<>,{<!>},<u!!!!}!!!!,,!>'!>}!!}<a!!!!'o!!!>,!!>}},{<"ai!>},<,''>,{<!>}"!!!>}!!!!e!>},<i!!!>,<>,<!!!!u!>,<!,!!!>!!!>!iaae!>i,o!>!!!>>}}},{{}}}},{{<!!!a!!au!>!>,<<{},<!>,<!!,}i!!}o}>},{{<oa<'!,!"a>}}},{{<!<"u!!>},{{<,">},{{{<!!}!>,<!>,<'{,iuoi!>,<{}{!!,'>}},<!!ei!!e!!!!,"!!!!,!>,<!!!>e>}},{<!'}{!,!!!>!!!>'{!!!!,iu!>,<a{!>,<!!i>}}},{{{<!!!>!>},<!!''>},{{<!!{!!!>e!!"<>},{<!!'o,!!}!i>}}}
,{<<a!!i!!}!>!>,<,!!!>!!!'u<{!!a>,<>},{}},{{{{<,a}!',!>{>}}}}}},{{{{{<!oue}<!!'<i<<!>}}}i>},{{}}},{{{{}},{<a!<!{u<!>},<!>,<!<{,'"!!!>{!!!>,<!>,<}a,>,{}},{{<ia!!}!!'a!i!!<>,{}},{{<!!<!o,!}!>,<!>,<{{!>},<>}},{<<!!}"!}!!!>e!!>}}},{<ei,a!!!>}ua!!!>!!!>>}}},{{{{},{{<!>,<'}!'!!e!>,<!!}o"u!!!>'i<>},{<!>,<{}i!!"!!e!!!a{oe!>},<{'>}},{<,!}!>,<a!!a,''e>,{<"i"!>},<<,a!>},<io!>},<eieiei">}}},{{<>,{}}},{{<<<<i{{!><o!!!>!!!>o!!}!!>},{},{{{<!>},<i<ii>},{},{{<!><<oe<}',u!>}!!<!!o>},{<>}}},{},{<u!>"!>,<!u}!>},<i!>,<}>,{<!>,<u!!!!"'!>,<'>}}}}},{{<!><>,{<"}i!!{<{!>,<!!!>!!'!!'!>,<ui!>},<>,<!!!>,<}}!!!>"!!!>!>},<"!>,<!>,<>}}},{{{<'o>},{{<{!>},<<'o!!o!!!>,<"!!!>>}},{{<{uo}<,!>},<ee<{o<,!>},<<"i>,<!>,<"!!!>},<!}!!!>},<!>>},{},{}}},{<>},{{<}>,{<i{!>!<!!!!!e!!!>!>,<!>},<!>,<o<u>}}}},{{}}},{{{<a<!>},<<u!!,>},{<>,<}u!!o!!!!!i>}},{{{{{<<">,{}},{{{<a}<,}!!<,<>}},{{<e!!"o,"u>}},{{<!>,<,!>,<}"o>},{{<u!!!!!!'!>,<e!!'!>oeiai!!""i>},{<!>!!!!uu!!a>}}}},{{<!>!!!!!>!>!>,<,o!!!!!>{>},{{}}}},{{<!!!>!>},<,!!!!<<!!{!!>}},{{<o!>},<"!>,!><"!!!>},<'!!!>!!!!!>oo>},<!!!>}}!>},<e{}!>!>},<a>}}},{{{{<!!,,",!}!!<i<!i!,eo!a!!!>>,{{<u>}}},{{{{<!!e!!!>!!u!!e!"ei!>},<e<},{''>},<!!!>i!>a!>,<"i">},{}},{{},{<!!!!'i{"o!>},<o!}!!i!!!>a>},{{{<ie,!>},<<<{ue""!>},<!!<,a!>},<!>},<>}},<{!>,<>}}},{{<i!>},<a!!!>>,{<i,{o!a'<u"!>},<ii{"ue>,<!>},<<!!!>!!a!>}!!!>"i!!!>i!!!!!>!!!!ee!>},<!>,<}>}},{<'!>"ao!>,<!>},<{"!>},<,!>!>,<'"}o>}}},{<!>a!>{<!!!!!>'!!!!<!>},<u<"'>,{}},{<<!!}!!!>!>!>!>>}},{{{{<{o!'<a}>},<!>>},<ou!!!>,<!,!>,<>}},{{{<!>,<!>!!!>!!!>,<o!!!>!!!!a<<}>,{}},{<}!!}a!>,<!!!>!!iua}a>}},{{<!>},<!o!>oaa!>}>},{<!i!!!>oa!<!!e{eu<oe!>,<e!>!!!i<!>},<>}},{{<i>},<!>!>},<{!!!>!!{!!!>,<>}},{{<!!ioi!>},<o'}>},{<!e!>,<!!!>{eo!!!!!>!!,'!>},<{>,<,u'a!!!!!>!!!!!>oa>}}}},{{{{{{<!!!>},<!!!>}i!!!>>,{<!!{>}},{<<a<a'!>},<}}i!!!>ie!>!",!!'>,{{{<!!,!!!>,<!uo!!!>,<iu>}}}}},{{}},{<}i!!!>!!!>},<!!!!!>},<e,!>,<oua<!!!>},<>,<!!!>,i>}},{{<>}}}}},{{{{},{<<u!>},<{o}!!!>e!>!>!u"!>,<ea<!>},<>}}},{{<{e{o}""e!i!!{ei!>},<>,{<!!i>}},{{{<!>,,u!>},<,ou}!>},<!!e!!!!!
>>},{<!>,<!!u!>,<!!}!!'!!!!!!{!!,!!}!!,!!<e>}},{{}}}},{{{<a!>,<}i!>,<>}},{<!<e!eae>},{{<}''{!>},<u>,{{<<!!!!!!'!!!>},<!>},<{}">}}},{}}}}},{{},{{<i}!>!>i!>},<ai,!!}a!!!>u!>},<",>,<!!!>}!!'{!ee!e!oo!>!>,<!!!}{}">},{{{<!!u!>,<"!!e!!<e<!!>,{{<ou<ui'u!!!!}!>,<!}!">}}},{}}},{{{},{}},{<ai!!!>'!!!<!!!!!!a!!"!>,<!>!ii,!>},<'<!>},<>}}},{{{{{{<>},<}',!!!o<{!!!>!!u!}i<e!!<'>},<}!>},<aoea'!>!>,<!!!!!!}<ou"}>},<!>},<i!>,<euu>},{<}e!!a>,<',>},{}}},{{{{<!!!>{!!!>,<}>,{<o!>,<u!!!<!>>}},{<!!!!!!!>ue<!e{,!>},<o<!>!>i>}}}}},{{{},{{{}},{{<aee>},<!>!!}{{'!!!>!!!>!<!ui"!,o<>},{{<o!!!!i!>!"!>e<e!>},<!>!>,<!!!!}"!!!'!>>},{<"}!!!>!>!!<>}}},{{<!{i>,<"!!!u!!eo!>,<!!!>o!!}!!i!!}iu!>>}}},{{{{<},!!!!!>'{!>},<o"!>,<e",,>},<{e{eu!>,<!!!>,<'!!!>!>,<e<>},{<u<!><!>},<!>!!!!!>>}},{<e!>},<'!!uoi{!>},<!>!"!>u!>,<!!o!},!>},<!>">,<<!>},<!>!!"!!!e>},{{}}}}}},{{{<u!>,<!{!{!>},<ao,!!e!!{<!>,<i'>},{<,!!,{{>,{<,e}',!>,<!!{,e}!!{!>},<{!>,<!!!>>}},{}},{{{{{{<e>},{}},{{<o'u!!{{'!!'!!e"ia>}}},{<>},{{{{<!{!eauie<<<!!!,!!a>},{<<!>u!!!>e'<!{}!{e!!,!}!>!>},<o>}},{{<!!!>i!>},<!>>}}}}},{{{{},<!>!>!!!!}a!>},<{{}e,i'o!>,<!!!!!>},<'>},{{}}},{{<>},{{<!!!>e!!}a'}'!>{eo{o!!!>!!!!!!>},{<!>},<{''}!>},<!>},<,!",oiu>,{<!<!!!!!>e,!<a!>},<>}}}},{{}}},{{{},<'o}}"!>},<!!a}i}"!!!>>},{{{{{<>,{}}}},<o'ea!"!>},<}>}}},{{},{{<io!!!>!>a}!!!!!"u{!>!!!!e">},{{{<ue{}<"'a},{!!!!!>!ue!!!>!!!>!!>,<'"u!!!>ee<!!!>!}!{,!!!!>}},{{{{<"{!!!>},<eo!'o<>}}},<!>!>'eo>}}},{{<!>,!!i'!!!>,<{o{'>},<!>},<!>!!!!<""!!!>i>}}},{{{{{{{<!>,<{!>,<e!!'!>a<!>},<>,<!u!!u{<a>},<!!>}}},{{<!!!!!>a>}}},{{{}},{<!!,iu!}!!''}"i!>!!'o!>},<!>,>}}},{{{{}}},{{},{},{{{<}'u!!!>u!!o>},<!e!u>},{{{<!>,<"!e!!e',"!!!>!>,<!!a!>,<!!!!!>!>},<!<'ua{!!}>},<e!>},<'<!!!>},<!!!>,<,e"!>},<!><>}}}},{{<i!e!!!>!!!!,!>!>,<>,<!>i!!u!>,<<>},{{{<!>,<!>},<!>},<!{!>i!!!!!>i!!!}!!eue">},{<!!!>},<!ie"!<>}},{{},<<ue'u>}}}},{{{<a!>">,{}},{<!!!!!!"a>},{<<!>},<i!>},<!!}u}!!{a!>,<!>},<>,<!>,<!!!!!>!!!>'!!!>}}"o{!!>}},{<,}""a>,<""!>,<!!,>},{}}},{{{<"!!!>!>!>},<,!!!>},<e'!!a>},{<}u<!>,<a!!!>,!!u!>},<u",!>""!>!!>,<i!
>ia!>,<<!!<!i!>u!>,<ee}>}},{{<!>},<!!'>}}}}}}},{}}
"""
print(score(clean(input))) |
987,435 | 1b9b2909e24bbc6973a456a40dc400d2934f9b32 | import scipy as sp
from timeit import timeit

import numpy as np
import scipy.linalg as linalg
def DoolittleLU(matrix: np.ndarray, dtype=float):
    """Doolittle LU decomposition: matrix = L @ U with unit diagonal on L.

    No pivoting is performed, so a zero pivot (u[i][i] == 0) raises a
    division error / produces inf for singular leading minors.

    Fix: sp.zeros is a NumPy alias removed from SciPy's top-level
    namespace (SciPy 1.12); use numpy directly.

    :param matrix: (n, m) array to factor.
    :param dtype: element type of the returned factors.
    :return: (l, u) with l (n, n) unit lower triangular, u (n, m) upper.
    """
    n, m = matrix.shape
    l = np.zeros((n, n), dtype=dtype)
    u = np.zeros((n, m), dtype=dtype)
    for i in range(n):
        # Row i of U: u[i][k] = a[i][k] - sum_j<i l[i][j] * u[j][k]
        for k in range(i, m):
            u[i][k] = matrix[i][k] - sum(l[i][j] * u[j][k] for j in range(i))
        l[i][i] = 1
        # Column i of L below the diagonal, scaled by the pivot u[i][i].
        for k in range(i + 1, n):
            s = sum(l[k][j] * u[j][i] for j in range(i))
            l[k][i] = (matrix[k][i] - s) / u[i][i]
    return l, u
def CroutLU(matrix: np.ndarray, dtype=float):
    """Crout LU decomposition: matrix = L @ U with unit diagonal on U.

    No pivoting; a zero pivot (l[j][j] == 0) fails for singular leading
    minors.

    Fixes: the *dtype* argument was accepted but ignored; sp.zeros is a
    NumPy alias removed from SciPy's top-level namespace (SciPy 1.12).

    :param matrix: (n, m) array to factor.
    :param dtype: element type of the returned factors (now honored).
    :return: (l, u) with l (n, m) lower triangular, u (m, m) unit upper.
    """
    n, m = matrix.shape
    l = np.zeros((n, m), dtype=dtype)
    u = np.zeros((m, m), dtype=dtype)
    for j in range(m):
        # Column j of L on and below the diagonal.
        for i in range(j, n):
            l[i][j] = matrix[i][j] - sum(l[i][k] * u[k][j] for k in range(j))
        u[j][j] = 1
        # Row j of U to the right of the diagonal, scaled by pivot l[j][j].
        for i in range(j + 1, m):
            s = sum(l[j][k] * u[k][i] for k in range(j))
            u[j][i] = (matrix[j][i] - s) / l[j][j]
    return l, u
def LUDecomposition(matrix: np.ndarray, dtype=float):
    """Gaussian elimination producing matrix = L @ U.

    L is (n, n) unit lower triangular; U starts as a copy of *matrix*
    and is reduced in place to upper-triangular form.  No pivoting, so a
    zero pivot U[j][j] fails.

    Fix: sp.identity/sp.array are NumPy aliases removed from SciPy's
    top-level namespace (SciPy 1.12); use numpy directly.
    """
    n, m = matrix.shape
    L = np.identity(n, dtype)
    U = np.array(matrix, dtype=dtype)
    for i in range(1, n):
        for j in range(i):
            # Eliminate U[i][j] with row j; keep the multiplier in L.
            L[i][j], U[i][j] = U[i][j] / U[j][j], dtype(0)
            for k in range(j + 1, m):
                U[i][k] -= U[j][k] * L[i][j]
    return L, U
def LUPPDecomposition(matrix: np.ndarray, dtype=float):
    """LU with a scaled row permutation: matrix = P @ L @ U.

    Rows are reordered once by |a[i][0]| / max|row i| before the plain
    LU factorization runs.
    NOTE(review): this orders rows by the first column only — it is not
    full partial pivoting at every elimination step.

    Fix: sp.absolute/sp.amax/sp.arange/sp.zeros are NumPy aliases removed
    from SciPy's top-level namespace (SciPy 1.12); P now also honors
    *dtype*.

    :return: (P, L, U) with P the permutation such that P @ L @ U
        reconstructs *matrix* (P returned already transposed/inverted).
    """
    n, _ = matrix.shape
    absmatrix = np.absolute(matrix)
    scales = np.amax(absmatrix, axis=1)
    p = sorted(np.arange(n),
               key=lambda i: absmatrix[i][0] / scales[i], reverse=True)
    P = np.zeros((n, n), dtype=dtype)
    for idx, pivot in enumerate(p):
        P[idx][pivot] = 1
    L, U = LUDecomposition(P @ matrix, dtype)
    # P.transpose() is the inverse permutation: matrix == P^T @ L @ U.
    return P.transpose(), L, U
def LUCPDecomposition(matrix: np.ndarray, dtype=float):
    """LU with row and column permutations: matrix = P @ L @ U @ Q.

    Rows are ordered once by scaled |first column|, columns by scaled
    |first row|, before the plain LU factorization runs.
    NOTE(review): like LUPPDecomposition, this pivots only on the first
    row/column — not complete pivoting at every step.

    Fix: sp.absolute/sp.amax/sp.arange/sp.zeros are NumPy aliases removed
    from SciPy's top-level namespace (SciPy 1.12); P and Q now honor
    *dtype*.

    :return: (P, L, U, Q) such that P @ L @ U @ Q reconstructs *matrix*
        (both permutations returned already transposed/inverted).
    """
    n, m = matrix.shape
    absmatrix = np.absolute(matrix)
    rowscales = np.amax(absmatrix, axis=1)
    colscales = np.amax(absmatrix, axis=0)
    p = sorted(np.arange(n),
               key=lambda i: absmatrix[i][0] / rowscales[i], reverse=True)
    q = sorted(np.arange(m),
               key=lambda j: absmatrix[0][j] / colscales[j], reverse=True)
    P = np.zeros((n, n), dtype=dtype)
    for idx, pivot in enumerate(p):
        P[idx][pivot] = 1
    Q = np.zeros((m, m), dtype=dtype)
    for idx, pivot in enumerate(q):
        Q[idx][pivot] = 1
    L, U = LUDecomposition(P @ matrix @ Q, dtype)
    return P.transpose(), L, U, Q.transpose()
def CholeskyDecomposition(matrix: np.ndarray, dtype=float):
    """Cholesky factorization: matrix = L @ L^T, L lower triangular.

    Requires a symmetric positive-definite input; a non-SPD matrix
    yields NaN (negative value under the square root) or a division
    error.

    Fixes: the *dtype* argument was accepted but ignored; sp.zeros is a
    NumPy alias removed from SciPy's top-level namespace (SciPy 1.12).
    """
    n, _ = matrix.shape
    L = np.zeros((n, n), dtype=dtype)
    for i in range(n):
        # Off-diagonal entries of row i.
        for j in range(i):
            s = matrix[i][j] - sum(L[i][k] * L[j][k] for k in range(j))
            L[i][j] = s / L[j][j]
        # Diagonal entry: remaining variance under the square root.
        d = matrix[i][i] - sum(L[i][k] ** 2 for k in range(i))
        L[i][i] = d ** 0.5
    return L
def LDLTDecomposition(matrix: np.ndarray, dtype=float):
    """LDL^T factorization for symmetric input: matrix = L @ D @ L^T.

    For a symmetric matrix the LU factor satisfies U == D @ L^T, so D is
    simply U's diagonal.

    Fix: sp.diag is a NumPy alias removed from SciPy's top-level
    namespace (SciPy 1.12); use numpy directly.
    """
    L, U = LUDecomposition(matrix, dtype)
    return L, np.diag(np.diag(U))
def AssertLU(matrix: np.ndarray, msg=''):
    """Run every decomposition on *matrix*, printing PASS/FAIL and timing.

    Each routine is run once and its factors multiplied back together;
    on a match (exact within allclose, rtol=0) a PASS line plus the time
    of 1000 repeated runs is printed, otherwise FAIL.  LDLT and Cholesky
    are attempted only for symmetric square input.

    Refactor: the eight near-identical verify/time stanzas are collapsed
    into one local helper; sp.allclose/sp.transpose (aliases removed from
    SciPy's top-level namespace in SciPy 1.12) replaced with numpy.
    """
    def check(label, decompose, rebuild):
        # One call for the correctness check; timeit re-runs it 1000x.
        factors = decompose(matrix)
        if np.allclose(matrix, rebuild(factors), rtol=0):
            print('PASS: ' + label)
            print(
                f'time: {timeit(lambda: decompose(matrix), number=1000) * 1000: 3.0f}ms')
        else:
            print('FAIL: ' + label)

    print(msg)
    check('linalg.lu', linalg.lu, lambda f: f[0]@f[1]@f[2])
    check('DoolittleLU', DoolittleLU, lambda f: f[0]@f[1])
    check('CroutLU', CroutLU, lambda f: f[0]@f[1])
    check('LU decomposition', LUDecomposition, lambda f: f[0]@f[1])
    check('LUPP decomposition', LUPPDecomposition, lambda f: f[0]@f[1]@f[2])
    check('LUCP decomposition', LUCPDecomposition, lambda f: f[0]@f[1]@f[2]@f[3])
    # Symmetric matrix only
    if matrix.shape[0] == matrix.shape[1] and np.allclose(matrix, matrix.transpose(), rtol=0):
        check('LDLT decomposition', LDLTDecomposition,
              lambda f: f[0]@f[1]@f[0].transpose())
        check('Cholesky decomposition', CholeskyDecomposition,
              lambda L: L@L.transpose())
    print()
def main():
    """Exercise every decomposition against a battery of matrices.

    Fixes: sp.array/sp.zeros/sp.rand are NumPy aliases removed from
    SciPy's top-level namespace (SciPy 1.12); the hand-rolled Vandermonde
    loops are replaced with np.vander; corrected the 'Vandermode' typo in
    the rank-17 label.
    """
    A = np.array([[1, 1, 1],
                  [1, 2, 4],
                  [1, 3, 9]])
    AssertLU(A, 'Low rank(3) Vandermonde matrix')
    A = np.array([[-1]])
    AssertLU(A, 'Single element matrix')
    A = np.array([[3, 2, 4],
                  [2, 4, 3]])
    AssertLU(A, 'Non-square(2, 3) matrix')
    A = np.array([[2, 4],
                  [3, 3],
                  [4, 2]])
    AssertLU(A, 'Non-square(3, 2) matrix')
    A = np.array([[1, -2, 0],
                  [-2, 1, -2],
                  [0, -2, 1]])
    AssertLU(A, 'Symmetric matrix')
    A = np.array([[4, 12, -16],
                  [12, 37, -43],
                  [-16, -43, 98]])
    AssertLU(A, 'Positive-definite matrix')
    A = np.array([[1, 2, 3],
                  [2, 4, 7],
                  [3, 3, 3]])
    AssertLU(A, 'Bad condition of naive LU decomposition')
    A = np.array([[1, 9, 3],
                  [2, 2, 7],
                  [3, 3, 3]])
    AssertLU(A, 'Bad condition of LUPP decomposition')
    # Vandermonde matrices A[i][j] = (i+1)**j, built in float to match the
    # original float accumulation (avoids integer overflow for 17**16).
    A = np.vander(np.arange(1.0, 17.0), increasing=True)
    AssertLU(A, 'High rank(16) Vandermonde matrix')
    A = np.vander(np.arange(1.0, 18.0), increasing=True)
    AssertLU(A, 'High rank(17) Vandermonde matrix')
    A = np.array([[1, 1, 1],
                  [2, 2, 2],
                  [3, 3, 3]])
    AssertLU(A, 'Singular matrix')
    A = np.random.rand(50, 50)
    AssertLU(A, 'Big(50, 50) matrix')


if __name__ == '__main__':
    main()
|
987,436 | 73da994a0a3d30be4aff9c23acf055f07400b25d | import unittest
import logging
from api.login_api import LoginAPI
from utils import assert_common
class TestIHRMLogin(unittest.TestCase):
    """IHRM login-API tests: one success scenario plus seven rejections.

    Refactor: the request/log boilerplate shared by every test is factored
    into _call_login(), the identical rejected-login assertion into
    _assert_login_rejected(); removed the commented-out assertions that
    assert_common() superseded and the empty setUp/tearDown hooks.
    """

    @classmethod
    def setUpClass(cls) -> None:
        # One shared API client for the whole test class.
        cls.login_api = LoginAPI()

    def _call_login(self, mobile, password, scenario):
        """Call the login API, log the JSON body tagged with *scenario*,
        and return the raw response."""
        response = self.login_api.get_login_url(mobile, password)
        logging.info("{}接口返回的数据为:{}".format(scenario, response.json()))
        return response

    def _assert_login_rejected(self, response):
        """Every invalid-credential case yields the same envelope:
        HTTP 200, success=False, code=20001, message 用户名或密码错误."""
        assert_common(self, response, 200, False, 20001, "用户名或密码错误")

    def test01_login_success(self):
        """A valid mobile/password combination logs in successfully."""
        response = self._call_login("13800000002", "123456", "登录成功")
        assert_common(self, response, 200, True, 10000, "操作成功")

    def test02_username_is_not_exist(self):
        """An unregistered mobile number is rejected."""
        self._assert_login_rejected(
            self._call_login("13900000002", "123456", "用户不存在"))

    def test03_password_error(self):
        """A wrong password is rejected."""
        self._assert_login_rejected(
            self._call_login("13800000002", "error", "密码错误"))

    def test04_empty_username(self):
        """An empty mobile number is rejected."""
        self._assert_login_rejected(
            self._call_login("", "123456", "用户名为空"))

    def test05_username_contains_special_character(self):
        """A mobile number made of special characters is rejected."""
        self._assert_login_rejected(
            self._call_login("!@#$%^&*()*", "123456", "账号包含特殊字符"))

    def test06_empty_password(self):
        """An empty password is rejected."""
        self._assert_login_rejected(
            self._call_login("13800000002", "", "密码为空"))

    def test07_username_contains_chinese(self):
        """A mobile number containing a Chinese character is rejected."""
        self._assert_login_rejected(
            self._call_login("13中00000002", "123456", "账号包含中文"))

    def test08_username_contains_space(self):
        """A mobile number containing spaces is rejected."""
        self._assert_login_rejected(
            self._call_login("13 000 0002", "123456", "账号包含空格"))


if __name__ == '__main__':
    unittest.main()
|
987,437 | 6b8aec1bea3ac77e58ebab4d8014f7a73eb39df6 | # app.py
import os
import json
from datetime import datetime
import requests
def getDialogue(theBillerCode):
    """Scrape bpay.com.au's biller-lookup page for *theBillerCode*.

    Returns a dict {"statusCode": 200, "results": {...}} when a match is
    found.  When no results are shown it returns the JSON *string*
    '{"Dialogue": "No results found"}'.
    NOTE(review): the two branches return different types (str vs dict);
    kept as-is for backward compatibility.

    Fixes: removed the duplicated wpURL assignment, the unused
    quote/BeautifulSoup imports, and the dead first `results` string that
    was immediately overwritten by the dict.
    """
    print('getDialogue')
    # On AWS Lambda, use: from botocore.vendored import requests
    from lxml import html

    wpURL = "https://bpay.com.au/BillerLookupResults?query={billerCode}"
    agent = {"User-Agent": "Mozilla/5.0"}
    url = wpURL.format(billerCode=theBillerCode)
    print('bpay URL={}'.format(url))
    response = requests.get(url, headers=agent)
    print('done')
    root = html.fromstring(response.content)

    # Header line stating how many billers matched the query.
    numResults = root.xpath('//*[@id="tab1"]/div/div/div/div[1]/h3/text()')
    # The five <p> fields of the first result card, in page order.
    fieldXP = '//*[@id="tab1"]/div/div/div/div[2]/div/div[1]/p[{}]'
    billerCodeXP = root.xpath(fieldXP.format(1))
    billerShortXP = root.xpath(fieldXP.format(2))
    billerLongNameXP = root.xpath(fieldXP.format(3))
    locationOfReferenceNumberXP = root.xpath(fieldXP.format(4))
    billerAcceptsXP = root.xpath(fieldXP.format(5))

    if len(numResults) == 0:
        print("No results")
        response = {"Dialogue": "No results found"}
        return json.dumps(response)

    results = {"numResults": numResults[0].strip(),
               "billerCodeXP": billerCodeXP[0].text,
               "billerShortXP": billerShortXP[0].text,
               "billerLongNameXP": billerLongNameXP[0].text,
               "locationOfReferenceNumberXP": locationOfReferenceNumberXP[0].text,
               "billerAcceptsXP": billerAcceptsXP[0].text
               }
    response = {
        "statusCode": 200,
        "results": results
    }
    return response
if __name__ == '__main__':
    # CLI entry point: look up the biller code given as the first argument.
    import sys
    if len(sys.argv) < 2:
        print("eg. python app.py billerCode")
        sys.exit(1)
    billerCode = sys.argv[1]
    res = getDialogue(billerCode)
    print(res)
|
987,438 | 552df6050e2f32fad0549c924aaf8db7772de54c | #! /usr/bin/python3
from iris.test.rdma.utils import *
from infra.common.glopts import GlobalOptions
from infra.common.logging import logger as logger
from iris.config.objects.rdma.dcqcn_profile_table import *
def Setup(infra, module):
    """Module-level setup hook; nothing to initialize for this test."""
    return
def Teardown(infra, module):
    """Module-level teardown hook; nothing to clean up for this test."""
    return
def TestCaseSetup(tc):
    """Prime RQ/SQ and DCQCN state for the CNP-driven rate test.

    Snapshots the pre-test qstates into tc.pvtdata (restored in
    TestCaseTeardown), enables congestion management on both queues, and
    seeds the DCQCN control block with known rate/counter values so the
    step verifiers can assert exact post-trigger numbers.
    """
    logger.info("RDMA TestCaseSetup() Implementation.")
    rs = tc.config.rdmasession
    # Snapshot RQ state, then enable congestion management on it.
    rs.lqp.rq.qstate.Read()
    tc.pvtdata.rq_pre_qstate = copy.deepcopy(rs.lqp.rq.qstate.data)
    rs.lqp.rq.qstate.data.congestion_mgmt_type = 1;
    rs.lqp.rq.qstate.WriteWithDelay()
    tc.pvtdata.test_timer = 1
    # Snapshot SQ state, then enable congestion management on it too.
    rs.lqp.sq.qstate.Read()
    tc.pvtdata.sq_pre_qstate = copy.deepcopy(rs.lqp.sq.qstate.data)
    rs.lqp.sq.qstate.data.congestion_mgmt_type = 1;
    rs.lqp.sq.qstate.WriteWithDelay()
    tc.pvtdata.msn = (tc.pvtdata.sq_pre_qstate.msn + 1)
    tc.pvtdata.sq_cindex = tc.pvtdata.sq_pre_qstate.c_index0
    # Read DCQCN Profile pre state
    tc.pvtdata.lif = rs.lqp.pd.ep.intf.lif
    dcqcn_profile = RdmaDcqcnProfileObject(tc.pvtdata.lif, 0)
    dcqcn_profile.Read()
    tc.pvtdata.dcqcn_profile_pre_qstate = copy.deepcopy(dcqcn_profile.data)
    dcqcn_profile.WriteWithDelay()
    # receive CNP packet to ring the first DCQCN_RATE_COMPUTE_RING doorbell, in cnp_recv_process alpha value will be set to 65535 initially,
    logger.info("RDMA DCQCN State read/write")
    rs.lqp.ReadDcqcnCb()
    tc.pvtdata.dcqcn_pre_qstate = rs.lqp.dcqcn_data
    # Setting target rate to 100 gbps and rate-enforced to 10 gbps
    # Advance cur_timestamp past the rate-reduce monitor period so the
    # CNP is treated as a fresh congestion event.
    rs.lqp.dcqcn_data.cur_timestamp = rs.lqp.dcqcn_data.last_cnp_timestamp + 833 * (tc.pvtdata.dcqcn_profile_pre_qstate.rp_rate_reduce_monitor_period)
    rs.lqp.dcqcn_data.target_rate = 100000
    rs.lqp.dcqcn_data.rate_enforced = 10000
    rs.lqp.dcqcn_data.byte_counter_exp_cnt = 0
    rs.lqp.dcqcn_data.cur_byte_counter = 20
    rs.lqp.dcqcn_data.timer_exp_cnt = 0
    rs.lqp.dcqcn_data.num_alpha_exp_cnt = 5
    rs.lqp.dcqcn_data.byte_counter_thr = 1024
    rs.lqp.dcqcn_data.cur_avail_tokens = 10000
    # Feed initial integer alpha and g values.
    rs.lqp.dcqcn_data.alpha_value = 65535
    rs.lqp.dcqcn_data.max_rate_reached = 1
    rs.lqp.dcqcn_data.sq_cindex = tc.pvtdata.sq_cindex
    rs.lqp.WriteDcqcnCb()
    # Read CQ pre state
    rs.lqp.sq_cq.qstate.Read()
    tc.pvtdata.sq_cq_pre_qstate = rs.lqp.sq_cq.qstate.data
    # Read AQ pre state
    tc.pvtdata.lif = rs.lqp.pd.ep.intf.lif
    tc.pvtdata.aq = tc.pvtdata.lif.aq
    PopulateAdminPreQStates(tc)
    return
def TestCaseStepTrigger(tc, step):
    """Drive the FAST_TIMER wheel for each trigger step.

    Step 0 resets the simulated system time to 0; step 1 fast-forwards
    it by 56 ticks so DCQCN timer-expiry processing fires.  Returns True
    immediately in dry-run mode.

    Fix: removed the duplicated word ("by by") in the step-1 log message.
    """
    logger.info("RDMA TestCaseStepTrigger() Implementation with step_id: %d" % (step.step_id))
    if (GlobalOptions.dryrun): return True
    if step.step_id == 0:
        logger.info("RDMA TestCaseStepTrigger() - Setting the system time for FAST_TIMER to 0")
        timer = tc.infra_data.ConfigStore.objects.db['FAST_TIMER']
        timer.Step(0)
    if step.step_id == 1:
        logger.info("RDMA TestCaseStepTrigger() - Fast Forwarding the system time by 56 ticks for FAST_TIMER wheel")
        timer = tc.infra_data.ConfigStore.objects.db['FAST_TIMER']
        timer.Step(56)
    return
def TestCaseStepVerify(tc, step):
    """Verify post-trigger RQ/SQ/DCQCN state for each step.

    Step 0 checks the immediate effect of the received CNP (rate cut,
    counters reset, alpha saturated); step 1 checks the state after the
    timer wheel was advanced (alpha decayed to 0, one timer expiry, and
    a rate-increase event).  Returns False on the first mismatch.
    """
    if (GlobalOptions.dryrun): return True
    logger.info("RDMA TestCaseVerify() Implementation.")
    rs = tc.config.rdmasession
    # Capture post-trigger RQ/SQ/DCQCN state for comparison.
    rs.lqp.rq.qstate.Read()
    tc.pvtdata.rq_post_qstate = rs.lqp.rq.qstate.data
    rs.lqp.sq.qstate.Read()
    tc.pvtdata.sq_post_qstate = rs.lqp.sq.qstate.data
    rs.lqp.ReadDcqcnCb()
    tc.pvtdata.dcqcn_post_qstate = rs.lqp.dcqcn_data
    if step.step_id == 0:
        # verify that token_id is incremented by 1
        if not VerifyFieldModify(tc, tc.pvtdata.rq_pre_qstate, tc.pvtdata.rq_post_qstate, 'token_id', 1):
            return False
        # verify that nxt_to_go_token_id is incremented by 1
        if not VerifyFieldModify(tc, tc.pvtdata.rq_pre_qstate, tc.pvtdata.rq_post_qstate, 'nxt_to_go_token_id', 1):
            return False
        ######################### Verify DCQCN params ############################
        # verify that timer_exp_cnt is set to 0.
        if not VerifyFieldAbsolute(tc, tc.pvtdata.dcqcn_post_qstate, 'timer_exp_cnt', 0):
            return False
        # verify that byte_counter_exp_cnt is set to 0.
        if not VerifyFieldAbsolute(tc, tc.pvtdata.dcqcn_post_qstate, 'byte_counter_exp_cnt', 0):
            return False
        # verify that cur_byte_counter is set to 0.
        if not VerifyFieldAbsolute(tc, tc.pvtdata.dcqcn_post_qstate, 'cur_byte_counter', 0):
            return False
        # verify that target-rate is set to rate-enforced.
        if not VerifyFieldAbsolute(tc, tc.pvtdata.dcqcn_post_qstate, 'target_rate', 10000):
            return False
        # verify that rate-enforced is cut by half.
        if not VerifyFieldAbsolute(tc, tc.pvtdata.dcqcn_post_qstate, 'rate_enforced', 5000):
            return False
        # verify that num-cnp-received is incremented by 1.
        if not VerifyFieldModify(tc, tc.pvtdata.dcqcn_pre_qstate, tc.pvtdata.dcqcn_post_qstate, 'num_cnp_rcvd', 1):
            return False
        # verify that num-cnp-processed is incremented by 1.
        if not VerifyFieldModify(tc, tc.pvtdata.dcqcn_pre_qstate, tc.pvtdata.dcqcn_post_qstate, 'num_cnp_processed', 1):
            return False
        # verify that alpha value is set to 65535 based on dcqcn algorithm calculations.
        if not VerifyFieldAbsolute(tc, tc.pvtdata.dcqcn_post_qstate, 'alpha_value', 65535):
            return False
    if step.step_id == 1:
        # After the timer advance, alpha has fully decayed to 0.
        if not VerifyFieldAbsolute(tc, tc.pvtdata.dcqcn_post_qstate, 'alpha_value', 0):
            return False
        # Alpha-expiry counter wraps/resets once decay completes.
        if not VerifyFieldAbsolute(tc, tc.pvtdata.dcqcn_post_qstate, 'num_alpha_exp_cnt', 0):
            return False
        # Exactly one rate-increase timer expiry was processed.
        if not VerifyFieldAbsolute(tc, tc.pvtdata.dcqcn_post_qstate, 'timer_exp_cnt', 1):
            return False
        # RQ producer index 5 advanced by one (timer doorbell posted).
        if not VerifyFieldModify(tc, tc.pvtdata.rq_pre_qstate, tc.pvtdata.rq_post_qstate, 'p_index5', 1):
            return False
        # verify that target-rate is set to rate-enforced.
        if not VerifyFieldAbsolute(tc, tc.pvtdata.dcqcn_post_qstate, 'target_rate', 10000):
            return False
        # verify that rate-enforced is cut by half.
        # NOTE(review): 7500 is the midpoint (target + current) / 2 of a
        # recovery step, not a halving — the comment above looks stale.
        if not VerifyFieldAbsolute(tc, tc.pvtdata.dcqcn_post_qstate, 'rate_enforced', 7500):
            return False
    return True
def TestCaseTeardown(tc):
    """Restore the RQ and SQ queue state captured before the test ran."""
    logger.info("RDMA TestCaseTeardown() Implementation.")
    # Disable congestion mgmt in qstate by writing back the pre-test snapshots.
    rs = tc.config.rdmasession
    for queue, saved_state in ((rs.lqp.rq, tc.pvtdata.rq_pre_qstate),
                               (rs.lqp.sq, tc.pvtdata.sq_pre_qstate)):
        queue.qstate.Read()
        queue.qstate.data = copy.deepcopy(saved_state)
        queue.qstate.WriteWithDelay()
    return
|
987,439 | f9a377813b54c6d4b475e5f8e01dc5c176a25a6b | 方法一:
class Solution:
    def countCharacters(self, words: List[str], chars: str) -> int:
        """Sum the lengths of words that can be spelled from chars' letters."""
        available = {}
        for letter in chars:
            available[letter] = available.get(letter, 0) + 1
        total = 0
        for word in words:
            used = {}
            fits = True
            for letter in word:
                used[letter] = used.get(letter, 0) + 1
                if used[letter] > available.get(letter, 0):
                    fits = False
                    break
            if fits:
                total += len(word)
        return total
方法二:
显然,对于一个单词 word,只要其中的每个字母的数量都不大于 chars 中对应的字母的数量,
那么就可以用 chars 中的字母拼写出 word。所以我们只需要用一个哈希表存储 chars 中每个字母的数量,
再用一个哈希表存储 word 中每个字母的数量,最后将这两个哈希表的键值对逐一进行比较即可
class Solution:
    def countCharacters(self, words: List[str], chars: str) -> int:
        """Sum the lengths of words whose letter counts fit within chars."""
        supply = collections.Counter(chars)
        total = 0
        for word in words:
            need = collections.Counter(word)
            if all(need[letter] <= supply[letter] for letter in word):
                total += len(word)
        return total
时间复杂度:O(n),其中 n 为所有字符串的长度和。我们需要遍历每个字符串,
包括 chars 以及数组 words 中的每个单词。
空间复杂度:O(S),其中 S 为字符集大小,在本题中 S 的值为 26(所有字符串仅包含小写字母)。
程序运行过程中,最多同时存在两个哈希表,使用的空间均不超过字符集大小S,因此空间复杂度为O(S)
|
987,440 | db7b3e246afcd63d1bf65059705b414d870ae1b1 | #!/usr/bin/python
# _*_ coding:utf-8 _*_
import sys
import knock30 as takayuki
def main():
mecab_file = open(sys.argv[1], "r")
all_sentences = takayuki.make_morphdicts(mecab_file)
mecab_file.close()
length_dict = dict()
nounstring = str()
noun_count = 0
for one_sentence in all_sentences:
for morphdict in one_sentence:
if morphdict["pos"] == "名詞":
nounstring += morphdict["surface"]
noun_count += 1
else:
length_dict[noun_count] = nounstring
nounstring = str()
noun_count = 0
print length_dict[max(length_dict.keys())]
if __name__=="__main__":
main()
|
987,441 | ff5a3ffd646d5e20000db0707e2f2ff52c8cee48 | # Generated by Django 2.1.3 on 2019-04-29 05:05
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering the default of PublicTrack.expiry.

    NOTE(review): the default is a datetime literal frozen at generation time
    (2019-04-29); rows created later still receive this fixed timestamp —
    a callable default is probably what was intended. Verify before reuse.
    """
    dependencies = [
        ('photaMusic', '0010_auto_20190429_0504'),
    ]
    operations = [
        migrations.AlterField(
            model_name='publictrack',
            name='expiry',
            field=models.DateTimeField(default=datetime.datetime(2019, 4, 29, 15, 5, 12, 998282), verbose_name='Expiry Time'),
        ),
    ]
|
987,442 | bd1510cd60515b55745934ee8a43c98dd4419d3e | #### the standard form ==> ax^2 + bx + c = 0 #####
import cmath
a = float(input('please enter the coefficient of X^2: '))
b = float(input('please enter the coefficient of X: '))
c = float(input('please enter the coefficient of C: '))
d = b ** 2 - 4 * a * c
root1 = (-b - cmath.sqrt(d)) / (2 * a)
root2 = (-b + cmath.sqrt(d)) / (2 * a)
print(f'the root1 is: {root1}')
print(f'the root1 is: {root2}')
|
987,443 | d58c9dd08f3bb556791ee986d8a7fdd95804d4aa | from sqlalchemy import create_engine, Integer, String, Float
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column
# 创建数据库的连接
engine = create_engine("mysql+pymysql://root:root@127.0.0.1:3306/lagou?charset=utf8")
# 操作数据库,需要我们创建一个session
Session = sessionmaker(bind=engine)
# 声明一个基类
Base = declarative_base()
class Lagoutables(Base):
# 表名称
__tablename__ = 'lagou_data'
# id,设置为主键和自动增长
id = Column(Integer, primary_key=True, autoincrement=True)
# 岗位ID,非空字段
positionID = Column(Integer, nullable=True)
# 经度
longitude = Column(Float, nullable=False)
# 纬度
latitude = Column(Float, nullable=False)
# 岗位名称
positionName = Column(String(length=50), nullable=False)
# 工作年限
workYear = Column(String(length=20), nullable=False)
# 学历
education = Column(String(length=20), nullable=False)
# 岗位性质
jobNature = Column(String(length=20), nullable=True)
# 公司类型
financeStage = Column(String(length=30), nullable=True)
# 公司规模
companySize = Column(String(length=30), nullable=True)
# 业务方向
industryField = Column(String(length=30), nullable=True)
# 所在城市
city = Column(String(length=10), nullable=False)
# 岗位标签
positionAdvantage = Column(String(length=200), nullable=True)
# 公司简称
companyShortName = Column(String(length=50), nullable=True)
# 公司全称
companyFullName = Column(String(length=200), nullable=True)
# 公司所在区
district = Column(String(length=20), nullable=True)
# 公司福利标签
companyLabelList = Column(String(length=200), nullable=True)
# 工资
salary = Column(String(length=20), nullable=False)
# 抓取日期
crawl_date = Column(String(length=20), nullable=False)
if __name__ == '__main__':
# 创建数据表
Lagoutables.metadata.create_all(engine)
|
987,444 | cd51a7b6ac26c76ce4baf8bd38ecafbaa9e3a5d8 | #
# hw3pr1.py
#
# lab problem - matplotlib tutorial (and a bit of numpy besides...)
#
# this asks you to work through the first part of the tutorial at
# www.labri.fr/perso/nrougier/teaching/matplotlib/
# + then try the scatter plot, bar plot, and one other kind of "Other plot"
# from that tutorial -- and create a distinctive variation of each
#
# include screenshots or saved graphics of your variations of those plots with the names
# + plot_scatter.png, plot_bar.png, and plot_choice.png
#
# Remember to run %matplotlib at your ipython prompt!
#
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
#
# in-class examples...
#
def inclass1():
    """Simple demo of a scatter plot with random positions, colors and sizes."""
    import numpy as np
    import matplotlib.pyplot as plt

    count = 50
    xs = np.random.rand(count)
    ys = np.random.rand(count)
    point_colors = np.random.rand(count)
    # Radii between 0 and 15 points, converted to marker areas.
    areas = np.pi * (15 * np.random.rand(count)) ** 2
    plt.scatter(xs, ys, s=areas, c=point_colors, alpha=0.5)
    plt.show()
#
# First example from the tutorial/walkthrough
#
#
# Feel free to replace this code as you go -- or to comment/uncomment portions of it...
#
def example1():
    """Plot sine and cosine over [-pi, pi] with default styling."""
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm

    xs = np.linspace(-np.pi, np.pi, 256, endpoint=True)
    plt.plot(xs, np.cos(xs))
    plt.plot(xs, np.sin(xs))
    plt.show()
#
# Here is a larger example with many parameters made explicit
#
def example2():
    """Plot sine and cosine with figure size, limits and ticks made explicit."""
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm

    # New 8x6-inch figure at 80 dpi, with a single 1x1 subplot.
    plt.figure(figsize=(8, 6), dpi=80)
    plt.subplot(111)

    xs = np.linspace(-np.pi, np.pi, 256, endpoint=True)
    cosine, sine = np.cos(xs), np.sin(xs)

    # Continuous 1-pixel lines: cosine in blue, sine in green.
    plt.plot(xs, cosine, color="blue", linewidth=1.0, linestyle="-")
    plt.plot(xs, sine, color="green", linewidth=1.0, linestyle="-")

    # Explicit axis limits and tick positions.
    plt.xlim(-4.0, 4.0)
    plt.xticks(np.linspace(-4, 4, 9, endpoint=True))
    plt.ylim(-1.0, 1.0)
    plt.yticks(np.linspace(-1, 1, 5, endpoint=True))

    # savefig("../figures/exercice_2.png", dpi=72) would save at 72 dpi.
    plt.show()
#
# using style sheets:
# # be sure to import matplotlib
# # list of all of them: matplotlib.style.available
# # example of using one: matplotlib.style.use( 'seaborn-paper' )
#
def scatterPlot1():
    """Tutorial scatter plot: 1024 normally-distributed points colored by angle."""
    sample_size = 1024
    xs = np.random.normal(0, 1, sample_size)
    ys = np.random.normal(0, 1, sample_size)
    angles = np.arctan2(ys, xs)  # color each point by its polar angle

    plt.axes([0.025, 0.025, 0.95, 0.95])
    plt.scatter(xs, ys, s=75, c=angles, alpha=.5)
    plt.xlim(-1.5, 1.5), plt.xticks([])
    plt.ylim(-1.5, 1.5), plt.yticks([])
    # savefig('../figures/scatter_ex.png', dpi=48)
    plt.show()
def scatterPlot2():
    """Variation of scatterPlot1: 100 uniform points with random colors."""
    sample_size = 100
    xs = np.random.rand(sample_size)
    ys = np.random.rand(sample_size)
    point_colors = np.random.rand(sample_size)
    plt.scatter(xs, ys, c=point_colors, alpha=0.5)
    plt.show()
# scatterPlot2()
def barPlot1():
    """Tutorial bar plot: mirrored bar series annotated with their values."""
    count = 12
    xs = np.arange(count)
    upper = (1 - xs / float(count)) * np.random.uniform(0.5, 1.0, count)
    lower = (1 - xs / float(count)) * np.random.uniform(0.5, 1.0, count)

    plt.bar(xs, +upper, facecolor='#9999ff', edgecolor='white')
    plt.bar(xs, -lower, facecolor='#ff9999', edgecolor='white')

    # Label every bar with its height.
    for x_pos, height in zip(xs, upper):
        plt.text(x_pos + 0.2, height + 0.05, '%.2f' % height, ha='center', va='bottom')
    for x_pos, height in zip(xs, lower):
        plt.text(x_pos + 0.2, -height - 0.1, '%.2f' % height, ha='center', va='bottom')

    plt.ylim(-1.25, +1.25)
    plt.show()
# barPlot()
def barPlot2():
    """Variation of barPlot1: a single upward bar series with value labels."""
    count = 10
    xs = np.arange(count)
    heights = (1 - xs / float(count)) * np.random.uniform(0.5, 1.0, count)
    plt.bar(xs, +heights, facecolor='#9999ff', edgecolor='white')
    for x_pos, height in zip(xs, heights):
        plt.text(x_pos + 0.2, height + 0.05, '%.2f' % height, ha='center', va='bottom')
    plt.ylim(0, 1.25)
    plt.show()


barPlot2()
def pieChart1():
    """Tutorial pie chart: 20 grayscale slices, the last one twice as large."""
    slice_count = 20
    weights = np.ones(slice_count)
    weights[-1] *= 2

    plt.axes([0.025, 0.025, 0.95, 0.95])
    grays = ['%f' % (i / float(slice_count)) for i in range(slice_count)]
    plt.pie(weights, explode=weights * .05, colors=grays)
    plt.gca().set_aspect('equal')
    plt.xticks([]), plt.yticks([])
    # savefig('../figures/pie_ex.png', dpi=48)
    plt.show()
def pieChart2():
    """Variation: four labeled slices with percentages, shadow, rotated start."""
    slice_count = 4
    weights = np.ones(slice_count)
    weights[-1] *= 2
    labels = 'A', 'B', 'C', 'D'

    plt.axes([0.025, 0.025, 0.95, 0.95])
    plt.pie(weights, explode=weights * .05, labels=labels, autopct='%1.1f%%',
            shadow=True, startangle=90)
    plt.gca().set_aspect('equal')
    plt.xticks([]), plt.yticks([])
    # savefig('../figures/pie_ex.png', dpi=48)
    plt.show()
|
987,445 | 0428d96bae6c77581af1eda3b30c60cc9814fe3f | from collections import Counter
n = int(input())
a = list(map(int, input().split()))
c = Counter(a)
ans = n * (n - 1) // 2
for k, v in c.most_common():
ans -= v * (v - 1) // 2
print(ans)
|
987,446 | 18132835bec98de592022233720e2cd39501884f | import socket
import json
import time
data = {'message':'hello world!', 'test':123.4}
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
for i in range(10):
s.connect(('127.0.0.1', 9999))
time.sleep(2)
s.send(json.dumps(data))
result = json.loads(s.recv(1024))
print result
s.close()
|
987,447 | 7665f26c221eb98351ff9ef571fd63435c85a19e | import sys
import os
from tinytag import TinyTag
from itertools import groupby
def get_args():
    """Return the artist name supplied as the first command-line argument."""
    return sys.argv[1]
def get_mp3_files(directory_path):
    """Yield the absolute path of every .mp3 file under *directory_path*."""
    for root, _dirs, names in os.walk(directory_path, topdown=True):
        for name in names:
            if name.endswith('.mp3'):
                yield os.path.abspath(os.path.join(root, name))
def create_music_collection(mp3_files):
    """Pair every file path with its TinyTag metadata object."""
    return [[path, TinyTag.get(path)] for path in mp3_files]
def filter_collection_by_criteria(collection, criteria):
    """Keep only entries whose artist tag equals *criteria*."""
    matches = []
    for path, tags in collection:
        if tags.artist == criteria:
            matches.append([path, tags])
    return matches
def group_collection_by_album(collection):
    """Yield (album, entries) pairs, one per distinct album tag.

    Bug fix: itertools.groupby only merges *adjacent* items with equal keys,
    so the collection is sorted by album first; the original relied on
    filesystem walk order and could emit the same album several times.
    Sorting uses str() of the album so a None album cannot break comparison.
    """
    album_key = lambda item: item[1].album
    ordered = sorted(collection, key=lambda item: str(album_key(item)))
    for album, items in groupby(ordered, key=album_key):
        yield album, list(items)
def pretty_print(_grouped_collection):
    """Print each album name followed by its numbered track listing."""
    for album, files in _grouped_collection:
        print(album)
        for index, (path, tags) in enumerate(files, 1):
            line = '{:<4}{}. "{}" {} ({})'.format(
                ' ',
                index,
                tags.title,
                beatufy_duration(tags.duration),
                path)
            print(line)
def beatufy_duration(duration):
    """Format a duration in seconds as M:SS (e.g. 125 -> '2:05').

    Bug fix: the original sliced str(duration / 60)[:1] and
    str(duration % 60)[:2], which truncated multi-digit minute counts and
    produced fragments like '5.' for the seconds. Use integer divmod and
    zero-padded formatting instead.
    """
    minutes, seconds = divmod(int(duration), 60)
    return '{}:{:02d}'.format(minutes, seconds)
if __name__ == '__main__':
    # CLI entry point: the first argument is the artist to filter by;
    # mp3 files are discovered under the relative directory 'music'.
    artist = get_args()
    path = 'music'
    mp3_collection = create_music_collection(get_mp3_files(path))
    if not mp3_collection:
        exit('No mp3 files found')
    filtered_collection = filter_collection_by_criteria(mp3_collection, artist)
    grouped_collection = group_collection_by_album(filtered_collection)
    pretty_print(grouped_collection)
987,448 | 6ffc3505662b7bacec302342286271f2fbea5d2e | from .symbolics import * # noqa: F401
from .geometry import * # noqa: F401
from .distance import * # noqa: F401
from .stencils import * # noqa: F401
from .topography import * # noqa: F401
from pkg_resources import get_distribution, DistributionNotFound
try:
    # Resolve the installed distribution's version string at import time.
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    # package is not installed (e.g. running from a source checkout);
    # leave __version__ undefined rather than guessing.
    pass
|
987,449 | 0d7206392b7a84b809fc52678737df549728ef24 | from brownie import accounts
def deploy_simple_storage():
account = accounts[0]
print(account)
def main():
deploy_simple_storage() |
987,450 | 95011677285583367ace88e83c5869fd7c84169f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 11 14:51:02 2019
@author: kyleschneider
"""
import pandas as pd
import numpy as np
# Where you load in CSV file
dataset = pd.read_csv('bestbath.com_organic_keywords.csv',encoding = 'unicode_escape')
#Creates a dataframe filtered to the position you'd like
def createDataWithPositionSpecs(min_p, max_p):
    """Return rows of the module-level dataset whose Position lies in [min_p, max_p]."""
    in_range = (dataset['Position'] >= min_p) & (dataset['Position'] <= max_p)
    return dataset.loc[in_range]
cp = createDataWithPositionSpecs(11,29)
# Creates the total volume for 'Search Volume' column
def totalVolume(x):
    """Return the sum of the 'Search Volume' column of DataFrame *x*."""
    return x['Search Volume'].sum()
totV = totalVolume(cp)
# Each keyword's share of the total search volume, as a percentage.
k = (cp['Search Volume']/totV)*100
# Flag keywords whose share is at least 0.5% of the total volume.
newData = []
for value in cp['Search Volume']:
    if (value/totV)*100 >= .5:
        newData.append(True)
    else:
        newData.append(False)
# Attach the computed columns.
# NOTE(review): cp is a .loc slice of dataset, so these assignments may
# trigger pandas' SettingWithCopyWarning — consider copying cp upstream.
cp['Volume Ratio'] = k
cp['Look into']= newData
# Keep only the flagged keywords.
finalist = cp.loc[cp['Look into'] == True]
# Exports final dataframe to a csv file
# Use: finalist.to_csv(r'Path where you want to store the exported CSV file\File Name.csv')
# to select file path
finalist.to_csv(r'Desktop\keyword_research\cody_keyword_research3.csv')
|
987,451 | 5c5dc2e507705ce3d8559d4515843a1a291ff689 | import pandas as pd
import matplotlib.pyplot as plt
# Create a DataFrame based on a dictionary.
d = {'timeIndex': [1, 1, 1, 1, 1, 1, 1, 2, 2, 2], 'isZero': [0,0,0,1, 1, 1, 1 ,0,1,0],
'isOne':[99,98,99,88,78,89,96,99,97,93], 'xTimes':[0,1,2,3,4,5,6,7,8,9]}
df = pd.DataFrame(data=d)
#print(df)
# Draw the first axis.
#ax1 = df['isZero'].plot(x='xTimes', color='blue', grid=True, label='Count')
ax1 = df['isZero'].plot(x='xTimes', color='blue', grid=True)
# Draw the second axis.
ax2 = df['isOne'].plot(x='xTimes', color='red', grid=True, secondary_y=True, label='Plot 2')
# Set legend label here.
# Set location to upper-left
ax1.legend(['Plot 1'],loc=1)
# Legend label set above at plot()
# Location is coordinates.
#ax2.legend(loc=2)
ax2.legend(loc=(0,1.02))
# Adjust bottom margin
#plt.subplots_adjust(bottom = .20)
# Display the plot.
plt.show()
|
987,452 | 2bc77f07a752cbabb7485a0988b7cda22d13866b | #!/usr/bin/env python
# __author__ = 'RCSLabs'
from gi.repository import Gtk
win = Gtk.Window()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
|
987,453 | f5721cad3c7a6316a9b060d3f2bc686d0c024258 | # Writes a JSON to a file
# json.dump() method is used to write the json string to a writeable object
import json
data = {}
data['people']=[]
data['people'].append({"Name":"John Doe",
"Age":27,
"Website":"www.john-d.com"})
data['people'].append({"Name":"Brian Mosers",
"Age":34,
"Website":"www.automatewithbrian.org"})
data['people'].append({"Name":"Dr.Dre",
"Age":45,
"Website":"www.kingdre.au"})
print(data)
with open('json_testfile.txt','w') as f:
json.dump(data,f,indent=4)
|
987,454 | fc3dcbed4458a4a4133e7779f722c17d56969338 | ''' README
Takes a text file as input and creates a numpy array of glove embeddings for each instance in the text file.
Also creates a dictionary mapping each word in the text file to a 300 dimensional vector embedding.
Steps to perform before running:
1) Make sure you have numpy package installed for python.
2) Download glove 300 dimensional embeddings from this url (nlp.stanford.edu/data/glove.840B.300d.zip)
3) UnZip the file and name it as 'glove.840B.300d.txt'
4) The unzipped file along with the input text file must reside in the same directory as your code.
5) Input text file path to be set in inputtext variable in line#24
6) To get a written text file of glove vector embeddings for text file instances, set the output file
path in outputwrite variable in line#25
7) To store the vectors mapped to the words in our input file in a dictionary, set the output file
path in outputvocabdict variable in line#26
8) To store the vectors mapped to the text instances in our input file in a dictionary, set the output file
path in outputinstancedict variable in line#27
9) To create glove vectors for training text file, follow the steps 1-4 and run this file as it is.
10)To create glove vectors for test text file,comment the lines 24-27 and uncomment the lines 28-31 and
run this file as it is.
'''
import numpy as np
# Configuration: input corpus, output artifacts, and the GloVe vector file.
inputtext='training_text'
outputwrite='Embeddings_glove_train.txt'
outputvocabdict='embeddingmapglove_train.npy'
outputinstancedict='output_train_glove.npy'
#inputtext='stage2_test_text.csv'
#outputwrite='Embeddings_glove_test.txt'
#outputvocabdict='embeddingmapglove_test.npy'
#outputinstancedict='output_test_glove.npy'
glovedictinput='glove.840B.300d.txt'
embeddings=[]
# Phase 1: count the data rows (the header line is skipped with next()).
print "Find no_of_instances"
no_of_instances=0
fp = open(inputtext)
next(fp)
for line in fp:
    no_of_instances=no_of_instances+1
fp.close()
print no_of_instances
print "\n"
print "Finding the dimension of the embeddings"
dimension=300
print dimension
print "\n"
# Phase 2: load the full GloVe file into a dict of word -> vector strings,
# stripping leading/trailing punctuation from each token.
glove=dict()
print "mappping embeddings\n"
fp=open(glovedictinput)
i=0
for line in fp:
    values=line.rstrip('\n').split(' ')
    word=values[0]
    word=word.rstrip('.|,|;|:|\'|\"|)|}|]')
    word=word.lstrip('\'|\"|(|[|{')
    glove[word]=values[1:len(values)]
    i=i+1
    if i%100000==0:
        print i
fp.close()
print "mapped embeddings\n"
# Phase 3: restrict the vocabulary to words that actually occur in the
# corpus (each line is "id||text"); keeps memory down for the next pass.
embeddingmap=dict()
fp = open(inputtext)
next(fp)
i=0
for line in fp:
    values=line.split('||')
    docs=values[1].split(' ')
    for word in docs:
        temp=word
        word=word.rstrip('.|,|;|:|\'|\"|)|}|]')
        word=word.lstrip('\'|\"|(|[|{')
        x=glove.get(word)
        if x is not None:
            embeddingmap[word]=x
            #print word
    i=i+1
    if i%50==0:
        print i
fp.close()
print "Embedding_map done"
# Phase 4: represent each instance as the mean of its word vectors
# (a zero vector when no word is covered) and persist the results.
output_glove_train=[]
fp = open(inputtext)
wfp = open(outputwrite, 'w')
next(fp)
i=0
for line in fp:
    allsum=np.zeros((dimension,),dtype="float32")
    count=0
    values=line.split('||')
    docs=values[1].split(' ')
    for word in docs:
        word=word.rstrip('.|,|;|:|\'|\"|)|}|]')
        word=word.lstrip('\'|\"|(|[|{')
        x=embeddingmap.get(word)
        if x is not None:
            y=[float(v) for v in x]
            allsum=np.add(allsum, y)
            count=count+1
    if count!=0:
        wfp.write("%s\n" % np.divide(allsum,count))
        output_glove_train.append(np.divide(allsum,count))
    else:
        wfp.write("%s\n" % allsum)
        output_glove_train.append(allsum)
    i=i+1
    if i%50==0:
        print i
np.save(outputvocabdict,embeddingmap)
np.save(outputinstancedict,output_glove_train)
print "done"
'''
print "Finding all words in corpus"
fp = open('/home/sasank/BigData/training_text')
next(fp)
words=[]
for line in fp:
values=line.split('||')
docs=values[1].split(' ')
for word in docs:
word=word.rstrip('.|,|;|\s|:|\'|\"')
word=word.lstrip('\'|\"')
if(len(word)>0):
words.append(word)
fp.close()
print "Found all words in corpus\n"
'''
'''
print "finding only the necessary vocab from word2vec corpus"
fp = open('/home/sasank/BigData/ri-3gram-400-tsv/vocab.tsv')
indices=[]
vocabs=[]
i=0
for line in fp:
values=line.split(' ')
if values[0] in words:
indices.append(i)
vocabs.append(values[0])
i=i+1
print i
fp.close()
print "found only the necessary vocab from word2vec corpus\n"
print "Embeddings Finding\n"
embeddings=zeros(no_of_instances,dimension)
'''
'''
for x in d1.keys():
print x
print len(d1.get(x))
print (d1.get(x))
exit()
'''
|
987,455 | 5025a590ee49a8a88e425e811245ff0dd2030b02 | # -*- coding: utf-8 -*-
import scrapy
from scrapy_splash import SplashRequest
lua_script = """
function main(splash)
assert(splash:go(splash.args.url))
while not splash:select('.quote') do
splash:wait(0.1)
end
return {html = splash:html()}
end
"""
class QuotesTestSpider(scrapy.Spider):
    """Scrape JS-rendered quotes via Splash, waiting until .quote nodes exist."""
    name = 'quotes_test'
    allowed_domains = ['quotes_test.com']
    start_urls = ['http://quotes.toscrape.com/js',]

    def start_requests(self):
        # Route every start URL through Splash so the page's JS runs first.
        for url in self.start_urls:
            yield SplashRequest(url=url, callback=self.parse, endpoint='render.html',
                                args={'lua_source':lua_script}, meta={'name':'Hello'})

    def parse(self, response):
        self.logger.info(response.meta['name'])
        if not response:
            return
        for quote in response.xpath("//div[@class='quote']"):
            text = quote.xpath("./span[@class='text']/text()").extract_first()
            yield {'quote': text}
|
987,456 | 31d53862ea16aa429689da6702bdd69293566cc5 | from .models import VM, Backup, Profile
from .serializers import VMSerializer, BackupSerializer, ProfileSerializer
from django.contrib.auth.models import User
from rest_framework import permissions
from rest_framework import generics
from .permissions import IsOwnerOrReadOnly
from rest_framework.response import Response
from rest_framework import renderers
from rest_framework import viewsets
from rest_framework.decorators import detail_route, api_view
from rest_framework import status
from django.shortcuts import render
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from . import models, utils, utilsH, utilsK, utilsKB, utilsEB, utilsHB, backup_kvm, backup_hyperv, backup_esx, \
restore_esx, restore_kvm, restore_hyperv
from . import global_variables as gv
import pdb, datetime
import json
#This comment is to prove PyCharm is good and Nachi is bad
@api_view(['GET', 'POST'])
def vm_list(request, hv, util, ip, password, user, vmname, format=None):
    """List VMs / backups (GET) or trigger a backup / restore job (POST).

    hv selects the hypervisor backend ("kvm" | "esx" | "hyperv") and
    util selects the operation family ("backup" | "restore").
    For util == 'backup': GET discovers VMs on the hypervisor and persists
    them; POST launches a backup. For util == 'restore': GET lists stored
    backups; POST launches a restore.
    NOTE(review): the Backup(...) constructor kwarg differs per branch
    (bkupid= for KVM, bkupID= for HyperV) — at most one matches the model
    field; confirm against the Backup model.
    """
    if util == 'backup':
        if request.method == 'GET':
            if hv == "kvm":
                # Discover KVM VMs and upsert connection details + VM rows.
                l = []
                gv.kvm_ip = ip
                list_VM = utilsK.main(ip, user, password)
                print list_VM
                for vm in list_VM:
                    #d = models.Details(hyper_type='KVM', ip_addr=ip, username=user, password=password)
                    db, created = models.Details.objects.get_or_create(hyper_type='KVM', ip_addr=ip, username=user,
                                                                       password=password)
                    #print db
                    #print created
                    if created == False:
                        db.save()
                    virt_mach = VM(VM_name=vm[1],
                                   details=db,
                                   VM_id=vm[0],
                                   hyper_type="KVM",
                                   state=vm[2],
                                   guest_name="",
                                   ip=ip,
                                   ).save()
                    vms = VM.objects.get(hyper_type="KVM", VM_id=vm[0])
                    l.append(vms)
                serializer = VMSerializer(l, many=True)
                return Response(serializer.data)
            elif hv == "esx":
                # Discover ESX VMs; credentials are cached in module globals.
                gv.esx_ip = ip
                gv.esx_password = password
                gv.esx_username = user
                # ip="192.168.32.98"
                # password="gsLab123"
                # user="sumitt@ad2lab.com"
                list_VM = utils.main(ip, password, user)
                # print "IN VMLIST"
                for vm in list_VM:
                    #d = models.Details(hyper_type='KVM', ip_addr=ip, username=user, password=password)
                    # NOTE(review): hyper_type here is 'KVM' even on the ESX
                    # path — likely a copy/paste slip; confirm intended value.
                    db, created = models.Details.objects.get_or_create(hyper_type='KVM', ip_addr=ip, username=user,
                                                                       password=password)
                    #print db
                    #print created
                    if created == False:
                        db.save()
                    if vm is not None:
                        VM(VM_id=vm[0],
                           details=db,
                           VM_name=vm[0],
                           hyper_type="ESX",
                           guest_name=vm[2],
                           state=vm[3],
                           ip=vm[4],
                           ).save()
                vms = VM.objects.filter(hyper_type="ESX")
                serializer = VMSerializer(vms, many=True)
                return Response(serializer.data)
            elif hv == "hyperv":
                # Discover Hyper-V VMs; credentials are cached in module globals.
                l = []
                gv.hyperv_ip = ip
                gv.hyperv_password = password
                gv.hyperv_username = user
                list_VM = utilsH.main(ip, user, password)
                #print "hgfhgfhgf", list_VM
                for vm in list_VM:
                    #d = models.Details(hyper_type='KVM', ip_addr=ip, username=user, password=password)
                    db, created = models.Details.objects.get_or_create(hyper_type='KVM', ip_addr=ip, username=user,
                                                                       password=password)
                    #print db
                    #print created
                    if created == False:
                        db.save()
                    if vm is not None:
                        VM(VM_name=vm[0],
                           details=db,
                           VM_id=vm[0],
                           hyper_type="HyperV",
                           guest_name="",
                           ip="",
                           state=vm[1],
                           ).save()
                        vms = VM.objects.filter(hyper_type="HyperV", VM_id=vm[0])
                        l.append(vms)
                #print l
                serializer = VMSerializer(l, many=True)
                return Response(serializer.data)
        elif request.method == 'POST':
            if hv == "kvm":
                # Launch a KVM backup; rotation count comes from the VM's profile.
                bkupserializer = BackupSerializer(data=request.data)
                # print request.data
                if bkupserializer.is_valid():
                    vm = VM.objects.get(VM_id=request.data['VM_name'])
                    bkupID = backup_kvm.main(ip, request.data['backup_name'], request.data['VM_name'],
                                             vm.profile.freq_count, user, password)
                    print bkupID
                    print "AFA"
                    Backup(vm=vm,
                           backup_name=request.data['backup_name'],
                           bkupid=bkupID,
                           VM_name=str(request.data['VM_name']),
                           ).save()
                    # print request.data
                    return Response(bkupserializer.data, status=status.HTTP_201_CREATED)
                else:
                    return Response(bkupserializer.errors, status=status.HTTP_400_BAD_REQUEST)
            if hv == "esx":
                # Launch an ESX backup.
                bkupserializer = BackupSerializer(data=request.data)
                if bkupserializer.is_valid():
                    vm = VM.objects.get(VM_id=request.data['VM_name'])
                    backup_esx.main(ip, password, user, request.data['VM_name'], request.data['backup_name'])
                    Backup(vm=vm,
                           backup_name=request.data['backup_name'],
                           VM_name=request.data['VM_name'],
                           ).save()
                    return Response(bkupserializer.data, status=status.HTTP_201_CREATED)
                else:
                    return Response(bkupserializer.data, status=status.HTTP_400_BAD_REQUEST)
            if hv == "hyperv":
                # Launch a Hyper-V backup using the cached global credentials.
                bkupserializer = BackupSerializer(data=request.data)
                if bkupserializer.is_valid():
                    vm = VM.objects.get(VM_id=str(request.data['VM_name']))
                    ver=backup_hyperv.main(gv.hyperv_ip, gv.hyperv_password, gv.hyperv_username, request.data['VM_name'])
                    Backup(vm=vm,
                           backup_name=request.data['backup_name'],
                           bkupID=ver,
                           ).save()
                    return Response(bkupserializer.data, status=status.HTTP_201_CREATED)
                else:
                    return Response(bkupserializer.data, status=status.HTTP_400_BAD_REQUEST)
    elif util == 'restore':
        if request.method == 'GET':
            if hv == "kvm":
                # List stored backups for the named KVM VM.
                vm_obj = VM.objects.get(VM_id=vmname)
                print "=================================="
                # list_bkups = utilsKB.main(ip, user, password, vmname)
                # print ip, user, password, vmname
                backups = Backup.objects.filter(vm=vm_obj)
                serializer = BackupSerializer(backups, many=True)
                return Response(serializer.data)
            elif hv == "esx":
                # NOTE(review): VM name 'test_TSAM' is hard-coded here;
                # vmname from the URL is ignored on this path.
                bkuplist = utilsEB.main('test_TSAM')
                vm_obj = VM.objects.get(VM_name='test_TSAM')
                for bkup in bkuplist:
                    if bkup is not None:
                        Backup(vm=vm_obj,
                               backup_name=str(bkup[0]),
                               VM_name='test_TSAM'
                               ).save()
                backups = Backup.objects.filter(vm=vm_obj)
                serializer = BackupSerializer(backups, many=True)
                return Response(serializer.data)
            elif hv == "hyperv":
                # pdb.set_trace()
                # NOTE(review): drive letter 'D' is hard-coded for Hyper-V.
                bkuplist = utilsHB.main('D')
                for bkup in bkuplist:
                    if bkup is not None:
                        vm_obj = VM.objects.get(VM_name=bkup[1])
                        Backup(vm=vm_obj,
                               backup_name=str(bkup[0]),
                               VM_name=str(bkup[1]),
                               ).save()
                backups = Backup.objects.all()
                serializer = BackupSerializer(backups, many=True)
                return Response(serializer.data)
        elif request.method == 'POST':
            if hv == "kvm":
                restore_kvm.main(str(request.data['VM_name']), str(request.data['backup_name']))
                return Response(status=status.HTTP_201_CREATED)
            if hv == "esx":
                # pdb.set_trace()
                # bkupserializer = BackupSerializer(data=request.data)
                # if bkupserializer.is_valid():
                restore_esx.main(str(request.data['VM_name']), str(request.data['backup_name']))
                return Response(status=status.HTTP_201_CREATED)
                # else:
                # return Response(bkupserializer.data, status=status.HTTP_400_BAD_REQUEST)
            if hv == "hyperv":
                # pdb.set_trace()
                restore_hyperv.main('D', str(request.data['backup_name']), str(request.data['VM_name']))
                return Response(status=status.HTTP_201_CREATED)
@api_view(['GET', 'POST'])
def createPolicy(request, startDay, startMonth, startYear, endDay, endMonth, endYear, bckrotation, format=None):
    """Create a backup Profile from URL date components and return it.

    NOTE(review): the record is created on GET, which is not idempotent —
    consider restricting creation to POST. del_count is hard-coded to 4,
    and a POST request currently falls through and returns None.
    """
    if request.method == 'GET':
        d = models.Profile()
        d.start_date = datetime.date(int(startYear), int(startMonth), int(startDay))
        d.end_date = datetime.date(int(endYear), int(endMonth), int(endDay))
        d.freq_count = int(bckrotation)
        d.del_count = 4
        d.save()
        serializer = ProfileSerializer(d)
        return Response(serializer.data)
def conPolicy(request, policyID, vmID):
    """Attach the Profile with pk=policyID to the VM with VM_id=vmID."""
    policy = Profile.objects.get(pk=policyID)
    vm = VM.objects.get(VM_id=vmID)
    vm.profile = policy
    vm.save()
@api_view(['GET', 'PUT', 'DELETE'])
def vm_detail(request, hv, name, format=None):
    """Retrieve, update, or delete one VM identified by hypervisor type and name."""
    try:
        vm = VM.objects.get(hyper_type=hv, VM_name=name)
    except VM.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'GET':
        return Response(VMSerializer(vm).data)

    if request.method == 'PUT':
        serializer = VMSerializer(vm, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)

    if request.method == 'DELETE':
        vm.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
# def set_policy(request):
"""
string=""
@api_view(['GET', 'POST'])
def getHv(request, hv):
if request.method=='GET':
self.string=hv
class VMViewSet(viewsets.ModelViewSet):
if string == "kvm":
vms=VM()
list_VM = utilsK.main()
for vm in list_VM:
vm = VM(VM_name=vm[1],
VM_id=vm[0],
hyper_type="KVM",
state=vm[2],
disk_location="",
guest_name="",
ip="",
backup_content="",
).save()
vms = VM.objects.filter(hyper_type="KVM")
queryset = vms
serializer_class = VMSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly,)
if string == "esx":
vms=VM()
list_VM = utilsK.main()
for vm in list_VM:
VM(VM_name=vm[0],
hyper_type="ESX",
disk_location=vm[1],
guest_name=vm[2],
state=vm[3],
ip=vm[4],
backup_content="",
).save()
vms=VM.objects.filter(hyper_type="ESX")
queryset = vms
serializer_class = VMSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly,)
if string == "hyperv":
vms=VM()
list_VM = utilsK.main()
for vm in list_VM:
if vm is not None:
VM(VM_name=vm[0],
hyper_type="HyperV",
disk_location="",
guest_name="",
ip="",
backup_content="",
state=vm[1],
).save()
vms=VM.objects.filter(hyper_type="HyperV")
queryset = vms
serializer_class = VMSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,
IsOwnerOrReadOnly,)
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
class UserViewSet(viewsets.ReadOnlyModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer"""
|
987,457 | 62ee98079828fcfead28c48df14a64cfa63fadd9 | from tests.util import utils
from plaza_routing import config
from plaza_routing.integration import geocoding_service
def mock_geocode(monkeypatch):
    """Patch geocoding_service._query to serve canned JSON fixtures."""
    def fake_query(payload):
        return utils.get_json_file(_get_geocode_filename(payload), 'geocoding')
    monkeypatch.setattr(geocoding_service, "_query", fake_query)
def _get_geocode_filename(params):
if params['q'] == 'Oberseestrasse 10, Rapperswil-Jona':
return 'geocoding.json'
elif params['q'] == 'Hansmusterweg 14, Zürich':
return 'geocoding_no_coordinates_found.json'
elif params['q'] == 'Sir Matt Busby Way, Stretford, Manchester M16 0RA':
return 'geocoding_outside_viewbox.json'
def mock_geocoding_unavailable_url(monkeypatch):
    """Point the geocoding API config at an unreachable host to simulate an outage."""
    monkeypatch.setattr(config, "geocoding",
                        utils.mock_value_in_dict(config.geocoding,
                                                 "geocoding_api",
                                                 "https://nominatim.offline.openstreetmap.org/search"))
def mock_geocoding_wrong_url(monkeypatch):
    """Point the geocoding API config at a live but wrong service (Overpass)."""
    monkeypatch.setattr(config, "geocoding",
                        utils.mock_value_in_dict(config.geocoding,
                                                 "geocoding_api",
                                                 "https://overpass.osm.ch/api/interpreter"))
|
987,458 | c60f69a6a911068a7d2e9459311bb0e419a54693 | from flask import Flask
from flask import request
import requests
import facebook
import urllib
import json
#https://teamtreehouse.com/community/can-someone-help-me-understand-flaskname-a-little-better
app = Flask(__name__)
# NOTE(review): OAuth credentials are hard-coded in source control; move them
# to environment variables/config and rotate this secret, which is now exposed.
FACEBOOK_APP_ID = '1664841416939079'
FACEBOOK_APP_SECRET = '70e0a8b22eccbab11ee6f3a8306152bd'
@app.route("/")
def hello():
#here request is request of flask not the requests library and it return the attribute specified
code = request.args.get('code')
# print(code)
#Exchanging Code for an Access Token
r=requests.get('https://graph.facebook.com/v2.12/oauth/access_token?client_id={}&redirect_uri={}&client_secret={}&code={}'.format(FACEBOOK_APP_ID,'http://localhost:8080/',FACEBOOK_APP_SECRET,code))
data = r.json()
#g=data['content'][0]
#print(g)
access_token=data['access_token']
graph = facebook.GraphAPI(access_token)
# me is of type dictionary
me= graph.request('/me')
id=me.get('id')
friends= graph.request('/me/friends')
data_friends=friends['data']
print(me)
print(friends)
print(data_friends)
return 'Hi pal'
#debug=True print out errors on the web page
# NOTE(review): debug=True enables the interactive debugger, and binding to
# 0.0.0.0 exposes it on all interfaces — never use this combination in production.
app.run(host="0.0.0.0", port=int("8080"), debug=True)
|
987,459 | 608e3c05377998d8a03b15ba019f6a377eded154 | # coding: utf-8
from __future__ import unicode_literals, print_function
def assert_ipymarkup():
    """Raise an ImportError with an install hint when ipymarkup is unavailable."""
    try:
        import ipymarkup
    except ImportError:
        raise ImportError('pip install ipymarkup')
def get_markup_notebook(text, spans):
    """Build an ipymarkup BoxMarkup object for *text* with highlighted *spans*.

    :param text: the string to render
    :param spans: iterable of (start, stop) index pairs
    :return: ipymarkup.BoxMarkup instance
    """
    assert_ipymarkup()
    from ipymarkup import BoxMarkup, Span
    # Removed an unused `from IPython.display import display` import that
    # was never referenced in this function.
    spans = [Span(start, stop) for start, stop in spans]
    return BoxMarkup(text, spans)
def show_markup_notebook(text, spans):
    """Render *text* with highlighted *spans* inline in a Jupyter notebook."""
    # BUG FIX: `display` was only imported inside get_markup_notebook's local
    # scope, so calling this function raised NameError.
    from IPython.display import display
    markup = get_markup_notebook(text, spans)
    display(markup)
def show_markup(text, spans):
    """Print an ASCII rendering of *text* with *spans* highlighted."""
    assert_ipymarkup()
    from ipymarkup import AsciiMarkup, Span
    markup = AsciiMarkup(text, [Span(a, b) for a, b in spans])
    for row in markup.as_ascii:
        print(row)
def format_json(data):
    """Serialize *data* to pretty-printed JSON, keeping non-ASCII characters readable."""
    from json import dumps
    return dumps(data, indent=2, ensure_ascii=False)
def show_json(data):
    """Pretty-print *data* as indented JSON to stdout."""
    print(format_json(data))
|
def play_50(num=None):
    """Report whether a number is composite.

    :param num: the number to test; when omitted it is read from stdin,
        keeping the original no-argument call working.
    :return: "yes" if the number has a nontrivial divisor, "no" otherwise.
    """
    if num is None:
        num = int(input('Enter n:'))
    # Checking divisors up to sqrt(n) is sufficient; the original
    # range(2, n//2) missed small composites such as 4 and 6... wait, 6 was
    # caught, but 4 was not (range(2, 2) is empty).
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return "yes"
    return "no"
play_50()
|
class Solution:  # LeetCode 720 -- Longest Word in Dictionary
    def longestWord(self, words: "List[str]") -> str:
        """Return the longest word in *words* buildable one character at a
        time from other words in the list; ties break lexicographically.

        The annotation is quoted because `typing.List` is not imported in
        this file -- an unquoted `List[str]` raised NameError at class
        creation time.
        """
        # Tricky point: every prefix must itself be present, starting from a
        # single letter; the empty string seeds the construction.
        visited = set([""])
        for w in sorted(words, key=len):
            if w[:-1] in visited:
                visited.add(w)
        # Longest first, then lexicographically smallest.
        return min(visited, key=lambda w: (-len(w), w))
|
987,462 | e92499394d797fe5efdded894a75e0388d3c711d | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration: creates the `Ordenes` (orders) table."""

    dependencies = [
        # The user FK below needs the (possibly swapped) auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Ordenes',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # auto_now=True: refreshed on every save, not just creation.
                ('fecha', models.DateField(auto_now=True)),
                ('cantidad_de_piezas', models.CharField(max_length=10, choices=[(b'7', b'7 Piezas'), (b'16', b'16 Piezas')])),
                ('datos_de_piezas', models.CharField(max_length=100)),
                ('estado_de_orden', models.CharField(max_length=10, choices=[(b'Abierto', b'Abierto'), (b'Cerrado', b'Cerrado')])),
                ('jefe_de_linea', models.CharField(max_length=20)),
                # NOTE(review): no on_delete argument -- valid on Django < 2.0,
                # required from 2.0 onward; confirm the target Django version.
                ('usuario_de_almacen', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
987,463 | bbc9add0d25a200fa92b39b4275515644007b8ab | from sqlalchemy import (
create_engine, Column, Integer, String
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# executing the instructions from our localhost "chinook" db
db = create_engine("postgresql:///chinook")
base = declarative_base()
# create a class-based model for the "Programmer" table
class Programmer(base):
    """Declarative model mapped to the "Programmer" table."""
    __tablename__ = "Programmer"
    id = Column(Integer, primary_key=True)  # surrogate key
    first_name = Column(String)
    last_name = Column(String)
    gender = Column(String)
    nationality = Column(String)
    famous_for = Column(String)
# instead of connecting to the database directly, we will ask for a session
# to create a new instance of sessionmaker, then point to our engine (the db)
Session = sessionmaker(db)
# opens an actual session by calling the Session factory defined above
session = Session()
# create the database tables from the declarative_base models
base.metadata.create_all(db)
# creating records on our Programmer table
ada_lovelace = Programmer(
    first_name="Ada",
    last_name="Lovelace",
    gender="F",
    nationality="British",
    famous_for="First Programmer"
)
alan_turing = Programmer(
    first_name="Alan",
    last_name="Turing",
    gender="M",
    nationality="British",
    famous_for="Modern Computing"
)
grace_hopper = Programmer(
    first_name="Grace",
    last_name="Hopper",
    gender="F",
    nationality="American",
    famous_for="Cobol Language"
)
margaret_hamilton = Programmer(
    first_name="Margaret",
    last_name="Hamilton",
    gender="F",
    nationality="American",
    # NOTE(review): "Apolo 11" looks like a typo for "Apollo 11" -- data
    # value, left unchanged here.
    famous_for="Apolo 11"
)
bill_gates = Programmer(
    first_name="Bill",
    last_name="Gates",
    gender="M",
    nationality="American",
    famous_for="Microsoft"
)
tim_berners_lee = Programmer(
    first_name="Tim",
    # NOTE(review): underscore in "Berners_lee" is probably meant to be a
    # hyphen -- confirm before correcting the stored data.
    last_name="Berners_lee",
    gender="M",
    nationality="British",
    famous_for="World Wide Web"
)
ana_genover = Programmer(
    first_name="Ana",
    last_name="Genover",
    # NOTE(review): gender "M" for Ana may be a data-entry mistake -- verify.
    gender="M",
    nationality="Spanish",
    famous_for="Code Institute"
)
# ADD EACH INSTANCE OF OUR PROGRAMMERS TO OUR SESSION
# session.add(ada_lovelace)
# session.add(alan_turing)
# session.add(grace_hopper)
# session.add(margaret_hamilton)
# session.add(bill_gates)
# session.add(tim_berners_lee)
# session.add(ana_genover)
# commit our session to the database
# session.commit()
# UPDATING A SINGLE RECORD
# programmer = session.query(Programmer).filter_by(id=7).first()
# programmer.famous_for = "World President"
# UPDATING MULTIPLE RECORDS
# people = session.query(Programmer)
# for person in people:
# if person.gender == "F":
# person.gender = "Female"
# elif person.gender == "M":
# person.gender = "Male"
# else:
# print("Gender not defined")
# session.commit()
# DELETING A SINGLE RECORD
# fname = input("Enter a first name: ")
# lname = input("Enter a last name: ")
# programmer = session.query(Programmer).filter_by(first_name=fname, last_name=lname).first()
# # defensive programming
# if programmer is not None:
# print("Programmer Found: ", programmer.first_name + " " + programmer.last_name)
# confirmation = input("Are you sure you want to delete this record? (y/n) ")
# if confirmation.lower() == "y":
# session.delete(programmer)
# session.commit()
# print("Programmer has been deleted")
# else:
# print("Programmer not deleted")
# else:
# print("No records found")
# DELETING MULTIPLE RECORDS
# programmers = session.query(Programmer)
# for programmer in programmers:
# session.delete(programmer)
# session.commit()
# query database to find all Programmers
programmers = session.query(Programmer)
for programmer in programmers:
    # one pipe-separated line per row
    print(
        programmer.id,
        programmer.first_name + " " + programmer.last_name,
        programmer.gender,
        programmer.nationality,
        programmer.famous_for,
        sep="|"
    )
|
class Person:
    """A person with a first name, last name and age."""

    def __init__(self, firstname, lastname, age):
        self.firstname = firstname
        self.lastname = lastname
        self.age = age

    def getAge(self):
        """Print the person's age."""
        # str() is required: concatenating an int to a str raised TypeError
        # in the original whenever age was passed as a number.
        print("My age is " + str(self.age))
# NOTE(review): this is a duplicate of the Person class above; the second
# definition shadows the first.
class Person:
    """A person with a first name, last name and age."""

    def __init__(self, firstname, lastname, age):
        self.firstname = firstname
        self.lastname = lastname
        self.age = age

    def getAge(self):
        """Print the person's age."""
        # str() avoids the original str + int TypeError for numeric ages.
        print("My age is " + str(self.age))
class Student(Person):
    """A student; inherits everything from Person unchanged."""
    pass
x = Student("Mike", "Olsen", 20)
# NOTE(review): this call raises TypeError -- getAge() concatenates a str
# with the int age 20.
x.getAge()
class Bo:
    """Parent class demonstrating a protected attribute."""
    def __init__(self):
        # protected variable (single underscore: a naming convention only)
        self._a = 2
class Con(Bo):
    """Child class that reads the parent's protected attribute on init."""
    def __init__(self):
        super().__init__()
        print("Calling protected member of Bo class: ")
        print(self._a)
obj1 = Con()
obj2 = Bo()
print(obj2.a)  # Raises AttributeError: the attribute is named `_a`, no `a` exists
|
987,465 | 61cbaf34713ace4a4e292efff7690fa2f25b8b95 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import ssl
import logging
import wsc.logging
from wsc.server.protocol import Adapter
from wsc.server.handler import ConnectionHandler
from wsc.server.manager import ConnectionsManger
from wsc.server.compatiblity import TCPServer, ThreadingMixIn
logger = logging.getLogger(__name__)
logging.basicConfig(**wsc.logging.config)
class Server(ThreadingMixIn, TCPServer):
    """
    WebSocket server
    """
    # Server options
    DEFAULT_HOST = '0.0.0.0'
    DEFAULT_PORT = 8088
    # NOTE(review): PROTOCOL_TLSv1 is deprecated and considered weak;
    # confirm whether a newer TLS version can be required instead.
    TLS_VERSION = ssl.PROTOCOL_TLSv1
    # TCP Server options
    allow_reuse_address = True
    daemon_threads = True
    request_queue_size = 100

    def __init__(self, hostname=DEFAULT_HOST, port=DEFAULT_PORT, access_key=None, ssl_cert=None, ssl_key=None):
        """
        Create server instance

        :param hostname: interface to bind (default: all interfaces)
        :param port: TCP port to listen on
        :param access_key: optional shared secret for the server API
        :param ssl_cert: path to certificate chain file (TLS is enabled only
            when both ssl_cert and ssl_key are given)
        :param ssl_key: path to the private key file
        """
        # SSL Context -- must be prepared before TCPServer.__init__ binds,
        # because server_bind() below wraps the socket with it.
        self.tls_enabled = ssl_cert and ssl_key
        self.ssl_cert = ssl_cert
        self.ssl_key = ssl_key
        if self.tls_enabled:
            logger.info('SSL Mode enabled. Creating context...')
            self.context = self._get_ssl_context()
        # Run TCP Server
        TCPServer.__init__(self, (hostname, port), ConnectionHandler)
        # Connections manager
        self.connections = ConnectionsManger()
        self.access_key = (access_key or '').strip()
        # Server information
        logger.warning('Server started on {}://{}:{}'.format(
            'wss' if self.tls_enabled else 'ws',
            hostname,
            port
        ))
        # NOTE(review): the access key is written to the log -- confirm this
        # is acceptable for the deployment environment.
        logger.warning('Server API Access KEY: {}'.format(access_key))
        logger.debug('Reuse ADDR:\t{}'.format(self.allow_reuse_address))
        logger.debug('TLS Enabled:\t{}'.format(self.tls_enabled))
        logger.debug('Daemon threads:\t{}'.format(self.daemon_threads))
        logger.debug('Request queue size:\t{}'.format(self.request_queue_size))

    def server_bind(self):
        """
        Wrap TLS

        Called by TCPServer before binding; wraps the listening socket in
        the prepared SSL context when TLS is enabled.
        :return:
        """
        if self.tls_enabled:
            logger.info('Wrapping socket using SSL...')
            self.socket = self.context.wrap_socket(self.socket, server_side=True)
        TCPServer.server_bind(self)

    def _get_ssl_context(self):
        """
        Creates new SSL context

        :return: ssl.SSLContext loaded with the configured cert/key pair
        """
        context = ssl.SSLContext(self.TLS_VERSION)
        context.load_cert_chain(self.ssl_cert, self.ssl_key)
        return context

    @staticmethod
    def set_option(opt_name, value):
        """
        Updates static option for server

        :param opt_name: name of an existing Server class attribute
        :param value: new value to assign
        :return:
        """
        assert hasattr(Server, opt_name), (
            "Attribute {} doesn't exists at "
            "Server class"
        ).format(opt_name)
        setattr(Server, opt_name, value)
|
987,466 | b6d32e9a636dcf9950281a4402c6547eb63e49fc | # This program is to print factorial
def factorial(num):
    """Return num! computed iteratively (returns 1 for num <= 1)."""
    result = 1
    for multiplier in range(2, num + 1):
        result *= multiplier
    return result
# main
if __name__ == "__main__":
    # Prompt for a number and print its factorial.
    nums = int(input("Enter the number for factorial: "))
    print("factorial of ", nums, " is ", factorial(nums))
|
987,467 | 593b0c0b6b7f57a870243602943e0bf85126a720 | from flask import Flask, render_template, request, redirect
import jinja2
import json
import uuid
app = Flask(__name__)
@app.route('/allcontacts')
def allcontacts():
    """List every contact stored in contact.json."""
    with open('contact.json', 'r') as source:
        stored = json.loads(source.read())
    return render_template('allcontacts.html', contacts=stored)
@app.route('/newcontact')
def newcontact():
    """Show the empty new-contact form."""
    return render_template('newcontacts.html')
@app.route('/viewcontact/<contact_id>')
def show(contact_id):
    """Display a single contact looked up by its id (None if not found)."""
    with open('contact.json', 'r') as source:
        contacts = json.loads(source.read())
    match = next((c for c in contacts if c['id'] == contact_id), None)
    return render_template('viewcontact.html', contact=match)
@app.route('/edit/<contact_id>')
def editit(contact_id):
    """Show the edit form pre-filled with the contact's current data."""
    with open('contact.json', 'r') as source:
        contacts = json.loads(source.read())
    match = next((c for c in contacts if c['id'] == contact_id), None)
    return render_template('editcontact.html', contact=match)
@app.route('/editcontact/<contact_id>')
def change(contact_id):
    """Apply edits from the query string to the contact with *contact_id*.

    Redirects back to the contact's view page. Unknown ids leave the file
    untouched (previously `contacts[None]` raised TypeError).
    """
    with open('contact.json', 'r') as fe:
        contacts = json.loads(fe.read())
    index = None
    for i, contact in enumerate(contacts):
        if contact['id'] == contact_id:
            index = i
            break
    if index is not None:
        contacts[index].update({
            'fname': request.args.get('fname'),
            'lname': request.args.get('lname'),
            'phone': request.args.get('phone'),
            'mail': request.args.get('mail'),
        })
        with open('contact.json', 'w') as fw:
            fw.write(json.dumps(contacts, indent=4))
    return redirect('/viewcontact/{}'.format(contact_id))
@app.route('/create')
def createit():
    """Append a new contact built from the query string, then show the list."""
    with open('contact.json', 'r') as f:
        contacts = json.loads(f.read())
    contacts.append({
        'fname': request.args.get('fname'),
        'lname': request.args.get('lname'),
        'phone': request.args.get('phone'),
        'mail': request.args.get('mail'),
        'id': str(uuid.uuid4()),  # fresh unique id per contact
    })
    with open('contact.json', 'w') as f:
        f.write(json.dumps(contacts, indent=4))
    return redirect('/allcontacts')
@app.route('/delete/<contact_id>')
def deleteit(contact_id):
    """Remove the contact with *contact_id*, then return to the list.

    Unknown ids are ignored (previously `contacts.pop(None)` raised
    TypeError when the id was not found).
    """
    with open('contact.json', 'r') as fd:
        contacts = json.loads(fd.read())
    index = None
    for i, contact in enumerate(contacts):
        if contact['id'] == contact_id:
            index = i
            break
    if index is not None:
        contacts.pop(index)
        with open('contact.json', 'w') as fd:
            fd.write(json.dumps(contacts, indent=4))
    return redirect('/allcontacts')
# NOTE(review): debug=True on 0.0.0.0 exposes the Werkzeug debugger to the
# network -- development use only.
app.run(host='0.0.0.0', port=8000, debug=True)
|
987,468 | a57a1ce85728ec2383004e1b85f8267c7b2bd55c | # N.B Before running, make sure:
# - all photon files are in a folder called 'photon'
# - the spacecraft file is named 'spacecraft.fits' and is in a folder called 'spacecraft'
# You have these 4 files in a folder called model:
# i) gll_iem_v05.fits
# ii) gll_iem_v05_rev1.fit
# iii) gll_psc_v08.fit
# iv) iso_source_v05_rev1.txt
# v) iso_clean_v05.txt
import gt_apps as my_apps
import os
bashCommand = "ls -1 ./photon/*.fits > filelist.list"
os.system(bashCommand)
spacecraftFile='./spacecraft/spacecraft.fits'
###############################################
############ Adjustable paramaters ############
###############################################
evClass=2
RA= 0
DEC=0
eMin=1000
eMax=300000
tMax='INDEF'
roi=180
irfsType='P7REP_SOURCE_V15' # or 'P7REP_SOURCE_V15' ?
# For binning
###############################################
##### Naming stuff #####
# Derive all output file names from the first photon file and the energy band.
# NOTE(review): `file` shadows the Python 2 builtin of the same name.
file = open('filelist.list', 'r')
name_temp = file.readline()
file.close()
name_type = name_temp[9:12]  # Usually LAT
name_energy = str(int(eMin/1000)) + '-' + str(int(eMax/1000)) + 'GeV'  # i.e 100GeV
gtselectOutfile = name_type + '_allphotondata_' + name_energy + '.fits'
filteredLATFile = name_type + '_final_' + name_energy + '.fits'
ltCubeFile = name_type + '_ltCube_' + name_energy + '.fits'
expMapFile = name_type + '_expMap_' + name_energy + '_' + irfsType + '.fits'
modelFile = name_type + '_' + name_energy + '_model.xml'
# For binning
gtbinnedFile = name_type + '_binned3600_' + name_energy + '.fits'
expCubeFile = name_type + '_expCube_' + name_energy + '_' + irfsType + '.fits'
farithOutfile = name_type + '_farith_' + name_energy + '_' + irfsType + '.fits'
fimgtrimOutfile = name_type + '_fimgtrim_' + name_energy + '_' + irfsType + '.fits'
fcarithOutfile = name_type + '_corrmap_' + name_energy + '_' + irfsType + '.fits'
###############################################
# Quick check to see if any files already exist
normalFiles_temp=[gtselectOutfile,filteredLATFile,ltCubeFile,expMapFile,modelFile]
binningFiles_temp=[gtbinnedFile,expCubeFile,farithOutfile,fimgtrimOutfile,fcarithOutfile]
normalFiles=[]
binningFiles=[]
for x in normalFiles_temp:
if os.path.isfile(x):
normalFiles.append(x)
for y in binningFiles_temp:
if os.path.isfile(y):
binningFiles.append(y)
if len(normalFiles)>0 or len(binningFiles)>0:
print "WARNING: THESE FILES EXIST:"
if len(normalFiles)>0:
print '############# Normal Files #############'
for x in normalFiles:
print x
if len(binningFiles)>0:
print '############# Binning Files #############'
for x in binningFiles:
print x
print 'MAKE SURE YOU ARE USING THE RIGHT ONES/NOT OVERWRITING ANYTHING IMPORTANT!'
doIContinue = raw_input('Do you want to continue? (y/n):')
if doIContinue=='n' or doIContinue=='N':
print 'Exiting'
exit 1
###############################################
def printDictionaryToFile(dictionary):
    """Write each source -> TS pair to TSValues.txt, then move it into data/.

    NOTE: uses dict.iteritems(), so this file is Python 2 only.
    """
    fid = open('TSValues.txt', 'w')
    for source, TS in dictionary.iteritems():
        fid.write("%s : %f\n" % (source, TS))
    fid.close()
    os.system('mv TSValues.txt data')
# Start analysis
#Run gtselect
# BUG FIX: the `if` was missing its colon (SyntaxError) and the tool
# configuration was not indented under the guard.
if not os.path.isfile(gtselectOutfile):
    my_apps.filter['evclass'] = evClass
    my_apps.filter['ra'] = RA
    my_apps.filter['dec'] = DEC
    my_apps.filter['rad'] = roi
    my_apps.filter['emin'] = eMin
    my_apps.filter['emax'] = eMax
    my_apps.filter['zmax'] = 100
    my_apps.filter['tmin'] = 'INDEF'  # This is the earliest Fermi MET
    my_apps.filter['tmax'] = tMax
    my_apps.filter['infile'] = '@filelist.list'
    my_apps.filter['outfile'] = gtselectOutfile
    my_apps.filter.run()
#Run gtmktime
# BUG FIX: missing colon on the guard (SyntaxError); body now indented.
if not os.path.isfile(filteredLATFile):
    my_apps.maketime['scfile'] = spacecraftFile
    my_apps.maketime['filter'] = '(DATA_QUAL>0)&&(LAT_CONFIG==1)'
    my_apps.maketime['roicut'] = 'no'
    my_apps.maketime['evfile'] = gtselectOutfile
    my_apps.maketime['outfile'] = filteredLATFile
    my_apps.maketime.run()
# Make livetime cube
# BUG FIX: missing colon on the guard (SyntaxError); body now indented.
if not os.path.isfile(ltCubeFile):
    my_apps.expCube['evfile'] = filteredLATFile
    my_apps.expCube['scfile'] = spacecraftFile
    my_apps.expCube['outfile'] = ltCubeFile
    my_apps.expCube['zmax'] = 100
    my_apps.expCube['dcostheta'] = 0.025
    my_apps.expCube['binsz'] = 1
    my_apps.expCube.run()
# Create binned map
my_apps.evtbin['algorithm'] = 'CCUBE'
my_apps.evtbin['evfile'] = filteredLATFile
my_apps.evtbin['outfile'] = gtbinnedFile
my_apps.evtbin['scfile'] = spacecraftFile
my_apps.evtbin['nxpix'] = 3600
my_apps.evtbin['nypix'] = 1800
my_apps.evtbin['binsz'] = 0.1
my_apps.evtbin['coordsys'] = 'GAL'
my_apps.evtbin['xref'] = 0
my_apps.evtbin['yref'] = 0
my_apps.evtbin['axisrot'] = 0.0
my_apps.evtbin['proj'] = 'AIT'
# NOTE(review): the tool was configured but never executed; the farith step
# below consumes the binned map this call produces. Confirm it was not left
# out intentionally.
my_apps.evtbin.run()
# Make an exposure cube
from GtApp import GtApp
expCube2 = GtApp('gtexpcube', 'Likelihood')
expCube2['expcube'] = ltCubeFile
expCube2['evfile'] = filteredLATFile
expCube2['cmfile'] = 'NONE'
expCube2['outfile'] = expCubeFile
expCube2['irfs'] = irfsType
expCube2['nxpix'] = 3600
expCube2['nypix'] = 1800
expCube2['pixscale'] = 0.1
expCube2['coordsys'] = 'GAL'
expCube2['xref'] = 0
expCube2['yref'] = 0
expCube2['axisrot'] = 0
expCube2['proj'] = 'AIT'
expCube2['emin'] = 1000
expCube2['emax'] = 300000
expCube2['enumbins'] = 1
expCube2['bincalc'] = 'CENTER'
# NOTE(review): gtexpcube was configured but never executed; the farith step
# below needs the exposure cube this call writes. Confirm before relying on
# the script end-to-end.
expCube2.run()
# Correct the all-sky image for exposure and scale. Three steps
# Correct the value of each pixel for the exposure
# NOTE(review): the space after "infil1= " etc. is passed verbatim to the
# FTOOL command line -- confirm the tools tolerate it.
os.system('farith infil1= %s infil2=%s ops=DIV outfil=%s' % (gtbinnedFile, expCubeFile, farithOutfile))
# Trim the pixels that were outside the Aitoff projection.
os.system('fimgtrim infile= %s threshlo=0 const_lo=0 threshup=INDEF outfile=%s' % (farithOutfile, fimgtrimOutfile))
# Scale the image so that the maximum pixel is equal to 255
os.system('fcarith infile= %s const=6.155e9 ops=MUL outfil=%s' % (fimgtrimOutfile, fcarithOutfile))
|
987,469 | 0c63e9e615b1fbda117c399559c04176b2298463 | ### serious exercise 4 part c
n_c = 9
print("c.", n_c, "stars and xs in total:")

# 1st solution: build the whole pattern string first, then print it once.
print("1st solution")
pattern = " ".join("x" if idx % 2 == 0 else "*" for idx in range(n_c)) + " "
print(pattern)
print()

# 2nd solution: print the "x *" pairs directly, plus a trailing "x" when odd.
print("2nd solution")
for _ in range(n_c // 2):
    print("x *", end=" ")
if n_c % 2 == 1:
    print("x")
print()
print()
987,470 | 777d5938c9de8f30f8018dd88668e34429385355 | # -*- coding: utf-8 -*-
from tender_additional_data import *
from flask import abort
import core
from database import BidsTender, Tenders, db
from language.translations import alert
def validator_create_tender(data):
    """Validate a create-tender request payload.

    Aborts with 400 (malformed value) or 422 (unprocessable value) on the
    first violation; checks required fields, lot/item/bid counts, the
    accelerator range, company id, status, API version, platform, and
    finally that the requested status is legal for the procurement method.
    """
    # All required fields must be present.
    for field in range(len(create_tender_required_fields)):
        if create_tender_required_fields[field] not in data:
            abort(400, "Field '{}' is required. List of required fields: {}".format(create_tender_required_fields[field], create_tender_required_fields))
    procurement_method = data["procurementMethodType"]
    number_of_items = data["number_of_items"]
    accelerator = data["accelerator"]
    company_id = data['company_id']
    received_tender_status = data['tenderStatus']
    api_version = data['api_version']
    platform_host = data['platform_host']
    # Optional lot count: integer in [0, 20].
    if 'number_of_lots' in data and len(data["number_of_lots"]) > 0:
        number_of_lots = data["number_of_lots"]
        if str(number_of_lots).isdigit() is False:
            abort(400, 'Number of lots must be integer')
        elif 0 > int(number_of_lots) or int(number_of_lots) > 20:
            abort(422, 'Number of lots must be between 0 and 20')
    # Item count: integer in [1, 20].
    if str(number_of_items).isdigit() is False:
        abort(400, 'Number of items must be integer')
    elif 1 > int(number_of_items) or int(number_of_items) > 20:
        abort(422, 'Number of items must be between 1 and 20')
    # Bid count only applies to non-limited procedures: integer in [0, 10].
    if procurement_method not in limited_procurement:
        if "number_of_bids" in data:
            number_of_bids = data["number_of_bids"]
            if str(number_of_bids).isdigit() is True:
                if 0 > int(number_of_bids) or int(number_of_bids) > 10:
                    abort(422, 'Number of bids must be between 0 and 10')
            if str(number_of_bids).isdigit() is False and len(number_of_bids) > 0:
                abort(400, 'Number of bids must be integer')
    # Accelerator: integer in [1, 30000], with tighter caps for belowThreshold.
    if str(accelerator).isdigit() is False:
        abort(400, 'Accelerator must be integer')
    elif 1 > int(accelerator) or int(accelerator) > 30000:
        abort(422, 'Accelerator must be between 1 and 30000')
    if procurement_method == 'belowThreshold':
        if int(accelerator) > 14400:
            abort(422, 'For belowThreshold accelerator value can\'t be greater than 14400')
        if received_tender_status == 'active.qualification':
            if int(accelerator) > 1440:
                abort(422, 'For belowThreshold procedure in "active.qualification" accelerator value can\'t be greater than 1440')
    if str(company_id).isdigit() is False:
        abort(400, 'Company ID must be integer')
    if int(company_id) == 0:
        abort(422, 'Company id can\'t be 0')
    if received_tender_status not in tender_status_list:
        return abort(422, 'Tender status must be one of: {}'.format(tender_status_list))
    if api_version not in list_of_api_versions:
        return abort(422, 'API version must be one of: {}'.format(list_of_api_versions))
    if platform_host not in core.get_list_of_platform_urls(1):
        return abort(422, 'Platform must be one of: {}'.format(core.get_list_of_platform_urls(1)))
    if procurement_method not in limited_procurement:
        if 'skip_auction' not in data and received_tender_status not in ['active.tendering', 'active.pre-qualification', 'active.enquiries', 'complete']:
            abort(422, '"skip_auction" must be checked for {} status'.format(received_tender_status))
    # check procurement method
    if procurement_method in above_threshold_procurement:  # check allowed statuses for above threshold procurements
        # check status for procedure
        if procurement_method in without_pre_qualification_procedures:
            if received_tender_status not in without_pre_qualification_procedures_status:
                return abort(422, "For '{}' status must be one of: {}".format(procurement_method, without_pre_qualification_procedures_status))
        elif procurement_method in prequalification_procedures:
            if received_tender_status not in without_pre_qualification_procedures_status + prequalification_procedures_status:
                return abort(422, "For '{}' status must be one of: {}".format(procurement_method, without_pre_qualification_procedures_status + prequalification_procedures_status))
        elif procurement_method in competitive_procedures:
            if procurement_method == 'competitiveDialogueUA':
                if received_tender_status not in without_pre_qualification_procedures_status + prequalification_procedures_status + competitive_procedures_status:
                    return abort(422, "For '{}' status must be one of: {}".format(procurement_method, without_pre_qualification_procedures_status + prequalification_procedures_status + competitive_procedures_status))
            else:
                if received_tender_status not in without_pre_qualification_procedures_status + prequalification_procedures_status + competitive_procedures_status + competitive_dialogue_eu_status:
                    return abort(422, "For '{}' status must be one of: {}".format(procurement_method, without_pre_qualification_procedures_status + prequalification_procedures_status + competitive_procedures_status +
                                                                                  competitive_dialogue_eu_status))
    elif procurement_method in below_threshold_procurement:  # check allowed status for below threshold procedure
        if received_tender_status not in below_threshold_status:
            abort(422, "For '{}' status must be one of: {}".format(procurement_method, below_threshold_status))
    elif procurement_method in limited_procurement:  # check allowed status for limited procedures
        if received_tender_status not in limited_status:
            abort(422, "For '{}' status must be one of: {}".format(procurement_method, limited_status))
    else:  # incorrect procurementMethodType
        abort(422, 'procurementMethodType must be one of: {}'.format(above_threshold_procurement + below_threshold_procurement + limited_procurement))
    # Very high acceleration (< 30) is only allowed for specific statuses.
    if int(accelerator) < 30:
        if received_tender_status not in statuses_with_high_acceleration:
            abort(422, 'Accelerator value can be less than 30 for the following statuses only: {}'.format(statuses_with_high_acceleration))
        if procurement_method in negotiation_procurement and received_tender_status not in statuses_negotiation_with_high_acceleration:
            abort(422, 'Accelerator value can be less than 30 for: {} for the following statuses only: {}'.format(negotiation_procurement, statuses_negotiation_with_high_acceleration))
        if procurement_method == 'belowThreshold' and received_tender_status != 'active.enquiries':
            abort(422, 'For {} accelerator value can be less than 30 for the following status only: {}'.format('"belowThreshold"', 'active.enquiries'))
def validator_create_monitoring(data):
    """Validate a create-monitoring request, aborting on the first problem."""
    for required in create_monitoring_required_fields:
        if required not in data:
            abort(400, "Field '{}' is required. List of required fields: {}".format(required, create_monitoring_required_fields))
    # Highly accelerated statuses still require an accelerator of at least 1440.
    if data['monitoringStatus'] in monitoring_status_list_with_high_acceleration and int(data['accelerator']) < 1440:
        abort(422, 'Accelerator can\'t be less than 1440 for "{}" status'.format(data['monitoringStatus']))
def validator_add_tender_bid_to_company(bid_id, data):
    """Validate that *bid_id* exists and *data* carries a usable company id.

    Returns *data* unchanged when valid; aborts with 400/404/422 otherwise.
    """
    # Set comprehension replaces the index-loop list build: O(1) membership
    # test instead of O(n) list scan.
    known_bids = {bid.bid_id for bid in BidsTender.query.all()}
    if bid_id not in known_bids:
        abort(404, 'Bid id was not found in database')
    if 'company-id' not in data:
        abort(400, 'Company UID was not found in request')
    if str(data['company-id']).isdigit() is False:
        abort(400, 'Company UID must be integer')
    if int(data['company-id']) == 0:
        abort(422, 'Company id can\'t be 0')
    return data
def validator_if_tender_id_short_in_db(tender_id_short):
    """Abort with 404 unless *tender_id_short* exists in the Tenders table; returns True otherwise."""
    list_of_tenders = Tenders.query.all()
    list_tid = []
    for tid in range(len(list_of_tenders)):
        # NOTE(review): db.session.remove() runs on every iteration;
        # presumably intended to run once -- confirm before changing.
        db.session.remove()
        list_tid.append(list_of_tenders[tid].tender_id_short)
    if tender_id_short not in list_tid:
        abort(404, alert.error_404_not_found('alert_error_404_no_tender_id'))
    return True
|
987,471 | 63f5cadd81ef1a99c4d6b469f00484ea4fa72462 | import re
from .models import WordStats
from .models import Bucket
from django.shortcuts import render
from django.utils import timezone
from review.models import Word
from rest_framework.views import APIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
def play(request):
    """Render the play page; no template context is needed."""
    return render(request, 'play.html', {})
class PlayView(APIView):
    """GET: return every Word as a plain-object list (open to anyone)."""
    permission_classes = [AllowAny]

    def get(self, request):
        words = [w.to_obj() for w in Word.objects.all()]
        return Response(words)
class GuessView(APIView):
    """POST endpoint that checks a translation guess and updates the word's
    spaced-repetition stats (bucket and know_status)."""
    permission_classes = [AllowAny]

    def _handle_incorrect(self, word):
        """Send the word back to the first bucket and decay its know_status."""
        word_stats = WordStats.objects.get(word=word)
        word_stats.last_reviewed = timezone.now()
        word_stats.bucket = Bucket.objects.first()
        word_stats.know_status -= 1 if word_stats.know_status > 0 else 0
        word_stats.save()
        return word_stats

    def _handle_correct(self, word):
        """Advance the word one bucket (when one exists) and bump counters."""
        word_stats = WordStats.objects.get(word=word)
        word_stats.last_reviewed = timezone.now()
        word_stats.times_right += 1
        word_stats.know_status += 1
        try:
            new_bucket = Bucket.objects.get(id=word_stats.bucket.id + 1)
            word_stats.bucket = new_bucket
        except Bucket.DoesNotExist:
            # Already in the last bucket; stay there.
            pass
        word_stats.save()
        return word_stats

    def _is_correct(self, guess, translation):
        """True when non-empty *guess* appears as a whole word in *translation*."""
        return guess != '' and bool(re.search(r'\b%s\b' % re.escape(guess),
                                              translation,
                                              flags=re.IGNORECASE))

    def post(self, request):
        data = request.data
        try:
            word = Word.objects.get(id=int(data['wordId']))
        except Word.DoesNotExist:
            # BUG FIX: request.data is a dict, so the old `data.id` raised
            # AttributeError while building this error message.
            return Response({
                'error': 'Can\'t find word with id %s' % data.get('wordId')
            })
        if self._is_correct(data['english'], word.english):
            word_stats = self._handle_correct(word)
        else:
            word_stats = self._handle_incorrect(word)
        return Response({'knowStatus': word_stats.know_status})
|
987,472 | b1dc5f73266cf0b476051fcd72cbb0a2853af1ea | import logging
from ibmsecurity.utilities import tools
logger = logging.getLogger(__name__)
# URI for this module
uri = "/iam/access/v8/alias_service"
requires_modules = ["federation"]
requires_version = "9.0.1.0"
def get(isamAppliance, sortBy=None, count=None, start=None, filter=None, check_mode=False, force=False):
    """
    Retrieve alias associations

    NOTE: `filter` mirrors the REST query parameter name and intentionally
    shadows the builtin.
    """
    return isamAppliance.invoke_get("Retrieve alias associations", "{0}{1}".format(uri, tools.create_query_string(
        sortBy=sortBy, count=count, start=start, filter=filter)), requires_modules=requires_modules,
        requires_version=requires_version)
def add(isamAppliance, username, federation_id, aliases, type=None, partner_id=None, check_mode=False, force=False):
    """
    Create an alias association

    TODO: Need to understand uniqueness of federation/partner_id and username to complete this
    """
    warnings = ["Idempotency has not been coded for this function."]
    # Early exit when neither force nor the (stubbed) existence check asks
    # for a create.
    if not (force is True or _check(isamAppliance, username) is False):
        return isamAppliance.create_return_object()
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True, warnings=warnings)
    # A partner-scoped association appends "|<partner_id>" to the federation id.
    fed_id = federation_id if partner_id is None else federation_id + "|" + partner_id
    json_data = {
        "username": username,
        "federation_id": fed_id,
        "aliases": aliases
    }
    if type is not None:
        json_data['type'] = type
    return isamAppliance.invoke_post(
        "Create an alias association", uri, json_data, warnings=warnings,
        requires_modules=requires_modules,
        requires_version=requires_version)
def _check(isamAppliance, username):
    """
    TODO: Code the idempotency check logic

    Always returns False for now, so add() always attempts the create.
    """
    return False
|
987,473 | 37d9a58837489f5d62f7323cfb07e157d61d5076 | # Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tempfile
from importlib import import_module
from pathlib import Path
import click
import pytest
import yaml
from click.testing import CliRunner
from kedro.framework.cli.cli import cli
from kedro.framework.cli.starters import create_cli
_FAKE_REPO_NAME = "fake_repo"
_FAKE_PACKAGE_NAME = "fake_package"
@pytest.fixture
def fake_package_name():
    """Name of the generated dummy package, shared across the tests."""
    return _FAKE_PACKAGE_NAME
@pytest.fixture(scope="session")
def fake_root_dir():
try:
with tempfile.TemporaryDirectory() as tmp_root:
yield Path(tmp_root)
# On Windows `PermissionError` is raised from time to time
# while `tmp_root` removing.
except PermissionError: # pragma: no cover
pass
@pytest.fixture(scope="session")
def fake_repo_path(fake_root_dir):
return fake_root_dir.resolve() / _FAKE_REPO_NAME
@pytest.fixture(scope="session")
def fake_repo_config_path(fake_root_dir):
repo_config = {
"output_dir": str(fake_root_dir),
"project_name": "Test Project",
"repo_name": _FAKE_REPO_NAME,
"python_package": _FAKE_PACKAGE_NAME,
}
config_path = fake_root_dir / "repo_config.yml"
with config_path.open("w") as fd:
yaml.safe_dump(repo_config, fd)
return config_path
@pytest.fixture(scope="session")
def fake_project_cli(fake_repo_path: Path, fake_repo_config_path: Path):
starter_path = Path(__file__).parents[2].resolve()
starter_path = starter_path / "features" / "steps" / "test_starter"
# This is needed just for the tests, those CLI groups are merged in our
# code when invoking `kedro` but when imported, they still need to be merged
kedro_cli = click.CommandCollection(sources=[cli, create_cli])
CliRunner().invoke(
kedro_cli,
["new", "-c", str(fake_repo_config_path), "--starter", str(starter_path)],
)
# NOTE: Here we load a couple of modules, as they would be imported in
# the code and tests.
# It's safe to remove the new entries from path due to the python
# module caching mechanism. Any `reload` on it will not work though.
old_path = sys.path.copy()
sys.path = [str(fake_repo_path), str(fake_repo_path / "src")] + sys.path
# `load_context` will try to `import fake_package`,
# will fail without this line:
import_module(_FAKE_PACKAGE_NAME)
yield import_module(f"{_FAKE_PACKAGE_NAME}.cli")
sys.path = old_path
del sys.modules[_FAKE_PACKAGE_NAME]
@pytest.fixture
def chdir_to_dummy_project(fake_repo_path, monkeypatch):
    """Run the test with the current directory set to the dummy project root."""
    project_root = str(fake_repo_path)
    monkeypatch.chdir(project_root)
|
987,474 | 2998d164e23c3351fe2506199ee9a056e75e9761 | __author__ = 'Ingrid Marie'
import sqlite3

# Module-level side effects on purpose: this is a tiny demo script.
conn = sqlite3.connect('test.db')
c = conn.cursor()


def tableCreate():
    """Create the `love` table; IF NOT EXISTS makes the script re-runnable."""
    c.execute('CREATE TABLE IF NOT EXISTS love (ID INT, name TEXT)')


def enterData():
    """Insert the sample row into `love`."""
    c.execute("INSERT INTO love VALUES (6,' peter')")


# BUG FIX: the original never called tableCreate(), so the INSERT failed with
# "no such table" on a fresh database, and never committed, so even a
# successful insert was discarded when the process exited.
tableCreate()
enterData()
conn.commit()
|
987,475 | 016f9e75365a325243c879bbf998bea12fb429e1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
import tabun_api as api
from tabun_api.compat import text
from .. import core, user, worker
from ..db import db, get_db_last, set_db_last
def reader():
    """Poll the comment feeds and dispatch new/edited comments to handlers."""
    last_comment_time = get_db_last('last_comment_time')
    # download the comments
    comments, pages = load_comments(last_comment_time)
    if core.loglevel == core.logging.DEBUG:
        core.logger.debug('Downloaded %d comments, last 10: %s', len(comments), ", ".join(text(x.comment_id) for x in comments[-10:]))
    new_comments = []
    comment_infos = get_comments_info(x.comment_id for x in comments)
    oldest_comment_time = get_db_last('oldest_comment_time')
    new_oldest_comment_time = None
    new_last_comment_time = None
    for comment in comments:
        # seconds since the Unix epoch (comment.utctime is naive UTC)
        tm = (comment.utctime - datetime(1970, 1, 1)).total_seconds()
        # ignore comments that are too old
        if tm < oldest_comment_time:
            continue
        if new_oldest_comment_time is None:
            new_oldest_comment_time = tm
        if new_last_comment_time is None or tm > new_last_comment_time:
            new_last_comment_time = tm
        comment_hash = comment_infos.get(comment.comment_id, (None,))[0]
        if comment_hash:
            # the comment was already processed
            new_comment_hash = comment.hashsum()
            if new_comment_hash != comment_hash:
                # oops, the comment was edited
                set_comment_info(comment.comment_id, tm, new_comment_hash)
                worker.call_handlers('edit_comment', comment)
            continue
        comment_hash = comment.hashsum()
        set_comment_info(comment.comment_id, tm, comment_hash)
        # hand off to the worker thread for processing
        if comment.deleted:
            worker.call_handlers("new_deleted_comment", comment)
        else:
            worker.call_handlers("new_comment", comment)
        new_comments.append(comment)
    # for plugins that want all new comments in a single handler
    worker.call_handlers("new_comments", new_comments)
    if core.loglevel == core.logging.DEBUG:
        core.logger.debug('New comments: %s', ', '.join(text(x.comment_id) for x in new_comments))
    # purge comments that are now too old
    if new_oldest_comment_time is not None and new_oldest_comment_time != oldest_comment_time:
        set_db_last('oldest_comment_time', new_oldest_comment_time)
        clear_comment_info_older(new_oldest_comment_time)
    if new_last_comment_time is not None and new_last_comment_time != last_comment_time:
        set_db_last('last_comment_time', new_last_comment_time)
    worker.call_handlers("comments_list", comments)
def load_comments(last_comment_time=None):
    """Download comment feeds according to the configuration.

    Returns (comments, pages): `comments` is deduplicated and sorted by
    utctime across all feeds, `pages` holds the first page of each feed.
    """
    urls = [x.strip() for x in core.config.get('comments', 'urls').split(',') if x.strip()]
    raw_comments = []
    pages = []
    for url in urls:
        # find out how many pages we are allowed to fetch;
        # a URL may carry "#max" or "#min:max" after a hash
        if '#' in url:
            url, pages_count = url.split('#', 1)
            if ':' in pages_count:
                min_pages_count, pages_count = pages_count.split(':', 1)
                min_pages_count = max(1, int(min_pages_count))
                pages_count = max(1, int(pages_count))
            else:
                min_pages_count = 1
                pages_count = max(1, int(pages_count))
        else:
            min_pages_count = 1
            pages_count = 1
        for page_num in range(1, pages_count + 1):
            current_url = (url.rstrip('/') + ('/page%d/' % page_num)) if page_num > 1 else url
            # comments load VERY slowly:
            try:
                raw_data = user.open_with_check(current_url, timeout=max(120, user.user.timeout))
            except api.TabunError as exc:
                # the feed may be broken by a deleted blog; salvage what we can
                if exc.code != 500:
                    raise
                raw_data = exc.exc.read()
                if raw_data.rstrip().endswith(b'<a href="') and b'<li class="comment-link">' in raw_data[-100:]:
                    core.logger.error('Comments error 500, trying to parse partially')
                else:
                    raise
            worker.call_handlers('raw_data', current_url, raw_data)
            comments = sorted(user.user.get_comments(current_url, raw_data=raw_data).values(), key=lambda x: x.utctime)
            raw_comments.extend(comments)
            if page_num < 2:
                pages.append(comments)
            if not comments:
                core.logger.error('Comments feed returned 0 comments, looks like impossible situation')
                break
            # stop fetching what we do not need to fetch
            tm = (comments[0].utctime - datetime(1970, 1, 1)).total_seconds()
            if page_num >= min_pages_count and last_comment_time and tm < last_comment_time:
                break
    # deduplicate across feeds while keeping global utctime order
    comment_ids = []
    comments = []
    for comment in sorted(raw_comments, key=lambda x: x.utctime):
        if comment.comment_id not in comment_ids:
            comments.append(comment)
            comment_ids.append(comment.comment_id)
    return comments, pages
def get_comments_info(comment_ids):
    """Return a dict of stored comment hash tuples keyed by comment id.

    Unlike posts, the stored hashes are never None.
    """
    ids_sql = ', '.join(text(int(cid)) for cid in comment_ids)
    rows = db.query("select comment_id, hash from comments where comment_id in (%s)" % ids_sql)
    return {row[0]: row[1:] for row in rows}
def set_comment_info(comment_id, tm, comment_hash):
    """Store (or replace) a comment hash; `tm` is kept for later DB cleanup."""
    row = (int(comment_id), int(tm), comment_hash)
    db.execute("replace into comments values(?, ?, ?)", row)
def clear_comment_info_older(tm):
    """Drop comments older than `tm` so the database does not grow unbounded."""
    threshold = (int(tm),)
    db.execute('delete from comments where tm < ?', threshold)
def init_tabun_plugin():
    """Plugin entry point: ensure the comments table exists, register the reader."""
    schema = '(comment_id int not null primary key, tm int not null, hash text not null)'
    db.init_table('comments', schema)
    worker.add_reader(reader)
|
987,476 | 33b6e0fd223e7613bde7e597105e8cc0ee0fb862 | # -*- coding: utf-8 -*-
u"""
============================
Unicode Case Folding Support
============================
This module generates a CASE_MAP dictionary which maps upper case characters to
lower case characters according to the Unicode 5.1 Specification:
>>> CASE_MAP[u'B']
u'b'
Note that some codepoints may lower case into multi-byte characters, e.g.
>>> CASE_MAP[u'ß']
u'ss'
"""
from os.path import abspath, dirname, exists, join as join_path
from sys import maxunicode
from plexnetenv import PLEXNET_LOCAL
# ------------------------------------------------------------------------------
# some konstants
# ------------------------------------------------------------------------------
# Upper-case character -> case-folded (lower-case) string, per Unicode 5.1.
CASE_MAP = {}

# ------------------------------------------------------------------------------
# utility funktions
# ------------------------------------------------------------------------------

def _generate_case_map():
    """Populate CASE_MAP from the Unicode CaseFolding data file (idempotent)."""
    global CASE_MAP
    if CASE_MAP:
        return
    filepath = join_path(PLEXNET_LOCAL, 'share', 'unicode', 'case_folding.txt')
    if not exists(filepath):
        raise RuntimeError(
            "Couldn't find Unicode Case Folding data: %r" % filepath
            )
    # Python 2 narrow builds (maxunicode == 0xffff) cannot unichr() codepoints
    # above the BMP, so the character is built via an eval'd \U escape instead.
    if maxunicode == 0xffff:
        def _chr(s):
            return eval("u'\U" + ('0' * (8 - len(s))) + s + "'")
    else:
        def _chr(s):
            return unichr(int(s, 16))
    with open(filepath, 'r') as case_data:
        for line in case_data:
            # skip comment and blank lines
            if line.startswith('#') or (not line.strip()):
                continue
            # fields: <code>; <status>; <mapping>; # <name comment>
            ori, typ, dst, cmt = line.split('; ')
            # C = common, F = full case folding (may map to multiple chars)
            if typ in ('C', 'F'):
                CASE_MAP[_chr(ori)] = u''.join(_chr(char) for char in dst.split(' '))

_generate_case_map()
|
987,477 | 0811e3b365b462e1588a776b4570d50193001faa | from django.shortcuts import render
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views import generic
from .forms import UserCreationFormWithEmail
# Create your views here.
class SignUpView(generic.CreateView):
    """Registration view: renders the email-enabled signup form, then
    redirects to the login page on success."""
    form_class = UserCreationFormWithEmail
    success_url = reverse_lazy('login')
    template_name = 'accounts/signup.html'
|
987,478 | aceb232618e9e65ad07200771183d4ccdd45db51 | #!/usr/bin/env python3
"""
Baum-Welch Algorithm module
"""
import numpy as np
def baum_welch(Observations, Transition, Emission, Initial, iterations=1000):
    """
    performs the Baum-Welch algorithm for a hidden markov model:
    - Observations: numpy.ndarray of shape (T,) that contains the index
        of the observation
        - T: number of observations
    - Transition: numpy.ndarray of shape (M, M) that contains the initialized
        transition probabilities
        - M: number of hidden states
    - Emission: numpy.ndarray of shape (M, N) that contains the initialized
        emission probabilities
        - N: number of output states
    - Initial: numpy.ndarray of shape (M, 1) that contains the initialized
        starting probabilities
    - iterations: number of times expectation-maximization should be performed
    Returns: the converged Transition, Emission, or None, None on failure
    """
    # BUG FIX: the original validated an undefined name `Observation`,
    # so every call raised NameError instead of returning (None, None).
    if not isinstance(Observations, np.ndarray) or len(Observations.shape) != 1:
        return None, None
    if not isinstance(Emission, np.ndarray) or len(Emission.shape) != 2:
        return None, None
    if not isinstance(Transition, np.ndarray) or len(Transition.shape) != 2:
        return None, None
    if not isinstance(Initial, np.ndarray) or len(Initial.shape) != 2:
        return None, None
    # NOTE: despite the docstring's letters, N below is the number of hidden
    # states and M the number of output symbols (Emission is states x symbols).
    N, M = Emission.shape
    T = Observations.shape[0]
    if N != Transition.shape[0] or N != Transition.shape[1]:
        return None, None
    # iterations over 454 makes no difference in the output
    # to check use np.isclose with atol=1e-5 in a and b (store a_prev)
    if iterations > 454:
        iterations = 454
    a = Transition.copy()
    b = Emission.copy()
    for n in range(iterations):
        alpha = forward(Observations, b, a, Initial.reshape((-1, 1)))
        beta = backward(Observations, b, a, Initial.reshape((-1, 1)))
        # xi[i, j, t]: probability of state i at t and state j at t+1
        xi = np.zeros((N, N, T - 1))
        for col in range(T - 1):
            denominator = np.dot(np.dot(alpha[:, col].T, a) *
                                 b[:, Observations[col + 1]].T,
                                 beta[:, col + 1])
            for row in range(N):
                numerator = alpha[row, col] * a[row, :] *\
                    b[:, Observations[col + 1]].T * beta[:, col + 1].T
                xi[row, :, col] = numerator / denominator
        gamma = np.sum(xi, axis=1)
        a = np.sum(xi, 2) / np.sum(gamma, axis=1).reshape((-1, 1))
        # Add additional T'th element in gamma
        gamma = np.hstack(
            (gamma, np.sum(xi[:, :, T - 2], axis=0).reshape((-1, 1))))
        denominator = np.sum(gamma, axis=1)
        for k in range(M):
            b[:, k] = np.sum(gamma[:, Observations == k], axis=1)
        b = np.divide(b, denominator.reshape((-1, 1)))
    return a, b


def forward(Observation, Emission, Transition, Initial):
    """
    Performs the forward algorithm for a hidden markov model.
    Returns alpha of shape (N, T); alpha[i, t] is the joint probability of
    the observations up to t and being in state i at t.
    """
    N, M = Emission.shape
    T = Observation.shape[0]
    alpha = np.zeros((N, T))
    alpha[:, 0] = Initial.T * Emission[:, Observation[0]]
    for col in range(1, T):
        for row in range(N):
            aux = alpha[:, col - 1] * Transition[:, row]
            alpha[row, col] = np.sum(aux * Emission[row, Observation[col]])
    # P = np.sum(alpha[:, -1])
    return alpha


def backward(Observation, Emission, Transition, Initial):
    """
    Performs the backward algorithm for a hidden markov model.
    Returns beta of shape (N, T); beta[i, t] is the probability of the
    observations after t given state i at t.
    """
    N, M = Emission.shape
    T = Observation.shape[0]
    beta = np.zeros((N, T))
    beta[:, T - 1] = np.ones((N))
    # Loop in backward way from T-1 to 0
    # Due to python indexing the actual loop will be T-2 to 0
    for col in range(T - 2, -1, -1):
        for row in range(N):
            beta[row, col] = np.sum(beta[:, col + 1] *
                                    Transition[row, :] *
                                    Emission[:, Observation[col + 1]])
    # P = np.sum(Initial[:, 0] * Emission[:, Observation[0]] * beta[:, 0])
    return beta
|
987,479 | 2de3044c028288a52bdaee5c622144544e8935d4 | from django.conf.urls import url
from . import views
# URL namespace for reversing, e.g. "salesman_mgr:view_stock".
app_name = 'salesman_mgr'
# Every route carries the salesman's username as a path parameter.
urlpatterns = [
    url(r'^view/(?P<username>[\w.@+-]+)/$', views.viewStock, name='view_stock'),
    url(r'^dashboard/(?P<username>[\w.@+-]+)/$', views.renderSalesman, name='dashboard'),
    url(r'^history/(?P<username>[\w.@+-]+)/$', views.transactionHistory, name='viewTransactions'),
    url(r'^update/(?P<username>[\w.@+-]+)/$', views.updateStock, name='update_stock'),
    # same view with an optional flash-message segment
    url(r'^update/(?P<username>[\w.@+-]+)/(?P<messages>[\w.@+-]+)/$', views.updateStock, name='update_stock'),
    url(r'^updateInfo/(?P<username>[\w.@+-]+)/$', views.updateInfo, name='update_contact_detail'),
    url(r'^sell/(?P<username>[\w.@+-]+)/$', views.sellStock, name='sell_stock'),
]
|
987,480 | 90fa8b3d724b621802d060b492c4563fe67e388d | # Copyright 2018 Francesco Ceccon
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Underestimator for sum of other expressions."""
from suspect.expression import ExpressionType
from galini.core import SumExpression
from galini.expression_relaxation.expression_relaxation import ExpressionRelaxation, ExpressionRelaxationResult
class SumOfUnderestimators(ExpressionRelaxation):
    """Relax a sum expression by relaxing each child, falling back to any
    member relaxation that accepts the whole expression."""

    def __init__(self, underestimators):
        """`underestimators` must all be ExpressionRelaxation instances."""
        for candidate in underestimators:
            if not isinstance(candidate, ExpressionRelaxation):
                raise ValueError('All expression_relaxation must be instances of Underestimator')
        self._underestimators = candidate_list = underestimators

    def can_relax(self, problem, expr, ctx):
        """True when `expr` is relaxable as a sum of children or by a member."""
        if self._can_be_underestimated_as_sum_of_expressions(problem, expr, ctx):
            return True
        return self._can_be_underestimated_by_child_underestimator(problem, expr, ctx)

    def relax(self, problem, expr, ctx, **kwargs):
        """Return an ExpressionRelaxationResult for `expr`, or None."""
        if self._can_be_underestimated_as_sum_of_expressions(problem, expr, ctx):
            return self._underestimate_as_sum(problem, expr, ctx, **kwargs)
        for candidate in self._underestimators:
            if candidate.can_relax(problem, expr, ctx):
                return candidate.relax(problem, expr, ctx, **kwargs)
        return None

    def _can_be_underestimated_as_sum_of_expressions(self, problem, expr, ctx):
        # a Sum qualifies only if every child is itself relaxable
        if expr.expression_type != ExpressionType.Sum:
            return False
        return all(self.can_relax(problem, child, ctx) for child in expr.children)

    def _can_be_underestimated_by_child_underestimator(self, problem, expr, ctx):
        return any(
            candidate.can_relax(problem, expr, ctx)
            for candidate in self._underestimators
        )

    def _underestimate_as_sum(self, problem, expr, ctx, **kwargs):
        relaxed_children = []
        constraints = []
        for child in expr.children:
            result = self.relax(problem, child, ctx, **kwargs)
            # NOTE(review): children whose relaxation yields None are silently
            # dropped from the sum — presumably can_relax guarantees this never
            # happens; confirm.
            if result is not None:
                relaxed_children.append(result.expression)
                constraints.extend(result.constraints)
        return ExpressionRelaxationResult(SumExpression(relaxed_children), constraints)
|
987,481 | b8e56b32d640e070bbe400b3dfccc1cb3ff2a298 | import time
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
from selenium.webdriver.common.by import By
def do_like(drv, id):
    """Open the "likes" tab of a Facebook profile and click through its
    sub-tabs, paging each one.

    drv -- a selenium WebDriver instance
    id  -- the numeric profile id, as a string
    The whole sequence is retried up to 3 times on any scraping error.
    """
    npage = 2  # pages of likes to expand per sub-tab
    for attempt in range(3):
        try:
            # go to profile
            # BUG FIX: the original URL read "wwww.facebook.com" (four w's)
            # and had no scheme, so drv.get() could never resolve it.
            drv.get("https://www.facebook.com/profile.php?id=" + id + "&sk=likes")
            # get likes tab
            html_list = drv.find_element_by_xpath("//div[@role='tablist']")
            tab_list = html_list.find_elements_by_xpath("//a[@role='tab']")
            drv.execute_script("window.scrollTo(0, 300)")
            time.sleep(5)
            # BUG FIX: the inner loop reused the retry-loop variable `i`.
            for tab_index in range(5):
                tab_list[tab_index].click()
                time.sleep(2)
                for _page in range(npage):
                    btn = drv.find_element_by_class_name("_vfm")
                    btn.click()
                    time.sleep(5)
            time.sleep(5)
            break
        except Exception:
            # best-effort scraping: retry on any failure
            continue
|
987,482 | f1c01bfec82fc86692ef50936a6377d254602e00 | """
印刷文字识别WebAPI接口调用示例接口文档(必看):https://doc.xfyun.cn/rest_api/%E5%8D%B0%E5%88%B7%E6%96%87%E5%AD%97%E8%AF%86%E5%88%AB.html
上传图片base64编码后进行urlencode要求base64编码和urlencode后大小不超过4M最短边至少15px,最长边最大4096px支持jpg/png/bmp格式
(Very Important)创建完webapi应用添加合成服务之后一定要设置ip白名单,找到控制台--我的应用--设置ip白名单,如何设置参考:http://bbs.xfyun.cn/forum.php?mod=viewthread&tid=41891
错误码链接:https://www.xfyun.cn/document/error-code (code返回错误码时必看)
@author iflytek
"""
# -*- coding: utf-8 -*-
import requests
import time
import hashlib
import base64
import pickle
import json
import lmdb
import os, glob
import matplotlib.pyplot as plt
import numpy as np
import cv2
import sys
sys.path.append("..")
# from pic_feature_extract.vgg_extract_feature import extractor_imgcode,Encoder
from PIL import Image
from tqdm import tqdm
"""效果最好的,对竖着的差"""
# from urllib import parse
# 印刷文字识别 webapi 接口地址
URL = "http://webapi.xfyun.cn/v1/service/v1/ocr/general"
# 应用ID (必须为webapi类型应用,并印刷文字识别服务,参考帖子如何创建一个webapi应用:http://bbs.xfyun.cn/forum.php?mod=viewthread&tid=36481)
APPID = "0c293935"
# 接口密钥(webapi类型应用开通印刷文字识别服务后,控制台--我的应用---印刷文字识别---服务的apikey)
API_KEY = "2994a6604d85311a34dcb8ce0d28ce41"
# net=Encoder()
#调用请求头 修改调用参数
def getHeader(if_need_location):
    """Build the signed HTTP headers required by the iFlytek OCR web API.

    if_need_location -- "true"/"false": whether the API should also return
    character box positions.
    """
    cur_time = str(int(time.time()))
    # request parameters: language set and location flag, JSON then base64
    param_b64 = base64.b64encode(
        json.dumps({"language": "cn|en", "location": if_need_location}).encode('utf-8')
    )
    # checksum = MD5(api_key + timestamp + base64-encoded params)
    digest = hashlib.md5()
    digest.update((API_KEY + cur_time + str(param_b64, 'utf-8')).encode('utf-8'))
    return {
        'X-CurTime': cur_time,
        'X-Param': param_b64,
        'X-Appid': APPID,
        'X-CheckSum': digest.hexdigest(),
        'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
    }
def get_all_pic(data_dir):
    """Return sorted full paths of the jpg/png/gif images inside `data_dir`.

    BUG FIX: the original compared only the last three characters of the
    file name, so it missed upper-case extensions (".JPG") and wrongly
    matched names that merely end in "jpg" without a dot; the real file
    extension is matched instead.
    """
    wanted = {'.jpg', '.png', '.gif'}
    img_list = [
        os.path.join(data_dir, nm)
        for nm in os.listdir(data_dir)
        if os.path.splitext(nm)[1].lower() in wanted
    ]
    img_list.sort()
    return img_list
# draw rectangles given top-left and bottom-right coordinates
def plot_box_in_picture(picpath, boxes):
    """Draw each OCR word box onto the image and crop the box regions.

    NOTE(review): the trailing `return featurelist` is commented out, so this
    function currently returns None even though featurelist is built up —
    the caller that unpacks a feature value receives None.
    """
    img = cv2.imread(picpath)  # Read image with cv2
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # Convert to RGB
    # print(img.shape)
    img1 = Image.open(picpath)
    index = 0
    id = 0
    featurelist = []
    print("size为:", img1.size)
    # coordinates must not contain fractions
    for box in boxes:
        startX = int(box["location"]["top_left"]["x"])
        startY = int(box["location"]["top_left"]["y"])
        endX = int(box["location"]["right_bottom"]["x"])
        endY = int(box["location"]["right_bottom"]["y"])
        print("box为:", (startX, startY), (endX, endY))
        # normalise degenerate/inverted boxes so cv2 and PIL accept them
        if startX > endX: startX, endX = endX, startX
        if startY > endY: startY, endY = endY, startY
        if startY == endY: endY += 1
        if startX == endX: endX += 1
        cv2.rectangle(img, (startX, startY), (endX, endY), color=(0, 255, 0),
                      thickness=2)  # Draw Rectangle with the coordinates
        # crop the region, used to pair OCR boxes with features
        id += 1
        region = img1.crop((startX, startY, endX, endY))
        # save the crop
        # region.save("./pic/test{}.jpg".format(id))
        # plt.imshow(region)
        # plt.show()
        # feed the region straight into the vgg extractor
        # 1 channel to 3 channel pil
        im1 = im2 = im3 = region.convert("L")
        region = Image.merge("RGB", (im1, im2, im3))
        # feature=extractor_imgcode(region,net=net)
        # feature=[round(i, 8) for i in feature]
        # featurelist.append(feature)
    # whether to display the annotated result image
    # plt.figure(figsize=(5, 8)) # display the LMDB_output image
    # plt.imshow(img)
    # plt.xticks([])
    # plt.yticks([])
    # plt.show()
    # return featurelist
def detet_a_pic_and_plot(pic_path):
    """OCR one local image via the web API and draw the detected word boxes.

    Returns (word_box_list, feature_list).
    NOTE(review): plot_box_in_picture currently returns None (its return is
    commented out), so feature_list is always None — confirm callers cope.
    """
    # upload the file, base64-encoded
    with open(pic_path, 'rb') as f:
        f1 = f.read()
        f1_base64 = str(base64.b64encode(f1), 'utf-8')
        f.close()  # redundant: the with-block already closes the file
    data = {
        'image': f1_base64
    }
    r = requests.post(URL, data=data, headers=getHeader(if_need_location="true"))
    result = str(r.content, 'utf-8')
    # error codes: https://www.xfyun.cn/document/error-code (check `code` on failure)
    # parse the response
    result_json = json.loads(result)
    print(result_json)
    # recognised text and its precise positions;
    # result_json['data']["block"][0]["line"][1]["word"][0]["location"] is a text-line result
    word_box_list = []
    text_detect = result_json['data']["block"][0]["line"]
    for i in text_detect:
        for j in i["word"]:
            word_box_list.append(j)
    # print("图片{a}的识别结果为:".format(a=pic_path),word_box_list)
    # draw the boxes
    feature_list = plot_box_in_picture(pic_path, word_box_list)  # comment this line out to skip drawing
    return word_box_list, feature_list
if __name__ == '__main__':
    # recognise a single example image
    pic_path = "../test_data/003656.jpg"
    result, feature = detet_a_pic_and_plot(pic_path)
    print(result, feature)

    # # recognise the local sample images and draw the boxes
    def fun():
        """Batch-OCR a training directory, persisting features to an .npy dump."""
        pic_list = get_all_pic("D:\\Download\\sample_picture\\train")
        print("识别图片的数量为:", len(pic_list))
        res = []
        feature_dictionary = {}
        env_db = lmdb.Environment('../test_data/MY_EST_CH_train_ocr_feature.lmdb', map_size=2089934592)
        txn = env_db.begin(write=True)
        try:
            for i in pic_list:
                pic_name = os.path.basename(i)
                print(pic_name)
                result, feature_list = detet_a_pic_and_plot(i)
                res.append(result)
                feature_dictionary[pic_name] = feature_list
                # print(feature_dictionary)
                # print("该图片的特征列表为:",feature_list)
                # print(feature_list[0][:100])
                # print(result)
                # input("按任意键继续执行")
        except:  # on any error, still persist what was collected (NOTE: bare except also hides bugs)
            np.save("./MY_EST_CH_train_ocr_feature6.npy", feature_dictionary)
        else:
            np.save("./MY_EST_CH_train_ocr_feature6.npy", feature_dictionary)
        # except:
        #     env_db = lmdb.Environment('../test_data/MY_EST_CH_all_train_ocr_feature.lmdb')
        #     txn = env_db.begin(write=True)
        #     for key,value in feature_dictionary.items():
        #         value={"image_id":key,"features":value,"bbox":None,"num_boxes":None,
        #                "image_height":None,"image_width":None,
        #                "objects":None,"cls_prob":None}
        #         txn.put(key=key.encode(), value=pickle.dumps(value))
        #     txn.commit() # 提交
        #     env_db.close()
        #
        # #保存并显示结果
        # else:
        #     env_db = lmdb.Environment('../test_data/MY_EST_CH_all_train_ocr_feature.lmdb')
        #     txn = env_db.begin(write=True)
        #     for key, value in feature_dictionary.items():
        #         value = {"image_id": key, "features": value, "bbox": None, "num_boxes": None,
        #                  "image_height": None, "image_width": None,
        #                  "objects": None, "cls_prob": None}
        #         txn.put(key=key.encode(), value=pickle.dumps(value))
        #     txn.commit() # 提交
        #     env_db.close()

    # fun()
    # recognise all images of the Chinese subset and save features as a json file
    # pic_list = get_all_pic("D:\\Download\\sample_picture\\train2")
    #
    # print("识别图片的数量为:", len(pic_list)) #9515
    # res = []
    # print("*"*10,"detecting.....","*"*10)
    # for i in tqdm(pic_list):
    #     pic_name=os.path.basename(i)
    #     print(pic_name)
    #     result = detet_a_pic_and_plot(i)
    #     res.append({str(pic_name):result})
    #     # time.sleep(0.5)
    # with open('D:\\Download\\sample_picture\\中文训练集部分ocr结果2.json',"w",encoding="utf8") as f:
    #     json.dump(res,f,ensure_ascii=False)
    # print("识别结束")
|
987,483 | 6b2d9421b3478266835bf579275866f432f84284 | # Generated by Django 2.2.5 on 2019-11-04 21:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: introduces the unmanaged LocationData and StagingTable
    models (managed=False — the tables are created and maintained outside
    Django's migration system) and drops the Crimelocations model."""

    dependencies = [
        ('Crime', '0003_auto_20190927_0401'),
    ]

    operations = [
        migrations.CreateModel(
            name='Locationdata',
            fields=[
                ('location', models.CharField(blank=True, max_length=64, null=True)),
                ('inside_outside', models.CharField(blank=True, max_length=7, null=True)),
                ('post', models.CharField(blank=True, max_length=8, null=True)),
                ('district', models.CharField(blank=True, max_length=64, null=True)),
                ('neighborhood', models.CharField(blank=True, max_length=64, null=True)),
                ('longitude', models.DecimalField(blank=True, decimal_places=10, max_digits=12, null=True)),
                ('latitude', models.DecimalField(blank=True, decimal_places=10, max_digits=12, null=True)),
                ('location1', models.CharField(blank=True, max_length=48, null=True)),
                ('premise', models.CharField(blank=True, max_length=48, null=True)),
                ('vri_name1', models.CharField(blank=True, max_length=64, null=True)),
                ('locationid', models.IntegerField(db_column='locationId', primary_key=True, serialize=False)),
            ],
            options={
                'db_table': 'LocationData',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Stagingtable',
            fields=[
                ('crimedate', models.DateField(blank=True, db_column='crimeDate', null=True)),
                ('crimetime', models.TimeField(blank=True, db_column='crimeTime', null=True)),
                ('crimecode', models.CharField(blank=True, db_column='crimeCode', max_length=12, null=True)),
                ('location', models.CharField(blank=True, max_length=64, null=True)),
                ('description', models.CharField(blank=True, max_length=64, null=True)),
                ('inside_outside', models.CharField(blank=True, max_length=12, null=True)),
                ('weapon', models.CharField(blank=True, max_length=64, null=True)),
                ('post', models.CharField(blank=True, max_length=8, null=True)),
                ('district', models.CharField(blank=True, max_length=64, null=True)),
                ('neighborhood', models.CharField(blank=True, max_length=64, null=True)),
                ('longitude', models.DecimalField(blank=True, decimal_places=10, max_digits=12, null=True)),
                ('latitude', models.DecimalField(blank=True, decimal_places=10, max_digits=12, null=True)),
                ('location1', models.CharField(blank=True, max_length=48, null=True)),
                ('premise', models.CharField(blank=True, max_length=48, null=True)),
                ('vri_name1', models.CharField(blank=True, max_length=64, null=True)),
                ('total_incidents', models.IntegerField(blank=True, null=True)),
                ('crimeid', models.AutoField(db_column='crimeId', primary_key=True, serialize=False)),
            ],
            options={
                'db_table': 'StagingTable',
                'managed': False,
            },
        ),
        migrations.DeleteModel(
            name='Crimelocations',
        ),
    ]
|
987,484 | 0525559abfa982b5b96603301caa5b7da808e3aa | import collections
import functools
import math
import os
import shlex
import subprocess
import sys
import typing as t
import attr
import click
import lark
import clout.exceptions
from .. import _util
ALWAYS_ACCEPT = True
# Rule name that marks a help request in the parse tree.
HELP_COMMAND = "HELP_COMMAND"
class CountingBaseCommand:
    """Mixin adding `nargs`/`required` counting semantics to click commands."""

    def __init__(self, *args, nargs=1, required=False, **kwargs):
        self.nargs = nargs          # how many times the command may appear
        self.required = required    # whether it must appear at least once
        super().__init__(*args, **kwargs)

    @property
    def multiple(self):
        """True when the command may be given more than once."""
        return self.nargs != 1
class CountingGroup(CountingBaseCommand, click.Group):
    """A click Group with counting semantics (see CountingBaseCommand)."""
    pass


class CountingCommand(CountingBaseCommand, click.Command):
    """A click Command with counting semantics (see CountingBaseCommand)."""
    pass
@functools.singledispatch
def to_lark(obj: object):
    """Render a click command/parameter as Lark grammar text.

    Concrete renderings are registered per type below; unknown types fail.
    """
    raise NotImplementedError(obj)
def quote(s):
    """Wrap `s` in double quotes for use as a Lark terminal."""
    return '"' + s + '"'


def par(s):
    """Wrap `s` in parentheses."""
    return f"({s})"


def one_of(items):
    """Render `items` as a Lark alternation; a single item is unparenthesized."""
    choices = list(items)
    if len(choices) == 1:
        return str(choices[0])
    return par(" | ".join(choices))


def name_rule(obj) -> str:
    """Derive a unique Lark rule name from `obj.name` plus its identity."""
    stem = obj.name.lstrip('-').replace('-', '_')
    return f"{stem}_{id(obj)}"
@to_lark.register
def _(option: click.Option):
    """Grammar rule for an option: its flag spellings, plus a value unless is_flag."""
    if option.is_flag:
        return (
            f"{name_rule(option)} : "
            + "|".join(quote(decl) for decl in option.opts)
            + "\n"
        )
    # non-flag options take a value, optionally separated by "="
    return (
        f"{name_rule(option)} : "
        + one_of(quote(decl) + ' "="? value' for decl in option.opts)
        + "\n"
    )
@to_lark.register
def _(arg: click.Argument):
    """Grammar rule for a positional argument: a bare value token."""
    return f"{name_rule(arg)} : value\n"
def min_params(cmd: click.BaseCommand) -> int:
    """Lower bound on the argument tokens `cmd` accepts.

    Variadic (nargs == -1) or repeatable parameters count once; others count
    `nargs`. Only parameters that are required or lack a truthy default count.
    """
    total = 0
    for p in cmd.params:
        if not (p.required or not p.default):
            continue
        total += 1 if p.nargs == -1 or p.multiple else p.nargs
    return total
def max_params(cmd: click.BaseCommand) -> t.Union[int, float]:
    """Upper bound on argument tokens for `cmd`; inf when any param is variadic."""
    counts = (math.inf if p.nargs == -1 or p.multiple else p.nargs for p in cmd.params)
    return sum(counts)
@to_lark.register
def _(cmd: CountingCommand):
    """Grammar rule for a command: its name then a bounded mix of its params."""
    optionals = [name_rule(p) for p in cmd.params if not p.required or p.default]
    requireds = [name_rule(p) for p in cmd.params if p.required]
    params = one_of(optionals + requireds)
    if max_params(cmd) == math.inf:
        params += f"+"
    else:
        # lark repetition range: between min and max parameter tokens
        params += f" ~ {min_params(cmd)}..{max_params(cmd)}"
    out = f"{name_rule(cmd)} : {quote(cmd.name)} {params} \n"
    # append the rules for each parameter
    for p in cmd.params:
        out += to_lark(p)
    return out
@to_lark.register
def _(grp: CountingGroup):
    """Grammar rule for a group: its name then its subcommands and params."""
    if not grp.commands:
        # a group with no subcommands renders exactly like a plain command
        return to_lark.dispatch(CountingCommand)(grp)
    command = one_of(name_rule(c) for c in grp.commands.values())
    optionals = [name_rule(p) for p in grp.params if not p.required]
    requireds = [name_rule(p) for p in grp.params if p.required]
    params = one_of(optionals + requireds)
    if grp.params:
        out = f"{name_rule(grp)} : {quote(grp.name)} ({command}|{params})+ \n"
    else:
        out = f"{name_rule(grp)} : {quote(grp.name)} {command}+ \n"
    # append the rules for every subcommand and parameter
    for c in grp.commands.values():
        out += to_lark(c)
    for p in grp.params:
        out += to_lark(p)
    return out
def build_grammar(grp):
    """Return the complete Lark grammar for the command tree rooted at `grp`."""
    parts = [
        to_lark(grp),
        "?value : /\\S+/\n",
        f"?start : {name_rule(grp)}\n",
        "%import common.CNAME\n",
        "%import common.WS -> _WHITE\n",
        "%ignore _WHITE\n",
    ]
    return "".join(parts)
def get_base_commands(grp: click.BaseCommand) -> t.Iterator[click.BaseCommand]:
    """Yield `grp` followed, depth-first, by every command reachable below it."""
    yield grp
    for sub in getattr(grp, "commands", {}).values():
        yield from get_base_commands(sub)
class Walker(lark.Visitor):
    """Validates a parse tree against the click command tree.

    A validation method is attached per command under its grammar rule name,
    so lark dispatches each parsed command node to the matching validator.
    """

    def __init__(self, *args, group, **kwargs):
        self.group = group
        super().__init__(*args, **kwargs)
        base_commands = list(get_base_commands(self.group))
        self.all_param_names = {name_rule(p) for c in base_commands for p in c.params}
        for cmd in base_commands:
            name, method = self.make_validation_method(cmd)
            setattr(self, name, method)

    def make_validation_method(self, command):
        """Return (rule_name, validator) rejecting over-repeated params/subcommands."""
        def param_validation_method(parsed_command):
            # count how often each child rule occurs under this command node
            counter = collections.Counter(p.data for p in parsed_command.children)
            for param_or_cmd in list(command.params) + list(
                getattr(command, "commands", {}).values()
            ):
                param_or_cmd_id = name_rule(param_or_cmd)
                observed = counter.get(param_or_cmd_id, 0)
                if observed > param_or_cmd.nargs:
                    raise TooManyArgs(param_or_cmd)
        return name_rule(command), param_validation_method
class CLIParsingErrorException(Exception):
    """Base class for command-line parsing failures."""
    pass


class InvalidInput(CLIParsingErrorException):
    """No valid parse-tree branch matched the input."""
    pass


class HelpRequested(Exception):
    """The user asked for help output."""
    pass


class TooManyArgs(CLIParsingErrorException):
    """A parameter or subcommand appeared more often than its nargs allows."""
    pass


class AmbiguousArgs(CLIParsingErrorException):
    """Multiple valid parse branches remained after validation."""
    pass
class Transformer(lark.Transformer):
    """Converts a validated parse tree into (command, values) pairs.

    Methods are attached dynamically — one per command and one per
    parameter, keyed by grammar rule name — so lark dispatches each node
    to the matching handler.
    """

    def __init__(self, *args, group, use_defaults, **kwargs):
        self.group = group
        self.use_defaults = use_defaults
        super().__init__(*args, **kwargs)
        base_commands = list(get_base_commands(self.group))
        self.all_param_names = {name_rule(p) for c in base_commands for p in c.params}
        for cmd in base_commands:
            if isinstance(cmd, CountingCommand) and not isinstance(
                cmd, click.MultiCommand
            ):
                # Not a group
                name, method = self.make_command_method(cmd)
                setattr(self, name, method)
            for param in cmd.params:
                name, method = self.make_param_method(cmd, param)
                setattr(self, name, method)
            if isinstance(cmd, click.MultiCommand):
                name, method = self.make_group_method(cmd)
                setattr(self, name, method)

    def process_params(self, command, parsed):
        """Fold (param, value) pairs into (command, {name: processed_value})."""
        d = {}
        for param, value in parsed:
            if param.name == "--help":
                # print help and stop immediately
                print(command.get_help(click.Context(command)))
                sys.exit()
            if isinstance(param, click.Parameter):
                value = str(value)
            # repeatable parameters (and commands) accumulate into a list
            if param.multiple or param.nargs != 1:
                d.setdefault(param, []).append(value)
            else:
                d[param] = value
        out = {}
        for param, value in d.items():
            if isinstance(param, click.Parameter):
                try:
                    out[param.name] = param.process_value(click.Context(command), value)
                except click.exceptions.BadParameter as e:
                    # XXX
                    raise
            elif isinstance(param, click.BaseCommand):
                # nested command: keep only the values that were actually set
                out[param.name] = {k: v for k, v in value.items() if v != _util.UNSET}
            else:
                raise TypeError(param)
        if self.use_defaults:
            # fill in click defaults for parameters absent from the input
            for param in command.params:
                if param.name not in out and not param.required:
                    out[param.name] = param.default
        return command, out

    def make_command_method(self, command):
        """(rule_name, method) transforming a command node into (command, values)."""
        return (name_rule(command), lambda parsed: self.process_params(command, parsed))

    def make_group_method(self, group):
        """(rule_name, method) transforming a group node into (group, values)."""
        def method(parsed):
            import lark  # NOTE(review): unused leftover import

            _group, out = self.process_params(
                group, [(obj, value) for obj, value in parsed]
            )
            return (group, out)

        return name_rule(group), method

    def make_param_method(self, cmd, param):
        """(rule_name, method) transforming a parameter node into (param, value)."""
        def method(parsed):
            if param.is_flag:
                # a flag's mere presence is the value
                parsed = True
            else:
                [parsed] = parsed
            return (param, parsed)

        return name_rule(param), method
class RemoveInvalidBranches(lark.Transformer):
    """Resolves Earley ambiguity nodes by dropping branches that fail validation."""

    def __init__(self, *args, group, **kwargs):
        self.group = group
        super().__init__(*args, **kwargs)
        base_commands = list(get_base_commands(self.group))
        self.all_param_names = {name_rule(p) for c in base_commands for p in c.params}

    def _ambig(self, data):
        # keep only the branches that pass Walker validation
        trees = [tree for tree in data if check_validity(self.group, tree)]
        if len(trees) == 1:
            return trees[0]
        if len(trees) == 0:
            raise InvalidInput(data)
        # several valid branches: prefer those that request help
        trees = [tree for tree in trees if has_help(tree)]
        if not trees:
            raise AmbiguousArgs(data)
        # Pick one.
        # Picking the first seems to result in help being deeper which is probably good.
        # TODO ensure help is deeper.
        return trees[0]
def has_help(tree):
    """Whether the parse tree contains a HELP_COMMAND node.

    NOTE(review): lark's find_data returns a generator, which is always
    truthy — confirm this test behaves as intended.
    """
    matches = tree.find_data(HELP_COMMAND)
    return bool(matches)
def check_validity(group, tree):
    """True when `tree` passes Walker validation (no over-repeated arguments)."""
    try:
        Walker(group=group).visit(tree)
    except TooManyArgs:
        return False
    return True
def find_missing_input(parser, s: str) -> t.Optional[t.List[str]]:
    """Locate the word prefix of `s` at which parsing starts to fail.

    Returns None when `s` itself parses; otherwise the word list `words`
    such that `words[:-1]` parses but `words` does not.
    NOTE(review): if even the empty prefix fails to parse, this loops forever
    ([] == [][:-1]) — confirm callers only pass partially-valid input.
    """
    words = shlex.split(s)
    try:
        parser.parse(subprocess.list2cmdline(words))
    except lark.exceptions.ParseError:
        pass
    else:
        return None
    while True:
        try:
            parser.parse(subprocess.list2cmdline(words[:-1]))
        except lark.exceptions.ParseError:
            words = words[:-1]
        else:
            return words
@attr.dataclass
class Parser:
    """Parses a command line against a CountingGroup via a generated Lark grammar."""

    group: CountingGroup
    callback: t.Callable = lambda **kw: kw
    use_defaults: bool = True
    _id_to_object: t.Dict[str, object] = attr.ib(factory=dict)

    def __attrs_post_init__(self):
        # index every command and parameter by its grammar rule name
        for cmd in get_base_commands(self.group):
            self._id_to_object[name_rule(cmd)] = cmd
            for param in cmd.params:
                self._id_to_object[name_rule(param)] = param

    def parse_string(self, s):
        """Parse the command-line string `s` and return the value dict."""
        grammar = build_grammar(self.group)
        # debugging aid: dump the generated grammar when the env var is set
        if int(os.environ.get("CLI_SHOW_GRAMMAR", 0)):
            print(grammar)
        parser = lark.Lark(grammar, parser="earley", ambiguity="explicit")
        try:
            tree = parser.parse(s)
        except lark.exceptions.ParseError as e:
            found = find_missing_input(parser, s)
            raise clout.exceptions.MissingInput(self.group, s, found) from e
        try:
            tree = RemoveInvalidBranches(group=self.group).transform(tree)
        except AmbiguousArgs:
            # NOTE(review): after this message the ambiguous tree is used
            # unchanged — confirm that is intended.
            click.echo(
                "The command arguments were ambiguous. Rearranging terms might help."
            )
        transformer = Transformer(group=self.group, use_defaults=self.use_defaults)
        try:
            _group, value = transformer.transform(tree)
        except lark.exceptions.VisitError as e:
            # unwrap lark's wrapper so callers see the original exception
            raise e.orig_exc from e
        return value

    def parse_args(self, args: t.List[str]):
        """Parse an argv-style list by joining it back into one command line."""
        line = subprocess.list2cmdline(args)
        return self.parse_string(line)
|
987,485 | 80d8a67fd76c1b74f9e0dc19ac423afa418f5c05 | # Generated by Django 3.0.3 on 2020-09-10 05:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax `isonline`/`socketid` to nullable text fields and make
    `username` a unique 20-char field.

    Auto-generated by Django; avoid hand-editing field definitions.
    """

    dependencies = [
        ('wxchat', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='isonline',
            field=models.TextField(null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='socketid',
            field=models.TextField(null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='username',
            field=models.CharField(max_length=20, unique=True),
        ),
    ]
|
987,486 | b10a21adb5028aa51948ead0e6770bf388fbe1ba | from .models import Inbox, Setting, Dislike, Like, UserPhoto, Match, Profile
from django.views.generic import CreateView, UpdateView, DetailView, ListView
from django.shortcuts import render
# Create your views here.
|
987,487 | b138d59edfd9c8c8b24fad3d4cdc2e12c732c3fc | # -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2020 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Santiago Dueñas <sduenas@bitergia.com>
# Miguel Ángel Fernández <mafesan@bitergia.com>
#
import json
import re
import graphene
import graphql_jwt
from django.conf import settings
from django.core.paginator import Paginator
from django.db.models import Q, Subquery
from django.db.models import JSONField
from django_rq import enqueue
from graphene.types.generic import GenericScalar
from graphene.utils.str_converters import to_snake_case
from graphene_django.converter import convert_django_field
from graphene_django.types import DjangoObjectType
from grimoirelab_toolkit.datetime import (str_to_datetime,
InvalidDateError)
from .api import (add_identity,
delete_identity,
update_profile,
move_identity,
lock,
unlock,
merge,
unmerge_identities,
add_organization,
add_domain,
delete_organization,
delete_domain,
enroll,
withdraw,
update_enrollment)
from .context import SortingHatContext
from .decorators import check_auth
from .errors import InvalidFilterError
from .jobs import (affiliate,
unify,
find_job,
get_jobs,
recommend_affiliations,
recommend_matches)
from .models import (Organization,
Domain,
Country,
Individual,
Identity,
Profile,
Enrollment,
Transaction,
Operation)
@convert_django_field.register(JSONField)
def convert_json_field_to_generic_scalar(field, registry=None):
    """Convert the content of a `JSONField` loading it as an object"""
    # OperationArgsType is defined later in this module; the name is
    # resolved at call time, after the module has fully loaded.
    return OperationArgsType(description=field.help_text, required=not field.null)
def parse_date_filter(filter_value):
    """Extract the operator and date(s) from a date filter string.

    Two shapes are accepted: a comparison operator (>, >=, <, <=)
    followed by one date, or two dates joined by the range operator
    (..).  Dates use ISO 8601 (YYYY-MM-DDTHH:MM:SSZ); microseconds and
    a time zone offset are also accepted.

    :param filter_value: String containing the filter value
    :returns: A dictionary including an operator and the datetime values
    """
    iso_date_group = r"(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?(Z|\+\d{2}:\d{2})?)"
    # `>=DATE`, `<DATE`, ... — one operator plus one date.
    oper_comparison = r"^(<=?|>=?)%s$" % iso_date_group
    # `DATE..DATE` — a closed range between two dates.
    range_comparison = r"^%s\.{2}%s$" % (iso_date_group, iso_date_group)

    result = {
        "operator": None,
        "date1": None,
        "date2": None
    }

    comparison_match = re.match(oper_comparison, filter_value)
    range_match = re.match(range_comparison, filter_value)

    if comparison_match:
        result['operator'] = comparison_match.group(1)
        result['date1'] = str_to_datetime(comparison_match.group(2))
    elif range_match:
        result['operator'] = '..'
        # The second date starts at group 4; groups 2-3 are the optional
        # fraction/offset parts of the first date.
        result['date1'] = str_to_datetime(range_match.group(1))
        result['date2'] = str_to_datetime(range_match.group(4))
        if result['date1'] > result['date2']:
            range_msg = 'Date range is invalid. Upper bound must be greater than the lower bound'
            raise ValueError(range_msg)
    else:
        raise ValueError('Filter format is not valid')

    return result
class PaginationType(graphene.ObjectType):
page = graphene.Int(description='Current page.')
page_size = graphene.Int(description='Number of items per page.')
num_pages = graphene.Int(description='Total number of pages.')
has_next = graphene.Boolean(description='Whether there is a page after the current one.')
has_prev = graphene.Boolean(description='Whether there is a page before the current one.')
start_index = graphene.Int(description='Index of the first item on the page.')
end_index = graphene.Int(description='Index of the last item on the page.')
total_results = graphene.Int(description='Total number of items.')
class OperationArgsType(GenericScalar):
    """Generic scalar whose serialized value is decoded from JSON."""

    @classmethod
    def serialize(cls, value):
        # The stored value is a JSON string; expose it as an object.
        serialized = super().serialize(value)
        return json.loads(serialized)
class OperationType(DjangoObjectType):
class Meta:
model = Operation
class TransactionType(DjangoObjectType):
class Meta:
model = Transaction
class OrganizationType(DjangoObjectType):
class Meta:
model = Organization
class DomainType(DjangoObjectType):
class Meta:
model = Domain
class CountryType(DjangoObjectType):
class Meta:
model = Country
class IndividualType(DjangoObjectType):
class Meta:
model = Individual
class IdentityType(DjangoObjectType):
class Meta:
model = Identity
class ProfileType(DjangoObjectType):
class Meta:
model = Profile
class EnrollmentType(DjangoObjectType):
class Meta:
model = Enrollment
class AffiliationRecommendationType(graphene.ObjectType):
uuid = graphene.String(description='The unique identifier of an individual.')
organizations = graphene.List(graphene.String, description='List of recommended organizations.')
class MatchesRecommendationType(graphene.ObjectType):
uuid = graphene.String(description='The unique identifier of an individual.')
matches = graphene.List(graphene.String, description='List of recommended matches.')
class AffiliationResultType(graphene.ObjectType):
uuid = graphene.String(description='The unique identifier of an individual.')
organizations = graphene.List(
graphene.String,
description='List of organizations an individual was affilated to using matching recommendations.'
)
class UnifyResultType(graphene.ObjectType):
merged = graphene.List(
graphene.String,
description='List of individuals that were merged using matching recommendations.'
)
class JobResultType(graphene.Union):
class Meta:
types = (AffiliationResultType,
AffiliationRecommendationType,
MatchesRecommendationType,
UnifyResultType)
class JobType(graphene.ObjectType):
job_id = graphene.String(description='Job identifier.')
job_type = graphene.String(description='Type of job.')
status = graphene.String(description='Job status (`started`, `deferred`, `finished`, `failed` or `scheduled`).')
result = graphene.List(JobResultType, description='List of job results.')
errors = graphene.List(graphene.String, description='List of errors.')
enqueued_at = graphene.DateTime(description='Time the job was enqueued at.')
class ProfileInputType(graphene.InputObjectType):
name = graphene.String(required=False, description='Name of the individual.')
email = graphene.String(required=False, description='Email address of the individual.')
gender = graphene.String(required=False, description='Gender of the individual.')
gender_acc = graphene.Int(
required=False,
description='Gender accuracy (range of 1 to 100; by default, set to 100).'
)
is_bot = graphene.Boolean(required=False, description='Whether an individual is a bot or not.')
country_code = graphene.String(
required=False,
description='ISO-3166 country code. Examples: `DK` for Denmark, `IT` for Italy.'
)
class CountryFilterType(graphene.InputObjectType):
code = graphene.String(
required=False,
description='Filter countries with an ISO Alpha 2 country code. Examples: `DK` for Denmark, `IT` for Italy.'
)
term = graphene.String(
required=False,
description='Filter countries whose name contains the term.'
)
class OrganizationFilterType(graphene.InputObjectType):
name = graphene.String(
required=False,
description='Filter organizations with an exact name match.'
)
term = graphene.String(
required=False,
description='Filter organizations whose name or domains include the term.'
)
class IdentityFilterType(graphene.InputObjectType):
uuid = graphene.String(
required=False,
description='Find an identity by its unique identifier.'
)
term = graphene.String(
required=False,
description='Filter individuals whose name, email or username contain the term.'
)
is_locked = graphene.Boolean(
required=False,
description='Filters individuals by whether their profiles are locked and cannot be edited.'
)
is_bot = graphene.Boolean(
required=False,
description='Filters individuals by whether they have been marked as bots.'
)
gender = graphene.String(
required=False,
description='Filters individuals by their gender.'
)
country = graphene.String(
required=False,
description='Filters individuals using an ISO Alpha 3 or Alpha 2 country code, or with a country name.\
Examples:\n * `GB`\n * `GBR`\n * `United Kingdom`'
)
source = graphene.String(
required=False,
description='Filters individuals by the data source of their identities.'
)
enrollment = graphene.String(
required=False,
description='Filters individuals affiliated to an organization.'
)
enrollment_date = graphene.String(
required=False,
description='Filter with a comparison operator (>, >=, <, <=) and a date OR with a range operator (..) between\
two dates, following ISO-8601 format. Examples:\n* `>=2020-10-12T09:35:06.13045+01:00` \
\n * `2020-10-12T00:00:00..2020-11-22T00:00:00`.'
)
is_enrolled = graphene.Boolean(
required=False,
description='Filter individuals by whether they are affiliated to any organization.'
)
last_updated = graphene.String(
required=False,
description='Filter with a comparison operator (>, >=, <, <=) and a date OR with a range operator (..) between\
two dates, following ISO-8601 format. Examples:\n* `>=2020-10-12T09:35:06.13045+01:00` \
\n * `2020-10-12T00:00:00..2020-11-22T00:00:00`.'
)
class TransactionFilterType(graphene.InputObjectType):
tuid = graphene.String(
required=False,
description='Find a transaction using its unique id.'
)
name = graphene.String(
required=False,
description='Find a transaction using its name.'
)
is_closed = graphene.Boolean(
required=False,
description='Filter transactions by whether they are closed.'
)
from_date = graphene.DateTime(
required=False,
description='Find transactions created after a date, following ISO-8601 format. For example, `2020-04-22T00:00:00Z`.'
)
to_date = graphene.DateTime(
required=False,
description='Find transactions created before a date, following ISO-8601 format. For example, `2020-04-22T00:00:00Z`.'
)
authored_by = graphene.String(
required=False,
description='Filter transactions using the username of their author.'
)
class OperationFilterType(graphene.InputObjectType):
ouid = graphene.String(
required=False,
description='Find an operation using its unique id.'
)
op_type = graphene.String(
required=False,
description='Filter operations by their type: `ADD`, `DELETE` or `UPDATE`.'
)
entity_type = graphene.String(
required=False,
description='Filter by the type of entity involved in the operations, eg. `individual`, `profile`, `enrollment`.'
)
target = graphene.String(
required=False,
description='Filter by the argument which the operation is directed to.'
)
from_date = graphene.DateTime(
required=False,
description='Find operations created after a date, following ISO-8601 format. For example, `2020-04-22T00:00:00Z`.'
)
to_date = graphene.DateTime(
required=False,
description='Find operations created before a date, following ISO-8601 format. For example, `2020-04-22T00:00:00Z`.'
)
class AbstractPaginatedType(graphene.ObjectType):
    """Base class wrapping a queryset into a paginated GraphQL result."""

    @classmethod
    def create_paginated_result(cls, query, page=1,
                                page_size=settings.DEFAULT_GRAPHQL_PAGE_SIZE):
        """Paginate *query* and return an instance with entities + page info.

        :param query: queryset (or sequence) to paginate
        :param page: 1-based page number
        :param page_size: number of items per page
        :raises django.core.paginator.InvalidPage: for out-of-range pages
        """
        paginator = Paginator(query, page_size)
        result = paginator.page(page)

        entities = result.object_list

        page_info = PaginationType(
            page=result.number,
            page_size=page_size,
            num_pages=paginator.num_pages,
            has_next=result.has_next(),
            has_prev=result.has_previous(),
            start_index=result.start_index(),
            end_index=result.end_index(),
            # Use the paginator's cached count instead of len(query),
            # which would evaluate the entire queryset a second time.
            total_results=paginator.count
        )

        return cls(entities=entities, page_info=page_info)
class CountryPaginatedType(AbstractPaginatedType):
entities = graphene.List(CountryType, description='A list of countries.')
page_info = graphene.Field(PaginationType, description='Information to aid in pagination.')
class OrganizationPaginatedType(AbstractPaginatedType):
entities = graphene.List(OrganizationType, description='A list of organizations.')
page_info = graphene.Field(PaginationType, description='Information to aid in pagination.')
class IdentityPaginatedType(AbstractPaginatedType):
entities = graphene.List(IndividualType, description='A list of identities.')
page_info = graphene.Field(PaginationType, description='Information to aid in pagination.')
class TransactionPaginatedType(AbstractPaginatedType):
entities = graphene.List(TransactionType, description='A list of transactions.')
page_info = graphene.Field(PaginationType, description='Information to aid in pagination.')
class OperationPaginatedType(AbstractPaginatedType):
entities = graphene.List(OperationType, description='A list of operations.')
page_info = graphene.Field(PaginationType, description='Information to aid in pagination.')
class JobPaginatedType(AbstractPaginatedType):
entities = graphene.List(JobType, description='A list of jobs.')
page_info = graphene.Field(PaginationType, description='Information to aid in pagination.')
class AddOrganization(graphene.Mutation):
class Arguments:
name = graphene.String()
organization = graphene.Field(lambda: OrganizationType)
@check_auth
def mutate(self, info, name):
user = info.context.user
ctx = SortingHatContext(user)
org = add_organization(ctx, name)
return AddOrganization(
organization=org
)
class DeleteOrganization(graphene.Mutation):
class Arguments:
name = graphene.String()
organization = graphene.Field(lambda: OrganizationType)
@check_auth
def mutate(self, info, name):
user = info.context.user
ctx = SortingHatContext(user)
org = delete_organization(ctx, name)
return DeleteOrganization(
organization=org
)
class AddDomain(graphene.Mutation):
class Arguments:
organization = graphene.String()
domain = graphene.String()
is_top_domain = graphene.Boolean()
domain = graphene.Field(lambda: DomainType)
@check_auth
def mutate(self, info, organization, domain, is_top_domain=False):
user = info.context.user
ctx = SortingHatContext(user)
dom = add_domain(ctx,
organization,
domain,
is_top_domain=is_top_domain)
return AddDomain(
domain=dom
)
class DeleteDomain(graphene.Mutation):
class Arguments:
domain = graphene.String()
domain = graphene.Field(lambda: DomainType)
@check_auth
def mutate(self, info, domain):
user = info.context.user
ctx = SortingHatContext(user)
dom = delete_domain(ctx, domain)
return DeleteDomain(
domain=dom
)
class AddIdentity(graphene.Mutation):
class Arguments:
source = graphene.String()
name = graphene.String()
email = graphene.String()
username = graphene.String()
uuid = graphene.String()
uuid = graphene.Field(lambda: graphene.String)
individual = graphene.Field(lambda: IndividualType)
@check_auth
def mutate(self, info, source,
name=None, email=None, username=None,
uuid=None):
user = info.context.user
ctx = SortingHatContext(user)
identity = add_identity(ctx,
source,
name=name,
email=email,
username=username,
uuid=uuid)
individual = identity.individual
return AddIdentity(
uuid=identity.uuid,
individual=individual
)
class DeleteIdentity(graphene.Mutation):
class Arguments:
uuid = graphene.String()
uuid = graphene.Field(lambda: graphene.String)
individual = graphene.Field(lambda: IndividualType)
@check_auth
def mutate(self, info, uuid):
user = info.context.user
ctx = SortingHatContext(user)
individual = delete_identity(ctx, uuid)
return DeleteIdentity(
uuid=uuid,
individual=individual
)
class Lock(graphene.Mutation):
class Arguments:
uuid = graphene.String()
uuid = graphene.Field(lambda: graphene.String)
individual = graphene.Field(lambda: IndividualType)
@check_auth
def mutate(self, info, uuid):
user = info.context.user
ctx = SortingHatContext(user)
individual = lock(ctx, uuid)
return Lock(
uuid=uuid,
individual=individual
)
class Unlock(graphene.Mutation):
class Arguments:
uuid = graphene.String()
uuid = graphene.Field(lambda: graphene.String)
individual = graphene.Field(lambda: IndividualType)
@check_auth
def mutate(self, info, uuid):
user = info.context.user
ctx = SortingHatContext(user)
individual = unlock(ctx, uuid)
return Unlock(
uuid=uuid,
individual=individual
)
class UpdateProfile(graphene.Mutation):
class Arguments:
uuid = graphene.String()
data = ProfileInputType()
uuid = graphene.Field(lambda: graphene.String)
individual = graphene.Field(lambda: IndividualType)
@check_auth
def mutate(self, info, uuid, data):
user = info.context.user
ctx = SortingHatContext(user)
individual = update_profile(ctx, uuid, **data)
return UpdateProfile(
uuid=individual.mk,
individual=individual
)
class MoveIdentity(graphene.Mutation):
class Arguments:
from_uuid = graphene.String()
to_uuid = graphene.String()
uuid = graphene.Field(lambda: graphene.String)
individual = graphene.Field(lambda: IndividualType)
@check_auth
def mutate(self, info, from_uuid, to_uuid):
user = info.context.user
ctx = SortingHatContext(user)
individual = move_identity(ctx, from_uuid, to_uuid)
return MoveIdentity(
uuid=individual.mk,
individual=individual
)
class Merge(graphene.Mutation):
class Arguments:
from_uuids = graphene.List(graphene.String)
to_uuid = graphene.String()
uuid = graphene.Field(lambda: graphene.String)
individual = graphene.Field(lambda: IndividualType)
@check_auth
def mutate(self, info, from_uuids, to_uuid):
user = info.context.user
ctx = SortingHatContext(user)
individual = merge(ctx, from_uuids, to_uuid)
return Merge(
uuid=individual.mk,
individual=individual
)
class UnmergeIdentities(graphene.Mutation):
class Arguments:
uuids = graphene.List(graphene.String)
uuids = graphene.Field(lambda: graphene.List(graphene.String))
individuals = graphene.Field(lambda: graphene.List(IndividualType))
@check_auth
def mutate(self, info, uuids):
user = info.context.user
ctx = SortingHatContext(user)
individuals = unmerge_identities(ctx, uuids)
uuids = [individual.mk for individual in individuals]
return UnmergeIdentities(
uuids=uuids,
individuals=individuals
)
class Enroll(graphene.Mutation):
class Arguments:
uuid = graphene.String()
organization = graphene.String()
from_date = graphene.DateTime(required=False)
to_date = graphene.DateTime(required=False)
force = graphene.Boolean(required=False)
uuid = graphene.Field(lambda: graphene.String)
individual = graphene.Field(lambda: IndividualType)
@check_auth
def mutate(self, info, uuid, organization,
from_date=None, to_date=None,
force=False):
user = info.context.user
ctx = SortingHatContext(user)
individual = enroll(ctx, uuid, organization,
from_date=from_date, to_date=to_date,
force=force)
return Enroll(
uuid=individual.mk,
individual=individual
)
class Withdraw(graphene.Mutation):
class Arguments:
uuid = graphene.String()
organization = graphene.String()
from_date = graphene.DateTime(required=False)
to_date = graphene.DateTime(required=False)
uuid = graphene.Field(lambda: graphene.String)
individual = graphene.Field(lambda: IndividualType)
@check_auth
def mutate(self, info, uuid, organization, from_date=None, to_date=None):
user = info.context.user
ctx = SortingHatContext(user)
individual = withdraw(ctx, uuid, organization,
from_date=from_date, to_date=to_date)
return Withdraw(
uuid=individual.mk,
individual=individual
)
class UpdateEnrollment(graphene.Mutation):
class Arguments:
uuid = graphene.String()
organization = graphene.String()
from_date = graphene.DateTime()
to_date = graphene.DateTime()
new_from_date = graphene.DateTime(required=False)
new_to_date = graphene.DateTime(required=False)
force = graphene.Boolean(required=False)
uuid = graphene.Field(lambda: graphene.String)
individual = graphene.Field(lambda: IndividualType)
@check_auth
def mutate(self, info, uuid, organization,
from_date, to_date,
new_from_date=None, new_to_date=None,
force=True):
user = info.context.user
ctx = SortingHatContext(user)
individual = update_enrollment(ctx, uuid, organization,
from_date=from_date,
to_date=to_date,
new_from_date=new_from_date,
new_to_date=new_to_date,
force=force)
return UpdateEnrollment(
uuid=individual.mk,
individual=individual
)
class RecommendAffiliations(graphene.Mutation):
class Arguments:
uuids = graphene.List(graphene.String,
required=False)
job_id = graphene.Field(lambda: graphene.String)
@check_auth
def mutate(self, info, uuids=None):
user = info.context.user
ctx = SortingHatContext(user)
job = enqueue(recommend_affiliations, ctx, uuids)
return RecommendAffiliations(
job_id=job.id
)
class RecommendMatches(graphene.Mutation):
class Arguments:
source_uuids = graphene.List(graphene.String)
target_uuids = graphene.List(graphene.String,
required=False)
criteria = graphene.List(graphene.String)
verbose = graphene.Boolean(required=False)
job_id = graphene.Field(lambda: graphene.String)
@check_auth
def mutate(self, info, source_uuids, criteria, target_uuids=None, verbose=False):
user = info.context.user
ctx = SortingHatContext(user)
job = enqueue(recommend_matches, ctx, source_uuids, target_uuids, criteria, verbose)
return RecommendMatches(
job_id=job.id
)
class Affiliate(graphene.Mutation):
class Arguments:
uuids = graphene.List(graphene.String,
required=False)
job_id = graphene.Field(lambda: graphene.String)
@check_auth
def mutate(self, info, uuids=None):
user = info.context.user
ctx = SortingHatContext(user)
job = enqueue(affiliate, ctx, uuids)
return Affiliate(
job_id=job.id
)
class Unify(graphene.Mutation):
class Arguments:
source_uuids = graphene.List(graphene.String)
target_uuids = graphene.List(graphene.String,
required=False)
criteria = graphene.List(graphene.String)
job_id = graphene.Field(lambda: graphene.String)
@check_auth
def mutate(self, info, source_uuids, criteria, target_uuids=None):
user = info.context.user
ctx = SortingHatContext(user)
job = enqueue(unify, ctx, source_uuids, target_uuids, criteria)
return Unify(
job_id=job.id
)
class SortingHatQuery:
countries = graphene.Field(
CountryPaginatedType,
page_size=graphene.Int(),
page=graphene.Int(),
filters=CountryFilterType(required=False),
description='Find countries.'
)
organizations = graphene.Field(
OrganizationPaginatedType,
page_size=graphene.Int(),
page=graphene.Int(),
filters=OrganizationFilterType(required=False),
description='Find organizations.'
)
individuals = graphene.Field(
IdentityPaginatedType,
page_size=graphene.Int(),
page=graphene.Int(),
filters=IdentityFilterType(required=False),
order_by=graphene.String(required=False),
description='Find individuals.'
)
transactions = graphene.Field(
TransactionPaginatedType,
page_size=graphene.Int(),
page=graphene.Int(),
filters=TransactionFilterType(required=False),
description='Find transactions.'
)
operations = graphene.Field(
OperationPaginatedType,
page_size=graphene.Int(),
page=graphene.Int(),
filters=OperationFilterType(required=False),
description='Find operations.'
)
job = graphene.Field(
JobType,
job_id=graphene.String(),
description='Find a single job by its id.'
)
jobs = graphene.Field(
JobPaginatedType,
page_size=graphene.Int(),
page=graphene.Int(),
description='Get all jobs.'
)
@check_auth
def resolve_countries(self, info, filters=None,
page=1,
page_size=settings.DEFAULT_GRAPHQL_PAGE_SIZE):
query = Country.objects.order_by('code')
if filters and 'code' in filters:
query = query.filter(code=filters['code'])
if filters and 'term' in filters:
query = query.filter(name__icontains=filters['term'])
return CountryPaginatedType.create_paginated_result(query,
page,
page_size=page_size)
    @check_auth
    def resolve_organizations(self, info, filters=None,
                              page=1,
                              page_size=settings.DEFAULT_GRAPHQL_PAGE_SIZE,
                              **kwargs):
        """Find organizations, optionally filtered, as a paginated result."""
        query = Organization.objects.order_by('name')

        if filters and 'name' in filters:
            query = query.filter(name=filters['name'])
        if filters and 'term' in filters:
            search_term = filters['term']
            # Match either the organization name or any of its domains.
            query = query.filter(Q(name__icontains=search_term) |
                                 Q(name__in=Subquery(Domain.objects
                                                     .filter(domain__icontains=search_term)
                                                     .values_list('organization__name'))))

        return OrganizationPaginatedType.create_paginated_result(query,
                                                                 page,
                                                                 page_size=page_size)
    @check_auth
    def resolve_individuals(self, info, filters=None,
                            page=1,
                            page_size=settings.DEFAULT_GRAPHQL_PAGE_SIZE,
                            order_by='mk',
                            **kwargs):
        """Find individuals, optionally filtered, as a paginated result.

        :param filters: `IdentityFilterType` fields (snake_case keys)
        :param order_by: model field to sort by; camelCase is converted
        :raises InvalidFilterError: for malformed date filter values
        """
        query = Individual.objects.order_by(to_snake_case(order_by))

        if filters and 'uuid' in filters:
            indv_uuid = filters['uuid']
            # Search among all the individuals and their identities
            query = query.filter(mk__in=Subquery(Identity.objects
                                                 .filter(Q(uuid=indv_uuid) |
                                                         Q(individual__mk=indv_uuid))
                                                 .values_list('individual__mk')))
        if filters and 'term' in filters:
            search_term = filters['term']
            # Filter matching individuals by their mk and their identities
            query = query.filter(mk__in=Subquery(Identity.objects
                                                 .filter(Q(name__icontains=search_term) |
                                                         Q(email__icontains=search_term) |
                                                         Q(username__icontains=search_term) |
                                                         Q(individual__profile__name__icontains=search_term) |
                                                         Q(individual__profile__email__icontains=search_term))
                                                 .values_list('individual__mk')))
        if filters and 'is_locked' in filters:
            query = query.filter(is_locked=filters['is_locked'])
        if filters and 'is_bot' in filters:
            query = query.filter(mk__in=Subquery(Profile.objects
                                                 .filter(is_bot=filters['is_bot'])
                                                 .values_list('individual__mk')))
        if filters and 'gender' in filters:
            query = query.filter(profile__gender=filters['gender'])
        if filters and 'country' in filters:
            country = filters['country']
            # Accepts a country name fragment, an alpha-2 or an alpha-3 code.
            query = query.filter(mk__in=Subquery(Profile.objects
                                                 .filter(Q(country__name__icontains=country) |
                                                         Q(country__code=country) |
                                                         Q(country__alpha3=country))
                                                 .values_list('individual__mk')))
        if filters and 'source' in filters:
            query = query.filter(identities__source=filters['source'])
        if filters and 'enrollment' in filters:
            query = query.filter(enrollments__organization__name__icontains=filters['enrollment'])
        if filters and 'enrollment_date' in filters:
            # Accepted date format is ISO 8601, YYYY-MM-DDTHH:MM:SS
            try:
                filter_data = parse_date_filter(filters['enrollment_date'])
            except ValueError as e:
                raise InvalidFilterError(filter_name='enrollment_date', msg=e)
            except InvalidDateError as e:
                raise InvalidFilterError(filter_name='enrollment_date', msg=e)
            date1 = filter_data['date1']
            date2 = filter_data['date2']
            if filter_data['operator']:
                operator = filter_data['operator']
                # Comparison operators constrain the enrollment period's
                # start/end; `..` selects enrollments overlapping the range.
                if operator == '<':
                    query = query.filter(mk__in=Subquery(Enrollment.objects
                                                         .filter(start__lt=date1)
                                                         .values_list('individual__mk')))
                elif operator == '<=':
                    query = query.filter(mk__in=Subquery(Enrollment.objects
                                                         .filter(start__lte=date1)
                                                         .values_list('individual__mk')))
                elif operator == '>':
                    query = query.filter(mk__in=Subquery(Enrollment.objects
                                                         .filter(end__gt=date1)
                                                         .values_list('individual__mk')))
                elif operator == '>=':
                    query = query.filter(mk__in=Subquery(Enrollment.objects
                                                         .filter(end__gte=date1)
                                                         .values_list('individual__mk')))
                elif operator == '..':
                    query = query.filter(mk__in=Subquery(Enrollment.objects
                                                         .filter(start__lte=date2,
                                                                 end__gte=date1)
                                                         .values_list('individual__mk')))
        if filters and 'is_enrolled' in filters:
            query = query.filter(enrollments__isnull=not filters['is_enrolled'])
        if filters and 'last_updated' in filters:
            # Accepted date format is ISO 8601, YYYY-MM-DDTHH:MM:SS
            try:
                filter_data = parse_date_filter(filters['last_updated'])
            except ValueError as e:
                raise InvalidFilterError(filter_name='last_updated', msg=e)
            except InvalidDateError as e:
                raise InvalidFilterError(filter_name='last_updated', msg=e)
            date1 = filter_data['date1']
            date2 = filter_data['date2']
            if filter_data['operator']:
                operator = filter_data['operator']
                if operator == '<':
                    query = query.filter(last_modified__lt=date1)
                elif operator == '<=':
                    query = query.filter(last_modified__lte=date1)
                elif operator == '>':
                    query = query.filter(last_modified__gt=date1)
                elif operator == '>=':
                    query = query.filter(last_modified__gte=date1)
                elif operator == '..':
                    query = query.filter(last_modified__range=(date1, date2))

        return IdentityPaginatedType.create_paginated_result(query,
                                                             page,
                                                             page_size=page_size)
    @check_auth
    def resolve_job(self, info, job_id):
        """Look up a single RQ job and shape its payload by job type.

        :param job_id: RQ job identifier
        """
        job = find_job(job_id)

        status = job.get_status()
        # Job type is the trailing component of the enqueued function path.
        job_type = job.func_name.split('.')[-1]
        enqueued_at = job.enqueued_at

        result = None
        errors = None

        # Finished jobs store their payload under 'results' (affiliate
        # and unify also carry an 'errors' list).
        if (job.result) and (job_type == 'affiliate'):
            errors = job.result['errors']
            result = [
                AffiliationResultType(uuid=uuid, organizations=orgs)
                for uuid, orgs in job.result['results'].items()
            ]
        elif (job.result) and (job_type == 'recommend_affiliations'):
            result = [
                AffiliationRecommendationType(uuid=uuid, organizations=orgs)
                for uuid, orgs in job.result['results'].items()
            ]
        elif (job.result) and (job_type == 'recommend_matches'):
            result = [
                MatchesRecommendationType(uuid=uuid, matches=matches)
                for uuid, matches in job.result['results'].items()
            ]
        elif (job.result) and (job_type == 'unify'):
            errors = job.result['errors']
            result = [
                UnifyResultType(merged=job.result['results'])
            ]
        elif status == 'failed':
            errors = [job.exc_info]

        return JobType(job_id=job_id,
                       job_type=job_type,
                       status=status,
                       result=result,
                       errors=errors,
                       enqueued_at=enqueued_at)
@check_auth
def resolve_jobs(self, info, page=1, page_size=settings.DEFAULT_GRAPHQL_PAGE_SIZE):
jobs = get_jobs()
result = []
for job in jobs:
job_id = job.get_id()
status = job.get_status()
job_type = job.func_name.split('.')[-1]
enqueued_at = job.enqueued_at
result.append(JobType(job_id=job_id,
job_type=job_type,
status=status,
result=[],
errors=[],
enqueued_at=enqueued_at))
return JobPaginatedType.create_paginated_result(result,
page,
page_size=page_size)
@check_auth
def resolve_transactions(self, info, filters=None,
page=1,
page_size=settings.DEFAULT_GRAPHQL_PAGE_SIZE,
**kwargs):
query = Transaction.objects.order_by('created_at')
if filters and 'tuid' in filters:
query = query.filter(tuid=filters['tuid'])
if filters and 'name' in filters:
query = query.filter(name=filters['name'])
if filters and 'is_closed' in filters:
query = query.filter(is_closed=filters['isClosed'])
if filters and 'from_date' in filters:
query = query.filter(created_at__gte=filters['from_date'])
if filters and 'to_date' in filters:
query = query.filter(created_at__lte=filters['to_date'])
if filters and 'authored_by' in filters:
query = query.filter(authored_by=filters['authored_by'])
return TransactionPaginatedType.create_paginated_result(query,
page,
page_size=page_size)
@check_auth
def resolve_operations(self, info, filters=None,
page=1,
page_size=settings.DEFAULT_GRAPHQL_PAGE_SIZE,
**kwargs):
query = Operation.objects.order_by('timestamp')
if filters and 'ouid' in filters:
query = query.filter(ouid=filters['ouid'])
if filters and 'op_type' in filters:
query = query.filter(op_type=filters['op_type'])
if filters and 'entity_type' in filters:
query = query.filter(entity_type=filters['entity_type'])
if filters and 'target' in filters:
query = query.filter(target=filters['target'])
if filters and 'from_date' in filters:
query = query.filter(timestamp__gte=filters['from_date'])
if filters and 'to_date' in filters:
query = query.filter(timestamp__lte=filters['to_date'])
return OperationPaginatedType.create_paginated_result(query,
page,
page_size=page_size)
class SortingHatMutation(graphene.ObjectType):
    """Root GraphQL mutation type.

    Declares every mutation the API exposes: registry maintenance
    (organizations, domains, identities), profile/enrollment management,
    merge/unmerge operations, recommendation jobs, and JWT authentication.
    Each field's behavior lives in its corresponding mutation class; only
    the user-facing descriptions are defined here.
    """

    add_organization = AddOrganization.Field(
        description='Add an organization to the registry.'
    )
    delete_organization = DeleteOrganization.Field(
        description='Remove an organization from the registry. Related information\
        such as domains or enrollments is also removed.'
    )
    add_domain = AddDomain.Field(
        description='Add a new domain to an organization. The new domain is set\
        as a top domain by default. A domain can only be assigned to one organization.'
    )
    delete_domain = DeleteDomain.Field(
        description='Remove a domain from the registry.'
    )
    add_identity = AddIdentity.Field(
        description='Add a new identity to the registry. A new individual will be\
        also added and associated to the new identity unless an `uuid` is provided.\
        When `uuid` is set, it creates a new identity associated to the individual\
        defined by this identifier.'
    )
    delete_identity = DeleteIdentity.Field(
        description='Remove an identity from the registry. If the `uuid` also\
        belongs to an individual, this entry and those identities linked to it\
        will be removed too.'
    )
    update_profile = UpdateProfile.Field(
        description='Update an individual profile.'
    )
    move_identity = MoveIdentity.Field(
        description='Shift the identity identified by `from_uuid` to the individual\
        identified by `to_uuid`.'
    )
    lock = Lock.Field(
        description='Lock an individual so it cannot be modified.'
    )
    unlock = Unlock.Field(
        description='Unlock an individual so it can be modified.'
    )
    merge = Merge.Field(
        description='Join a list of individuals, defined in `from_uuid` by any of\
        their valid identities ids, into `to_uuid` individual. Identities and enrollments\
        related to each `from_uuid` will be assigned to `to_uuid`. In addition, each\
        `from_uuid` will be removed from the registry.'
    )
    unmerge_identities = UnmergeIdentities.Field(
        description='Separate a list of `uuid` identities, creating an individual for each one.'
    )
    enroll = Enroll.Field(
        description='Enroll an individual in an organization. Existing enrollments\
        for the same individual and organization which overlap with the new period\
        will be merged into a single enrollment.'
    )
    withdraw = Withdraw.Field(
        description='Withdraw an individual identified by `uuid` from the given\
        `organization` during the given period of time.'
    )
    update_enrollment = UpdateEnrollment.Field(
        description='Update one or more enrollments from an individual given a new\
        date range. By default, `force` is set to `true`. In case any of the new\
        dates are missing, the former value for that date will be preserved.'
    )
    recommend_affiliations = RecommendAffiliations.Field(
        description='Recommend organizations for a list of individuals based on their emails.'
    )
    recommend_matches = RecommendMatches.Field(
        description='Recommend identity matches for a list of individuals based\
        on a list of criteria composed by `email`, `name` and/or `username`.'
    )
    affiliate = Affiliate.Field(
        description='Affiliate a set of individuals using recommendations.'
    )
    unify = Unify.Field(
        description='Unify a set of individuals by merging them using matching recommendations.'
    )

    # JWT authentication
    token_auth = graphql_jwt.ObtainJSONWebToken.Field()
    verify_token = graphql_jwt.Verify.Field(description='Verify a JSON Web Token.')
    refresh_token = graphql_jwt.Refresh.Field(description='Refresh a JSON Web Token.')
|
987,488 | 10f92c61d572f98af2cd5f422457f156655f9ab5 | from contextlib import contextmanager
from py_sexpr import terms
import warnings
__metadata_to_wrap = terms.metadata
def metadata(line, col, filename, sexpr):
    """Wrapper over ``py_sexpr.terms.metadata`` that clamps line 0 to 1.

    https://github.com/purescript-python/purescript-python/issues/8
    """
    # Bug fix: the original used `line is 0`, an identity comparison against
    # an int literal. That emits a SyntaxWarning on CPython 3.8+ and relies
    # on small-int caching; an equality test is the correct check.
    if line == 0:
        line = 1
    return __metadata_to_wrap(line, col, filename, sexpr)
# one of following approaches is okay
@contextmanager
def workaround():
    """contextually invoke this when loading *.src.py

    Temporarily replaces the ``metadata`` entry in the globals of the
    wrapped ``py_sexpr.terms`` module with our patched wrapper, restoring
    the original on exit.
    """
    target_globals = __metadata_to_wrap.__globals__
    target_globals["metadata"] = metadata
    try:
        yield
    finally:
        # Always restore the original, even if the body raised.
        target_globals["metadata"] = __metadata_to_wrap
@contextmanager
def suppress_cpy38_literal_is():
    """https://github.com/purescript-python/purescript-python/issues/9

    Context manager that silences CPython 3.8+'s SyntaxWarning about
    ``is`` comparisons with literals; the previous filter state is
    restored on exit via ``catch_warnings``.
    """
    with warnings.catch_warnings():
        warnings.filterwarnings(
            action="ignore",
            message='"is" with a literal',
            category=SyntaxWarning,
        )
        yield
|
987,489 | bd3c43ac4eea33ff87908774159972b13438daeb | # #!/usr/bin/env python
# # -*- coding: utf-8 -*-
#
# # Example of `bridge' design pattern
# # This code is part of http://wp.me/p1Fz60-8y
# # Copyright (C) 2011 Radek Pazdera
#
# # This program is free software: you can redistribute it and/or modify
# # it under the terms of the GNU General Public License as published by
# # the Free Software Foundation, either version 3 of the License, or
# # (at your option) any later version.
#
# # This program is distributed in the hope that it will be useful,
# # but WITHOUT ANY WARRANTY; without even the implied warranty of
# # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# # GNU General Public License for more details.
#
# # You should have received a copy of the GNU General Public License
# # along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# c_ AbstractInterface
# """ Target interface.
# This is the target interface, that clients use.
# """
#
# ___ someFunctionality
# r_ N..
#
#
# c_ Bridge AI..
# """ Bridge class.
#
# This class forms a bridge between the target
# interface and background implementation.
# """
#
# ___ -
# __implementation _ N..
#
#
# c_ UseCase1 B..
# """ Variant of the target interface.
# This is a variant of the target Abstract interface.
# It can do something little differently and it can
# also use various background implementations through
# the bridge.
# """
#
# ___ - implementation
# __? ?
#
# ___ someFunctionality
# print("UseCase1: ", __?.aF..
#
#
# c_ UseCase2 B..
# ___ - implementation
# __? ?
#
# ___ someFunctionality
# print("UseCase2: ", __?.aF..
#
#
# c_ ImplementationInterface
# """ Interface for the background implementation.
# This class defines how the Bridge communicates
# with various background implementations.
# """
#
# ___ anotherFunctionality
# r_ N...
#
#
# c_ Linux(II..
# """ Concrete background implementation.
# A variant of background implementation, in this
# case for Linux!
# """
#
# ___ anotherFunctionality
# print("Linux!")
#
#
# c_ Windows II..
# ___ anotherFunctionality
# print("Windows.")
#
#
# ___ main
# linux _ L..
# windows _ W..
#
# # Couple of variants under a couple
# # of operating systems.
# useCase _ U_1 l..
# ?.sF...
#
# useCase _ UseCase1 w..
# ?.sF...
#
# useCase _ UseCase2 l..
# ?.sF...
#
# useCase _ UseCase2 w..
# ?.sF...
#
#
# __ ______ __ ______
# ? |
987,490 | d91627af0ee0c94eb1c2ae0f4eca417d93ff3d5f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Schooner - Course Management System
# University of Turku / Faculty of Technology / Department of Computing
# (c) 2021, Jani Tammi <jasata@utu.fi>
#
# SubProcess.py - Class to run a subprocess and collect output
# 2021-09-11 Initial version.
#
import subprocess
class SubProcess:
    """Run an external command synchronously and capture its output.

    After construction the instance exposes:
      command    -- the original command string
      returncode -- the process exit code, or -2 if launching failed
      stdout/stderr -- decoded output when PIPE was requested, else None
                       (stderr holds the exception text on launch failure)
    """

    def __init__(
        self,
        cmd: str,
        shell: bool = False,
        stdout = subprocess.PIPE,
        stderr = subprocess.PIPE
    ):
        """If output is not wanted, set stdout/stderr = subprocess.DEVNULL."""
        self.command = cmd
        try:
            argv = cmd
            if not shell:
                # Without a shell, split on spaces ourselves; a literal pair
                # of empty quotes becomes an empty argument (needed for
                # commands such as: ssh-keygen ... -N "").
                argv = [
                    '' if token in ('""', "''") else token
                    for token in cmd.split(" ")
                ]
            completed = subprocess.run(
                argv,
                shell = shell,
                stdout = stdout,
                stderr = stderr
            )
            self.returncode = completed.returncode
            self.stdout = completed.stdout.decode("utf-8") if stdout == subprocess.PIPE else None
            self.stderr = completed.stderr.decode("utf-8") if stderr == subprocess.PIPE else None
        except Exception as error:
            # Launch failure (bad executable, etc.): report via attributes.
            self.returncode = -2
            self.stdout = ""
            self.stderr = str(error)
# EOF |
987,491 | f1e2a59b84c968c4d4ece436736975d832a345e0 | from django.apps import AppConfig
class LawblogConfig(AppConfig):
    """Django application configuration for the ``lawBlog`` app."""
    name = 'lawBlog'
|
987,492 | bd8b2c967955572bd2f1b0fe70079b2b5fd9f00d | # from bottle import Bottle, route, run, get, template, post, request
# from cred import cred_consumer_key, cred_consumer_secret, cred_access_token, cred_access_token_secret
# import pymysql
# import time
# import tweepy
# import json
# import os
# import datetime
# import python_jwt as jwt
# import Crypto.PublicKey.RSA as RSA
# import logging
# import jws
#
# mysql_config = {
# 'host' : os.environ['MYSQL_ENDPOINT'],
# 'db' : os.environ['MYSQL_DATABASE'],
# 'user' : os.environ['MYSQL_USER'],
# 'passwd': os.environ['MYSQL_PASSWORD']
# }
# logger = logging.getLogger('tw-svc')
# logger.setLevel(logging.DEBUG)
# logger.propagete = False
# ch = logging.StreamHandler()
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# ch.setFormatter(formatter)
# logger.addHandler(ch)
#
# app = Bottle()
#
# # Authentication details. To obtain these visit dev.twitter.com
# consumer_key = cred_consumer_key
# consumer_secret = cred_consumer_secret
# access_token = cred_access_token
# access_token_secret = cred_access_token_secret
#
# #clave publica para verificar token
# public_key_file = os.path.join(os.path.dirname(__file__), 'key', 'key.pub')
# with open(public_key_file, 'r') as fd:
# public_key = RSA.importKey(fd.read())
#
# class BreakLoopException(Exception):
# pass
#
# # This is the listener, resposible for receiving data
# class MyStreamListener(tweepy.StreamListener):
#
# def __init__(self,duration):
# tweepy.StreamListener.__init__(self)
# self.stream = None
# self.count = 0
# self.duration = duration
# self.start_time = None
# self.end_time = None
# return
#
# #def on_data(self, data):
# #logger.debug("\non_data\n")
# # Twitter returns data in JSON format - we need to decode it first
# #decoded = json.loads(data)
# # Also, we convert UTF-8 to ASCII ignoring all bad characters sent by users
# #print '@%s: %s' % (decoded['user']['screen_name'], decoded['text'].encode('ascii', 'ignore'))
# #return True
#
# def on_connect(self):
# logger.debug("\non_connect\n")
# self.start_time = time.time()
# self.end_time = self.start_time + self.duration
# return
#
# def keep_alive(self):
# logger.debug("\nkeep_alive\n")
# now = time.time()
# if now > self.end_time:
# logger.debug("\nme tengo que ir\n")
# raise BreakLoopException('break the lop!')
#
# def on_error(self, status):
# print status
#
# def on_status(self, status):
# logger.debug("\non_status\n")
# now = time.time()
# if now < self.end_time:
# logger.debug("\ncuento el tweet\n")
# self.count = self.count + 1
# print self.count
# else:
# logger.debug('should disconnect')
# return False
#
#
# @app.post('/tw-svc/encuesta')
# def crear_encuesta():
# logger.info('ENCUESTA ')
# token_type, token = request.headers['Authorization'].split()
# print token
# usr = ""
# try:
# header, claims = jwt.verify_jwt(token, public_key, ['RS256'])
# except jws.exceptions.SignatureError:
# message = "Invalid token"
#
# usr = claims['userid']
#
# logger.info('USER : %s ', usr)
# auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
# auth.set_access_token(access_token, access_token_secret)
#
# logger.info('Processing create ')
# data = request.json
# paramHash = data['hash']
# paramTime = data['time']
# paramSurvey = data['surveyname']
# logger.info('data : %s', paramHash)
#
# my_stream_listener = MyStreamListener(10) # paramTime es la duracion
# my_stream = tweepy.Stream( auth=auth, listener=my_stream_listener, chunk_size=1 )
#
# my_stream_listener.stream = my_stream
# try:
# my_stream.filter(track=[paramHash])
# except BreakLoopException:
# pass
# logger.debug('finalizo la cuenta')
# total = my_stream_listener.count
# logger.info('total : %s', total)
# my_stream.disconnect()
# # logger.info()
# cnx = None
# try:
# logger.info('connect : %s', mysql_config)
# cnx = pymysql.connect(**mysql_config)
# cursor = cnx.cursor()
# insert_test = "INSERT INTO jobs (username, surveyname, hash, tiempo, count) VALUES (%s, %s,%s, %s, %s)"
# data = (usr, paramSurvey, paramHash, paramTime, total) # tupla s
# cursor.execute(insert_test, data)
# cnx.commit()
# cursor.close()
# ret = {"status": "OK"}
# except pymysql.Error as err:
# logger.info('error : %s', err)
# ret = {"status": "FAIL", "msg": err}
# finally:
# if cnx:
# cnx.close()
# return ret
#
# @app.get('/tw-svc/all')
# def retornar_encuestas():
# token_type, token = request.headers['Authorization'].split()
# ret = {"status":"OK"}
# usr = ""
# try:
# header, claims = jwt.verify_jwt(token, public_key, ['RS256'])
# except jws.exceptions.SignatureError:
# message = "Invalid token"
# usr = claims['userid']
#
# logger.info('USER : %s', usr)
# print "antes de cnx"
# print usr
# # cnx = None
# print "hago consulta"
# try:
# logger.info('connect : %s', mysql_config)
# cnx = pymysql.connect(**mysql_config)
# cursor = cnx.cursor()
# select_test = "SELECT * FROM jobs WHERE username = %s"
# logger.info('ahoraHaceElSELECT : %s', select_test)
# data = (claims['userid'])
# cursor.execute(select_test, data)
# results = cursor.fetchall()
# logger.info('results : %s', results)
# ret = {"status":"OK", "table":results}
# cnx.commit()
# cursor.close()
# except pymysql.Error as err:
# logger.info('error : %s', err)
# ret = {"status": "FAIL", "msg": err}
# finally:
# logger.info('FINALLY')
# if cnx:
# cnx.close()
# logger.info('RETURN RET: %s', ret)
# return ret
#
# @app.route('/jobs', method='GET')
# def get_jobs():
# logger.info('Processing GET /jobs')
# token_type, token = request.headers['Authorization'].split()
# logger.debug('token_type: {}, token: '.format(token_type, token))
#
# try:
# header, claims = jwt.verify_jwt(token, public_key, ['RS256'])
# except jws.exceptions.SignatureError:
# logger.warn('invalid token signature!')
# message = "invalid token"
# response.status = 400
# ret_data = {
# "message": message
# }
#
# results = db_get_jobs(claims['userId'])
#
# if results.ok:
# ret_data = {
# 'jobs' : []
# }
# return ret_data
#
# run(app, host='0.0.0.0', port=8088)
|
987,493 | 2434e0e2a7ffaf53a779aa120ce255edea6ceb75 | from functools import partial
import json
import os
from ubuntui.ev import EventLoop
from subprocess import run, PIPE
from conjureup import controllers
from conjureup import juju
from conjureup import async
from conjureup.app_config import app
from conjureup.ui.views.service_walkthrough import ServiceWalkthroughView
from conjureup import utils
from conjureup.api.models import model_info
from .common import get_bundleinfo
class DeployController:
def __init__(self):
self.bundle_filename = None
self.bundle = None
self.services = []
self.svc_idx = 0
self.showing_error = False
self.is_predeploy_queued = False
def _handle_exception(self, tag, exc):
utils.pollinate(app.session_id, tag)
app.ui.show_exception_message(exc)
self.showing_error = True
EventLoop.remove_alarms()
def _pre_deploy_exec(self):
""" runs pre deploy script if exists
"""
app.env['JUJU_PROVIDERTYPE'] = model_info(
juju.get_current_model())['provider-type']
pre_deploy_sh = os.path.join(app.config['spell-dir'],
'conjure/steps/00_pre-deploy')
if os.path.isfile(pre_deploy_sh) \
and os.access(pre_deploy_sh, os.X_OK):
utils.pollinate(app.session_id, 'J001')
msg = "Running pre-deployment tasks."
app.log.debug(msg)
app.ui.set_footer(msg)
return run(pre_deploy_sh,
shell=True,
stdout=PIPE,
stderr=PIPE,
env=app.env)
return json.dumps({'message': 'No pre deploy necessary',
'returnCode': 0,
'isComplete': True})
def _pre_deploy_done(self, future):
try:
result = json.loads(future.result().stdout.decode())
except AttributeError:
result = json.loads(future.result())
except:
return self._handle_exception(
'E003',
Exception(
"Problem with pre-deploy: \n{}, ".format(
future.result())))
app.log.debug("pre_deploy_done: {}".format(result))
if result['returnCode'] > 0:
utils.pollinate(app.session_id, 'E003')
return self._handle_exception('E003', Exception(
'There was an error during the pre '
'deploy processing phase: {}.'.format(result)))
else:
app.ui.set_footer("Pre-deploy processing done.")
def finish(self, single_service=None):
"""handles deployment
Arguments:
single_service: a dict for the service that was just
configured. finish will schedule a deploy for it and
call render() again to display the next one.
if service is None, schedules deploys for all remaining services,
schedules relations, then continues to next controller
"""
if single_service:
juju.deploy_service(single_service,
app.ui.set_footer,
partial(self._handle_exception, "ED"))
self.svc_idx += 1
return self.render()
else:
for service in self.services[self.svc_idx:]:
juju.deploy_service(service,
app.ui.set_footer,
partial(self._handle_exception, "ED"))
juju.set_relations(self.services,
app.ui.set_footer,
partial(self._handle_exception, "ED"))
if app.bootstrap.running and not app.bootstrap.running.done():
return controllers.use('bootstrapwait').render()
else:
return controllers.use('deploystatus').render()
utils.pollinate(app.session_id, 'PC')
def render(self):
if not self.is_predeploy_queued:
try:
future = async.submit(self._pre_deploy_exec,
partial(self._handle_exception, 'E003'),
queue_name=juju.JUJU_ASYNC_QUEUE)
self.is_predeploy_queued = True
future.add_done_callback(self._pre_deploy_done)
except Exception as e:
return self._handle_exception('E003', e)
if self.showing_error:
return
if not self.bundle:
self.bundle_filename, self.bundle, self.services = get_bundleinfo()
juju.add_machines([md for _, md in self.bundle.machines.items()],
exc_cb=partial(self._handle_exception, "ED"))
n_total = len(self.services)
if self.svc_idx >= n_total:
return self.finish()
service = self.services[self.svc_idx]
wv = ServiceWalkthroughView(service, self.svc_idx, n_total,
app.metadata_controller, self.finish)
app.ui.set_header("Review and Configure Applications")
app.ui.set_body(wv)
_controller_class = DeployController
|
987,494 | f4f0dde0e2c0b06d56750cb4788ba66861caf244 | # age=int(input("请输入年龄: "))
#
# if age>=18:
# print("恭喜你,你成年了")
# elif 18>age>=0:
# print("你还太小了 ")
# else:
# print("你输入有误")
#
#
# l=[[5,6,9,3,7],[1,2,3,4,]]
# for i in l:
# print(i)
# for a in i:
# print(a)
#
# sum=0
# for i in range(101):
# sum=sum+i
# print(sum)
# for i in range(1,6):
# for j in range(1,i+1):
# print('*',end='')
# print()
# def zhengshu(m,k):
# sum=0
# for i in range(m,k):
# sum+=i
# # print("求和的值是:",sum)
# return sum
# zhengshu(1,6)
#
# i=1
# sum=0
# while i<=100:
# sum = sum + i
# i=i+1
#
# print("总数",sum)
# zhenghsu(name=100)
#
#
#
# def add_two(a,b):
# return (a+b)
# print(add_two(1,3))
# def che_list(l):
# if len(l)>2:
# new=l[0:2]
# return new
#
#
# l=[1,2,3,4,5]
# new=che_list(l)
# print(new)
#动态参数/不定长参数*args arguments
# def make_sandwich(*args):
# # print(*args)
# all=""
# for item in args:
# all=all+item
# all=all+'.'
# print("你的包含了这些菜品"+all)
# return all
# make_sandwich("生菜","菠菜",'土豆')
# make_sandwich("wohaiza")
# lls = (78, 'stupid')
# dds = dict(k1=1, k2=2, k3=3, name='stupid', num=76)
#
# def unpack(**word):
#
# print('hope stupid..',word.values())
#
#
# unpack(**dds) |
987,495 | cc2f394bc6623013980a56a313b3446544b6279d | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-06 09:22
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alters ``lyceum_news.pub_date``.

    NOTE(review): the default is the naive timestamp captured when
    ``makemigrations`` ran (2017-01-06), not a callable such as
    ``timezone.now`` -- presumably a generation artifact; confirm before
    reusing this default elsewhere.
    """

    dependencies = [
        ('lyceum', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='lyceum_news',
            name='pub_date',
            field=models.DateTimeField(default=datetime.datetime(2017, 1, 6, 13, 22, 32, 176091), verbose_name='Дата и время публикации'),
        ),
    ]
|
987,496 | 3de4dad21e9eaa9fb250ac6ac9eb9df9b69d2a84 | # C111152 임종욱
from random import randint

# Keep rolling a pair of dice until their sum equals the total the user asked
# for; every non-matching roll is echoed before re-rolling.
die_one = randint(1, 6)
die_two = randint(1, 6)
Sum = die_one + die_two
user = int(input('원하는 두 주사위의 합을 입력하시오? '))
while Sum != user:
    print('첫번째 주사위=', die_one, '두번째 주사위=', die_two, '합 = ', Sum)
    die_one = randint(1, 6)
    die_two = randint(1, 6)
    Sum = die_one + die_two
print('첫번째 주사위= ', die_one, ' 두번째 주사위= ', die_two, '합 = ', Sum)
print('원하는 합이 나왔습니다. 합=', Sum)
987,497 | 5545e4614a8d1208728c6dcbfa45ee0f5bad1039 | import Database
import datetime
import compiler
def rodarFilaArquivos(every_minute):
    """Poll the submissions queue and compile pending entries.

    Runs forever. Once per `every_minute`-minute boundary it fetches every
    row in `envios` with status 0 (pending), compiles each submitted file
    against the five most recent cube states, and marks the row as 1
    (success) or 2 (error).

    NOTE(review): the while loop has no sleep, so this busy-waits and will
    peg a CPU core between checks -- consider time.sleep(); confirm intent.
    """
    print("Rodando Fila de Arquivos a cada " + str(every_minute) + " minuto(s)")
    # `rodou` guards against processing the same minute more than once;
    # it resets whenever the wall-clock minute changes.
    rodou = False
    minute = datetime.datetime.now().strftime('%M')
    while(True):
        if minute != datetime.datetime.now().strftime('%M'):
            minute = datetime.datetime.now().strftime('%M')
            rodou = False
        # Fire only on minutes divisible by `every_minute`, once per minute.
        if not (int(datetime.datetime.now().strftime('%M')) % every_minute) and not rodou:
            print("Verificando..")
            rodou = True
            with Database.Database('rubik_platform.db') as db:
                envios = db.query('SELECT * FROM envios WHERE env_status = 0')
            if envios:
                for envio in envios:
                    print(envio)
                    # Latest five cube states, robot-reported first.
                    with Database.Database('rubik_platform.db') as db:
                        estados = db.query(
                            'SELECT * FROM estados_cubo ORDER BY cub_robo DESC LIMIT 5', ())
                    if estados:
                        comp = compiler.Compiler(envio['env_filename'])
                        success = comp.Compile(estados)
                        if success:
                            print("sucesso")
                            # Status 1: compiled successfully.
                            with Database.Database('rubik_platform.db') as db:
                                db.execute("UPDATE envios SET env_status = ? WHERE env_id = ?", (1, envio['env_id']))
                        else:
                            print("erro")
                            # Status 2: compilation failed.
                            with Database.Database('rubik_platform.db') as db:
                                db.execute("UPDATE envios SET env_status = ? WHERE env_id = ?", (2, envio['env_id']))
                    else:
                        print("Não há estados disponiveis")
            print("fim")
# Module-level trace prints (apparently left from debugging) plus the
# script entry point: run the queue every minute.
print(__name__)
print("CronArquivo")
if __name__ == "__main__":
    rodarFilaArquivos(1)
|
987,498 | fb1115b2e725ac6c91a253063a5be034b57ae4bf | from StringIO import StringIO
import os
from tempfile import mkstemp
import unittest
from mock import patch, mock_open
import sys
from striketracker import Command, APIError
class TestStrikeTrackerCommand(unittest.TestCase):
def setUp(self):
self.fd, self.cache = mkstemp()
self._stdout = sys.stdout
sys.stdout = StringIO()
self._stderr = sys.stderr
sys.stderr = StringIO()
self._stdin = sys.stdin
sys.stdin = StringIO()
def tearDown(self):
os.close(self.fd)
os.unlink(self.cache)
sys.stdout = self._stdout
sys.stderr = self._stderr
sys.stdin = self._stdin
def test_print_help(self):
sys.argv = ['striketracker']
command = Command()
self.assertIn(
'usage: striketracker [-h] [--token TOKEN] [-v] action\n\nCommand line interface to the Highwinds CDN',
sys.stdout.getvalue())
def test_print_unknown_command(self):
sys.argv = ['striketracker', 'nuke']
command = Command()
self.assertIn(
'Unknown command: nuke\n',
sys.stderr.getvalue())
@patch('getpass.getpass')
@patch('__builtin__.raw_input')
@patch('striketracker.APIClient.create_token')
def test_init(self, create_token, raw_input, getpw):
sys.argv = ['striketracker', 'init']
create_token.return_value = 'rikkitikkitavi'
raw_input.return_value = 'bob'
getpw.return_value = 'password1'
command = Command(cache=self.cache)
self.assertTrue(create_token.called)
create_token.assert_called_with(username='bob', password='password1', application=None)
self.assertEqual(command.cache.get('token'), 'rikkitikkitavi')
self.assertEqual('Initializing configuration...\nSuccessfully saved token\n',
sys.stdout.getvalue())
@patch('striketracker.APIClient.create_token')
def test_init_token_supplied(self, create_token):
sys.argv = ['striketracker', 'init', '--token', 'foobar']
command = Command(cache=self.cache)
self.assertFalse(create_token.called)
self.assertEqual(command.cache.get('token'), 'foobar')
self.assertEqual('Initializing configuration...\nSuccessfully saved token\n',
sys.stdout.getvalue())
@patch('striketracker.APIClient.version')
def test_version(self, version):
sys.argv = ['striketracker', 'version']
version.return_value = '3.0.4-1600'
Command()
self.assertTrue(version.called)
self.assertEqual('3.0.4-1600\n', sys.stdout.getvalue())
@patch('striketracker.APIClient.version')
@patch('logging.getLogger')
def test_version_verbose(self, getLogger, version):
sys.argv = ['striketracker', 'version', '--verbose']
version.return_value = '3.0.4-1600'
Command()
getLogger.assert_called_with('requests.packages.urllib3')
self.assertTrue(version.called)
self.assertEqual('3.0.4-1600\n', sys.stdout.getvalue())
@patch('striketracker.APIClient.me')
@patch('striketracker.ConfigurationCache.get')
def test_me(self, get, me):
sys.argv = ['striketracker', 'me']
get.return_value = 'cachedtoken'
me.return_value = {
'firstName': 'Bob',
'lastName': 'Saget'
}
command = Command()
self.assertTrue(me.called)
self.assertEqual('cachedtoken', command.client.token)
self.assertEqual("""firstName: Bob
lastName: Saget
""", sys.stdout.getvalue())
@patch('striketracker.APIClient.me')
@patch('striketracker.ConfigurationCache.get')
def test_me_token(self, get, me):
sys.argv = ['striketracker', 'me', '--token', 'foobarwinniethefoobar']
get.return_value = 'cachedtoken'
me.return_value = {
'firstName': 'Bob',
'lastName': 'Saget'
}
command = Command()
self.assertTrue(me.called)
self.assertEqual('foobarwinniethefoobar', command.client.token)
self.assertEqual("""firstName: Bob
lastName: Saget
""", sys.stdout.getvalue())
def test_purge_no_hash(self):
sys.argv = ['striketracker', 'purge', '--token', 'foobarwinniethefoobar']
with self.assertRaises(SystemExit) as e:
command = Command()
self.assertIn('too few arguments', sys.stderr.getvalue())
def test_purge_no_token(self):
sys.argv = ['striketracker', 'purge', 'x1x2x3x4']
with self.assertRaises(SystemExit) as e:
command = Command(cache=self.cache)
self.assertIn('This command requires authentication', sys.stderr.getvalue())
@patch('striketracker.APIClient.purge')
def test_purge(self, purge):
sys.argv = ['striketracker', 'purge', 'x1x2x3x4', '--token', 'foobarwinniethefoobar']
sys.stdin.write('//cdn.foo.com/main.js\n//cdn.foo.com/main.css')
sys.stdin.seek(0)
command = Command()
purge.assert_called_with('x1x2x3x4', [
{
"url": "//cdn.foo.com/main.js",
"purgeAllDynamic": False,
"recursive": False,
"invalidateOnly": False
},
{
"url": "//cdn.foo.com/main.css",
"purgeAllDynamic": False,
"recursive": False,
"invalidateOnly": False
}
])
@patch('striketracker.APIClient.purge_status')
@patch('striketracker.APIClient.purge')
def test_purge_poll(self, purge, purge_status):
sys.argv = ['striketracker', 'purge', 'x1x2x3x4', '--token', 'foobarwinniethefoobar', '--poll']
sys.stdin.write('//cdn.foo.com/main.js\n//cdn.foo.com/main.css')
sys.stdin.seek(0)
purge.return_value = 'cmu34ctmy3408xmy'
purge_status.side_effect = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
command = Command()
purge_status.assert_called_with('x1x2x3x4', 'cmu34ctmy3408xmy')
self.assertEqual('Reading urls from stdin\nSending purge.........Done!\n', sys.stderr.getvalue())
@patch('striketracker.APIClient.purge')
def test_purge_fails(self, purge):
sys.argv = ['striketracker', 'purge', 'x1x2x3x4', '--token', 'foobarwinniethefoobar']
sys.stdin.write('//cdn.foo.com/main.js\n//cdn.foo.com/main.css')
sys.stdin.seek(0)
purge.side_effect = APIError('Could not send purge to the CDN', None)
with self.assertRaises(SystemExit):
command = Command()
@patch('striketracker.APIClient.purge')
def test_purge_options(self, purge):
sys.stdin.write('//cdn.foo.com/main.js\n//cdn.foo.com/main.css')
os.write(self.fd, 'token: foobar')
for option in ['--purge-all-dynamic', '--recursive', '--invalidate-only']:
sys.argv = ['striketracker', 'purge', 'x1x2x3x4', option]
sys.stdin.seek(0)
command = Command(cache=self.cache)
purge.assert_called_with('x1x2x3x4', [
{
"url": "//cdn.foo.com/main.js",
"purgeAllDynamic": option == '--purge-all-dynamic',
"recursive": option == '--recursive',
"invalidateOnly": option == '--invalidate-only'
},
{
"url": "//cdn.foo.com/main.css",
"purgeAllDynamic": option == '--purge-all-dynamic',
"recursive": option == '--recursive',
"invalidateOnly": option == '--invalidate-only'
}
])
@patch('striketracker.APIClient.purge_status')
def test_purge_status(self, purge_status):
sys.argv = ['striketracker', 'purge_status', 'x1x2x3x4', 'cmu34ctmy3408xmy']
os.write(self.fd, 'token: foobar')
purge_status.return_value = 0.75
command = Command(cache=self.cache)
purge_status.assert_called_with('x1x2x3x4', 'cmu34ctmy3408xmy')
self.assertEqual('0.75\n', sys.stdout.getvalue()) |
987,499 | 6d0c5f8d566eacf761308736e3662e18f41c8b4f | import matplotlib.pyplot as plt
import numpy as np
from instagramy import InstagramUser
import sys
"""
Usage:
python instalysis.py <text_file_of_usernames>
"""
# Read the username list; bail out early when no file argument was given.
try:
    filename = sys.argv[1]
except (IndexError, KeyError):
    # Bug fix: the script previously printed this message and fell through,
    # crashing later with NameError because `filename` was never bound.
    print("List of usernames as a text file is required as an argument")
    sys.exit(1)

# One username per non-blank line.
usernames = []
with open(filename, "r") as username_file:
    for line in username_file:
        if line != "\n":
            usernames.append(line.strip())

# Fetch public profile stats for every username (network access).
followers = []
following = []
posts = []
for username in usernames:
    user = InstagramUser(username)
    followers.append(user.number_of_followers)
    following.append(user.number_of_followings)
    posts.append(user.number_of_posts)
x = np.arange(len(usernames))  # the label locations
width = 0.25  # the width of the bars

fig, ax = plt.subplots()
# Three grouped bars per user, offset around each tick.
# NOTE(review): offsets are +/-0.2 while the bar width is 0.25, so adjacent
# bars overlap slightly -- presumably the offsets were meant to match the
# width; confirm before changing.
rects1 = ax.bar(x + 0.2, followers, width, label="Followers")
rects2 = ax.bar(x, following, width, label="Following")
rects3 = ax.bar(x - 0.2, posts, width, label="Posts")

# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel("Popularity")
ax.yaxis.set_visible(False)  # label assigned above but the axis is hidden
ax.set_title("Username")
ax.set_xticks(x)
ax.set_xticklabels(usernames)
ax.legend()
def autolabel(rects):
    """Attach a text label above each bar in *rects*, displaying its height.

    Uses the module-level ``ax`` axes object.
    """
    for bar in rects:
        bar_height = bar.get_height()
        center_x = bar.get_x() + bar.get_width() / 2
        ax.annotate(
            "{}".format(bar_height),
            xy=(center_x, bar_height),
            xytext=(0, 3),  # 3 points vertical offset
            textcoords="offset points",
            ha="center",
            va="bottom",
        )
# Annotate every bar group with its numeric height, then render the chart.
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)

fig.tight_layout()

plt.show()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.