text stringlengths 38 1.54M |
|---|
from rest_framework.response import Response
from rest_framework.decorators import api_view
from .models import Image, Emoji, ImageEmojiRelationship
from .serializers import *
@api_view(['GET'])
def all_images_list(request):
    """Return every Image in the database, serialized under a 'data' key."""
    queryset = Image.objects.all()
    serializer = ImageSerializer(queryset, context={'request': request}, many=True)
    return Response({'data': serializer.data})
@api_view(['GET'])
def image_list(request, slug):
    """Return all Images linked (via ImageEmojiRelationship) to the emoji named *slug*.

    BUG FIX (N+1 queries): the original serialized every relationship row and
    then issued a separate ``Image.objects.get(pk=...)`` for each one.
    ``select_related('image')`` fetches each related Image in the same query,
    and iterating the queryset directly removes the intermediate serializer
    round-trip. The response payload is unchanged.
    """
    relationships = (
        ImageEmojiRelationship.objects
        .filter(emoji__name__exact=slug)
        .select_related('image')
    )
    memes = [rel.image for rel in relationships]
    imageSerializer = ImageSerializer(
        memes,
        context={'request': request},
        many=True
    )
    return Response({
        'data': imageSerializer.data
    })
@api_view(['GET'])
def emoji_list(request):
    """Return every Emoji in the database, serialized under a 'data' key."""
    queryset = Emoji.objects.all()
    serializer = EmojiSerializer(queryset, context={'request': request}, many=True)
    return Response({'data': serializer.data})
|
from sqlobject import *
class IcsSQLObject(SQLObject):
    """SQLObject base row carrying a unique 36-character UUID column."""

    # VARCHAR(36) fits a canonical hyphenated UUID string.
    uuid = StringCol(length=36, varchar=True, unique=True)

    def get_identifier(self):
        """Return this row's UUID string."""
        return self.uuid
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'frmMASGDCGUI.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_frmMASGDC(object):
    """Auto-generated (pyuic5) widget layout for the Stochastic Gradient
    Descent classification dialog.

    NOTE: generated from frmMASGDC.ui — per the file header, hand edits are
    lost on regeneration; change the .ui file instead.
    """

    def setupUi(self, frmMASGDC):
        """Create and position every widget on *frmMASGDC* (fixed geometry)."""
        frmMASGDC.setObjectName("frmMASGDC")
        frmMASGDC.resize(869, 698)
        # --- top rows: input/output file pickers and main action buttons ---
        self.btnInFile = QtWidgets.QPushButton(frmMASGDC)
        self.btnInFile.setGeometry(QtCore.QRect(800, 20, 51, 32))
        self.btnInFile.setObjectName("btnInFile")
        self.label_33 = QtWidgets.QLabel(frmMASGDC)
        self.label_33.setGeometry(QtCore.QRect(30, 20, 211, 16))
        self.label_33.setObjectName("label_33")
        self.btnOutFile = QtWidgets.QPushButton(frmMASGDC)
        self.btnOutFile.setGeometry(QtCore.QRect(800, 60, 51, 32))
        self.btnOutFile.setObjectName("btnOutFile")
        self.txtInFile = QtWidgets.QLineEdit(frmMASGDC)
        self.txtInFile.setGeometry(QtCore.QRect(210, 20, 581, 21))
        self.txtInFile.setText("")
        self.txtInFile.setObjectName("txtInFile")
        self.btnConvert = QtWidgets.QPushButton(frmMASGDC)
        self.btnConvert.setGeometry(QtCore.QRect(700, 645, 141, 32))
        self.btnConvert.setObjectName("btnConvert")
        self.label_35 = QtWidgets.QLabel(frmMASGDC)
        self.label_35.setGeometry(QtCore.QRect(30, 60, 211, 16))
        self.label_35.setObjectName("label_35")
        self.txtOutFile = QtWidgets.QLineEdit(frmMASGDC)
        self.txtOutFile.setGeometry(QtCore.QRect(210, 60, 581, 21))
        self.txtOutFile.setText("")
        self.txtOutFile.setObjectName("txtOutFile")
        self.btnClose = QtWidgets.QPushButton(frmMASGDC)
        self.btnClose.setGeometry(QtCore.QRect(30, 645, 141, 32))
        self.btnClose.setObjectName("btnClose")
        # --- central tab widget ---
        self.tabWidget = QtWidgets.QTabWidget(frmMASGDC)
        self.tabWidget.setGeometry(QtCore.QRect(30, 150, 811, 471))
        self.tabWidget.setObjectName("tabWidget")
        # --- tab 1: "Data" — train/test data & label column selection ---
        self.tab = QtWidgets.QWidget()
        self.tab.setObjectName("tab")
        self.label_2 = QtWidgets.QLabel(self.tab)
        self.label_2.setGeometry(QtCore.QRect(20, 70, 60, 16))
        self.label_2.setObjectName("label_2")
        self.label_3 = QtWidgets.QLabel(self.tab)
        self.label_3.setGeometry(QtCore.QRect(20, 110, 60, 16))
        self.label_3.setObjectName("label_3")
        self.label = QtWidgets.QLabel(self.tab)
        self.label.setGeometry(QtCore.QRect(360, 10, 61, 16))
        self.label.setObjectName("label")
        self.txtITrLabel = QtWidgets.QComboBox(self.tab)
        self.txtITrLabel.setGeometry(QtCore.QRect(240, 110, 121, 26))
        self.txtITrLabel.setEditable(True)
        self.txtITrLabel.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContentsOnFirstShow)
        self.txtITrLabel.setObjectName("txtITrLabel")
        self.txtITrData = QtWidgets.QComboBox(self.tab)
        self.txtITrData.setGeometry(QtCore.QRect(240, 70, 121, 26))
        self.txtITrData.setEditable(True)
        self.txtITrData.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContentsOnFirstShow)
        self.txtITrData.setObjectName("txtITrData")
        self.txtITeLabel = QtWidgets.QComboBox(self.tab)
        self.txtITeLabel.setGeometry(QtCore.QRect(390, 110, 121, 26))
        self.txtITeLabel.setEditable(True)
        self.txtITeLabel.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContentsOnFirstShow)
        self.txtITeLabel.setObjectName("txtITeLabel")
        self.txtITeData = QtWidgets.QComboBox(self.tab)
        self.txtITeData.setGeometry(QtCore.QRect(390, 70, 121, 26))
        self.txtITeData.setEditable(True)
        self.txtITeData.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContentsOnFirstShow)
        self.txtITeData.setObjectName("txtITeData")
        self.label_7 = QtWidgets.QLabel(self.tab)
        self.label_7.setGeometry(QtCore.QRect(285, 40, 81, 16))
        self.label_7.setObjectName("label_7")
        self.label_8 = QtWidgets.QLabel(self.tab)
        self.label_8.setGeometry(QtCore.QRect(430, 40, 81, 16))
        self.label_8.setObjectName("label_8")
        self.txtFoldID = QtWidgets.QComboBox(self.tab)
        self.txtFoldID.setGeometry(QtCore.QRect(240, 150, 121, 26))
        self.txtFoldID.setEditable(True)
        self.txtFoldID.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContentsOnFirstShow)
        self.txtFoldID.setObjectName("txtFoldID")
        self.label_9 = QtWidgets.QLabel(self.tab)
        self.label_9.setGeometry(QtCore.QRect(20, 150, 121, 16))
        self.label_9.setObjectName("label_9")
        self.lbFoldID = QtWidgets.QLabel(self.tab)
        self.lbFoldID.setGeometry(QtCore.QRect(390, 150, 251, 16))
        self.lbFoldID.setObjectName("lbFoldID")
        self.tabWidget.addTab(self.tab, "")
        # --- tab 2: "Parameters" — SGD classifier hyper-parameters ---
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        self.groupBox = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox.setGeometry(QtCore.QRect(30, 20, 761, 80))
        self.groupBox.setObjectName("groupBox")
        self.cbScale = QtWidgets.QCheckBox(self.groupBox)
        self.cbScale.setGeometry(QtCore.QRect(20, 40, 641, 21))
        self.cbScale.setChecked(True)
        self.cbScale.setObjectName("cbScale")
        self.txtPowert = QtWidgets.QLineEdit(self.tab_2)
        self.txtPowert.setGeometry(QtCore.QRect(620, 200, 160, 21))
        self.txtPowert.setObjectName("txtPowert")
        self.cbWarmStart = QtWidgets.QCheckBox(self.tab_2)
        self.cbWarmStart.setGeometry(QtCore.QRect(240, 360, 191, 20))
        self.cbWarmStart.setChecked(False)
        self.cbWarmStart.setObjectName("cbWarmStart")
        self.label_22 = QtWidgets.QLabel(self.tab_2)
        self.label_22.setGeometry(QtCore.QRect(430, 240, 181, 16))
        self.label_22.setObjectName("label_22")
        self.txtNJobs = QtWidgets.QLineEdit(self.tab_2)
        self.txtNJobs.setGeometry(QtCore.QRect(620, 320, 160, 21))
        self.txtNJobs.setObjectName("txtNJobs")
        self.label_20 = QtWidgets.QLabel(self.tab_2)
        self.label_20.setGeometry(QtCore.QRect(430, 160, 181, 16))
        self.label_20.setObjectName("label_20")
        self.label_24 = QtWidgets.QLabel(self.tab_2)
        self.label_24.setGeometry(QtCore.QRect(40, 240, 191, 16))
        self.label_24.setObjectName("label_24")
        self.txtL1Rate = QtWidgets.QLineEdit(self.tab_2)
        self.txtL1Rate.setGeometry(QtCore.QRect(620, 160, 160, 21))
        self.txtL1Rate.setObjectName("txtL1Rate")
        self.cbAverageParm = QtWidgets.QCheckBox(self.tab_2)
        self.cbAverageParm.setGeometry(QtCore.QRect(40, 360, 191, 20))
        self.cbAverageParm.setChecked(False)
        self.cbAverageParm.setObjectName("cbAverageParm")
        self.cbFitIntercept = QtWidgets.QCheckBox(self.tab_2)
        self.cbFitIntercept.setGeometry(QtCore.QRect(240, 400, 191, 20))
        self.cbFitIntercept.setChecked(True)
        self.cbFitIntercept.setObjectName("cbFitIntercept")
        self.cbLearningRate = QtWidgets.QComboBox(self.tab_2)
        self.cbLearningRate.setGeometry(QtCore.QRect(240, 160, 161, 26))
        self.cbLearningRate.setObjectName("cbLearningRate")
        self.label_28 = QtWidgets.QLabel(self.tab_2)
        self.label_28.setGeometry(QtCore.QRect(40, 320, 191, 16))
        self.label_28.setObjectName("label_28")
        self.label_25 = QtWidgets.QLabel(self.tab_2)
        self.label_25.setGeometry(QtCore.QRect(430, 280, 181, 16))
        self.label_25.setObjectName("label_25")
        self.txtAlpha = QtWidgets.QLineEdit(self.tab_2)
        self.txtAlpha.setGeometry(QtCore.QRect(620, 240, 160, 21))
        self.txtAlpha.setObjectName("txtAlpha")
        self.label_23 = QtWidgets.QLabel(self.tab_2)
        self.label_23.setGeometry(QtCore.QRect(430, 200, 181, 16))
        self.label_23.setObjectName("label_23")
        self.label_19 = QtWidgets.QLabel(self.tab_2)
        self.label_19.setGeometry(QtCore.QRect(40, 120, 191, 16))
        self.label_19.setObjectName("label_19")
        self.txtMaxIter = QtWidgets.QLineEdit(self.tab_2)
        self.txtMaxIter.setGeometry(QtCore.QRect(240, 240, 160, 21))
        self.txtMaxIter.setObjectName("txtMaxIter")
        self.cbLoss = QtWidgets.QComboBox(self.tab_2)
        self.cbLoss.setGeometry(QtCore.QRect(240, 120, 161, 26))
        self.cbLoss.setObjectName("cbLoss")
        self.label_30 = QtWidgets.QLabel(self.tab_2)
        self.label_30.setGeometry(QtCore.QRect(430, 360, 191, 16))
        self.label_30.setObjectName("label_30")
        self.txtEta0 = QtWidgets.QLineEdit(self.tab_2)
        self.txtEta0.setGeometry(QtCore.QRect(240, 200, 160, 21))
        self.txtEta0.setObjectName("txtEta0")
        self.txtVerbose = QtWidgets.QLineEdit(self.tab_2)
        self.txtVerbose.setGeometry(QtCore.QRect(240, 280, 160, 21))
        self.txtVerbose.setObjectName("txtVerbose")
        self.txtTol = QtWidgets.QLineEdit(self.tab_2)
        self.txtTol.setGeometry(QtCore.QRect(620, 280, 160, 21))
        self.txtTol.setObjectName("txtTol")
        self.cbPenalty = QtWidgets.QComboBox(self.tab_2)
        self.cbPenalty.setGeometry(QtCore.QRect(620, 120, 161, 26))
        self.cbPenalty.setObjectName("cbPenalty")
        self.label_13 = QtWidgets.QLabel(self.tab_2)
        self.label_13.setGeometry(QtCore.QRect(430, 120, 181, 16))
        self.label_13.setObjectName("label_13")
        self.cbShuffle = QtWidgets.QCheckBox(self.tab_2)
        self.cbShuffle.setGeometry(QtCore.QRect(40, 400, 191, 20))
        self.cbShuffle.setChecked(True)
        self.cbShuffle.setObjectName("cbShuffle")
        self.txtEpochs = QtWidgets.QLineEdit(self.tab_2)
        self.txtEpochs.setGeometry(QtCore.QRect(240, 320, 160, 21))
        self.txtEpochs.setObjectName("txtEpochs")
        self.label_15 = QtWidgets.QLabel(self.tab_2)
        self.label_15.setGeometry(QtCore.QRect(40, 200, 191, 16))
        self.label_15.setObjectName("label_15")
        self.label_21 = QtWidgets.QLabel(self.tab_2)
        self.label_21.setGeometry(QtCore.QRect(40, 160, 191, 16))
        self.label_21.setObjectName("label_21")
        self.label_27 = QtWidgets.QLabel(self.tab_2)
        self.label_27.setGeometry(QtCore.QRect(40, 280, 191, 16))
        self.label_27.setObjectName("label_27")
        self.label_26 = QtWidgets.QLabel(self.tab_2)
        self.label_26.setGeometry(QtCore.QRect(430, 320, 181, 16))
        self.label_26.setObjectName("label_26")
        self.txtEpsilon = QtWidgets.QLineEdit(self.tab_2)
        self.txtEpsilon.setGeometry(QtCore.QRect(620, 360, 160, 21))
        self.txtEpsilon.setObjectName("txtEpsilon")
        self.tabWidget.addTab(self.tab_2, "")
        # --- tab 3: "Fold" — cross-validation fold range ---
        self.tab_3 = QtWidgets.QWidget()
        self.tab_3.setObjectName("tab_3")
        self.txtFoldFrom = QtWidgets.QSpinBox(self.tab_3)
        self.txtFoldFrom.setGeometry(QtCore.QRect(100, 30, 80, 24))
        self.txtFoldFrom.setMaximum(100000)
        self.txtFoldFrom.setProperty("value", 1)
        self.txtFoldFrom.setObjectName("txtFoldFrom")
        self.label_17 = QtWidgets.QLabel(self.tab_3)
        self.label_17.setGeometry(QtCore.QRect(40, 30, 60, 16))
        self.label_17.setObjectName("label_17")
        self.txtFoldTo = QtWidgets.QSpinBox(self.tab_3)
        self.txtFoldTo.setGeometry(QtCore.QRect(270, 30, 80, 24))
        self.txtFoldTo.setMaximum(100000)
        self.txtFoldTo.setProperty("value", 1)
        self.txtFoldTo.setObjectName("txtFoldTo")
        self.label_44 = QtWidgets.QLabel(self.tab_3)
        self.label_44.setGeometry(QtCore.QRect(210, 30, 60, 16))
        self.label_44.setObjectName("label_44")
        self.tabWidget.addTab(self.tab_3, "")
        # --- tab 4: "Filter Class ID" — class removal filter ---
        self.tab_4 = QtWidgets.QWidget()
        self.tab_4.setObjectName("tab_4")
        self.label_4 = QtWidgets.QLabel(self.tab_4)
        self.label_4.setGeometry(QtCore.QRect(20, 30, 201, 16))
        self.label_4.setObjectName("label_4")
        self.txtFilter = QtWidgets.QLineEdit(self.tab_4)
        self.txtFilter.setGeometry(QtCore.QRect(190, 30, 291, 21))
        self.txtFilter.setObjectName("txtFilter")
        self.label_5 = QtWidgets.QLabel(self.tab_4)
        self.label_5.setGeometry(QtCore.QRect(490, 30, 211, 16))
        self.label_5.setObjectName("label_5")
        self.txtClass = QtWidgets.QTextEdit(self.tab_4)
        self.txtClass.setGeometry(QtCore.QRect(190, 70, 91, 431))
        self.txtClass.setReadOnly(True)
        self.txtClass.setObjectName("txtClass")
        self.label_10 = QtWidgets.QLabel(self.tab_4)
        self.label_10.setGeometry(QtCore.QRect(20, 70, 201, 16))
        self.label_10.setObjectName("label_10")
        self.tabWidget.addTab(self.tab_4, "")
        # --- tab 5: "Metrics" — evaluation metric selection ---
        self.tab_5 = QtWidgets.QWidget()
        self.tab_5.setObjectName("tab_5")
        self.cbAverage = QtWidgets.QCheckBox(self.tab_5)
        self.cbAverage.setGeometry(QtCore.QRect(20, 30, 181, 20))
        self.cbAverage.setChecked(True)
        self.cbAverage.setObjectName("cbAverage")
        self.cbPrecision = QtWidgets.QCheckBox(self.tab_5)
        self.cbPrecision.setGeometry(QtCore.QRect(20, 70, 181, 20))
        self.cbPrecision.setChecked(True)
        self.cbPrecision.setObjectName("cbPrecision")
        self.cbPrecisionAvg = QtWidgets.QComboBox(self.tab_5)
        self.cbPrecisionAvg.setGeometry(QtCore.QRect(240, 70, 321, 26))
        self.cbPrecisionAvg.setObjectName("cbPrecisionAvg")
        self.cbAPrecisionAvg = QtWidgets.QComboBox(self.tab_5)
        self.cbAPrecisionAvg.setGeometry(QtCore.QRect(240, 110, 321, 26))
        self.cbAPrecisionAvg.setObjectName("cbAPrecisionAvg")
        self.cbAPrecision = QtWidgets.QCheckBox(self.tab_5)
        self.cbAPrecision.setGeometry(QtCore.QRect(20, 110, 231, 20))
        self.cbAPrecision.setChecked(False)
        self.cbAPrecision.setObjectName("cbAPrecision")
        self.cbRecallAvg = QtWidgets.QComboBox(self.tab_5)
        self.cbRecallAvg.setGeometry(QtCore.QRect(240, 150, 321, 26))
        self.cbRecallAvg.setObjectName("cbRecallAvg")
        self.cbRecall = QtWidgets.QCheckBox(self.tab_5)
        self.cbRecall.setGeometry(QtCore.QRect(20, 150, 181, 20))
        self.cbRecall.setChecked(True)
        self.cbRecall.setObjectName("cbRecall")
        self.cbF1 = QtWidgets.QCheckBox(self.tab_5)
        self.cbF1.setGeometry(QtCore.QRect(20, 190, 181, 20))
        self.cbF1.setChecked(True)
        self.cbF1.setObjectName("cbF1")
        self.cbF1Avg = QtWidgets.QComboBox(self.tab_5)
        self.cbF1Avg.setGeometry(QtCore.QRect(240, 190, 321, 26))
        self.cbF1Avg.setObjectName("cbF1Avg")
        self.tabWidget.addTab(self.tab_5, "")
        # --- remaining top-level widgets (status label, model output row) ---
        self.label_12 = QtWidgets.QLabel(frmMASGDC)
        self.label_12.setGeometry(QtCore.QRect(185, 650, 501, 20))
        self.label_12.setObjectName("label_12")
        self.btnOutModel = QtWidgets.QPushButton(frmMASGDC)
        self.btnOutModel.setGeometry(QtCore.QRect(800, 100, 51, 32))
        self.btnOutModel.setObjectName("btnOutModel")
        self.label_36 = QtWidgets.QLabel(frmMASGDC)
        self.label_36.setGeometry(QtCore.QRect(30, 100, 231, 16))
        self.label_36.setObjectName("label_36")
        self.txtOutModel = QtWidgets.QLineEdit(frmMASGDC)
        self.txtOutModel.setGeometry(QtCore.QRect(210, 100, 581, 21))
        self.txtOutModel.setText("")
        self.txtOutModel.setObjectName("txtOutModel")
        # --- finalize: strings, slot wiring, keyboard tab order ---
        self.retranslateUi(frmMASGDC)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(frmMASGDC)
        frmMASGDC.setTabOrder(self.txtInFile, self.btnInFile)
        frmMASGDC.setTabOrder(self.btnInFile, self.txtOutFile)
        frmMASGDC.setTabOrder(self.txtOutFile, self.btnOutFile)
        frmMASGDC.setTabOrder(self.btnOutFile, self.tabWidget)
        frmMASGDC.setTabOrder(self.tabWidget, self.txtITrData)
        frmMASGDC.setTabOrder(self.txtITrData, self.txtITrLabel)
        frmMASGDC.setTabOrder(self.txtITrLabel, self.btnConvert)
        frmMASGDC.setTabOrder(self.btnConvert, self.btnClose)

    def retranslateUi(self, frmMASGDC):
        """Assign all user-visible strings via Qt's translation layer."""
        _translate = QtCore.QCoreApplication.translate
        frmMASGDC.setWindowTitle(_translate("frmMASGDC", "Stochastic Gradient Descent Classification"))
        self.btnInFile.setText(_translate("frmMASGDC", "..."))
        self.label_33.setText(_translate("frmMASGDC", "Input Data (per fold)"))
        self.btnOutFile.setText(_translate("frmMASGDC", "..."))
        self.btnConvert.setText(_translate("frmMASGDC", "Analyze"))
        self.label_35.setText(_translate("frmMASGDC", "Analysis Results"))
        self.btnClose.setText(_translate("frmMASGDC", "Close"))
        self.label_2.setText(_translate("frmMASGDC", "Data"))
        self.label_3.setText(_translate("frmMASGDC", "Label"))
        self.label.setText(_translate("frmMASGDC", "Input"))
        self.label_7.setText(_translate("frmMASGDC", "Train"))
        self.label_8.setText(_translate("frmMASGDC", "Test"))
        self.label_9.setText(_translate("frmMASGDC", "FoldID"))
        self.lbFoldID.setText(_translate("frmMASGDC", "ID=None"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("frmMASGDC", "Data"))
        self.groupBox.setTitle(_translate("frmMASGDC", "<Input Data Normalization>"))
        self.cbScale.setText(_translate("frmMASGDC", "Scale Data Train~N(0,1) and Test~N(0,1)"))
        self.txtPowert.setText(_translate("frmMASGDC", "0.5"))
        self.cbWarmStart.setText(_translate("frmMASGDC", "Warm Start"))
        self.label_22.setText(_translate("frmMASGDC", "Alpha"))
        self.txtNJobs.setText(_translate("frmMASGDC", "1"))
        self.label_20.setText(_translate("frmMASGDC", "L1 Ratio"))
        self.label_24.setText(_translate("frmMASGDC", "Max Iteration (None=0)"))
        self.txtL1Rate.setText(_translate("frmMASGDC", "0.15"))
        self.cbAverageParm.setText(_translate("frmMASGDC", "Average"))
        self.cbFitIntercept.setText(_translate("frmMASGDC", "Fit Intercept"))
        self.label_28.setText(_translate("frmMASGDC", "Epochs (None=0)"))
        self.label_25.setText(_translate("frmMASGDC", "Tolerance (None=0)"))
        self.txtAlpha.setText(_translate("frmMASGDC", "0.0001"))
        self.label_23.setText(_translate("frmMASGDC", "power_t"))
        self.label_19.setText(_translate("frmMASGDC", "Loss"))
        self.txtMaxIter.setText(_translate("frmMASGDC", "0"))
        self.label_30.setText(_translate("frmMASGDC", "Epsilon"))
        self.txtEta0.setText(_translate("frmMASGDC", "0"))
        self.txtVerbose.setText(_translate("frmMASGDC", "0"))
        self.txtTol.setText(_translate("frmMASGDC", "0.0001"))
        self.label_13.setText(_translate("frmMASGDC", "Penalty"))
        self.cbShuffle.setText(_translate("frmMASGDC", "Shuffle"))
        self.txtEpochs.setText(_translate("frmMASGDC", "0"))
        self.label_15.setText(_translate("frmMASGDC", "eta0"))
        self.label_21.setText(_translate("frmMASGDC", "Learning Rate"))
        self.label_27.setText(_translate("frmMASGDC", "Verbose"))
        self.label_26.setText(_translate("frmMASGDC", "n_jobs (All = -1)"))
        self.txtEpsilon.setText(_translate("frmMASGDC", "0.1"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("frmMASGDC", "Parameters"))
        self.label_17.setText(_translate("frmMASGDC", "From:"))
        self.label_44.setText(_translate("frmMASGDC", "To:"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("frmMASGDC", "Fold"))
        self.label_4.setText(_translate("frmMASGDC", "Remove Class IDs"))
        self.txtFilter.setText(_translate("frmMASGDC", "0"))
        self.label_5.setText(_translate("frmMASGDC", "e.g. 0 or [1,2]"))
        self.label_10.setText(_translate("frmMASGDC", "Existed Classes"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("frmMASGDC", "Filter Class ID"))
        self.cbAverage.setText(_translate("frmMASGDC", "Average"))
        self.cbPrecision.setText(_translate("frmMASGDC", "Precision"))
        self.cbAPrecision.setText(_translate("frmMASGDC", "Average of Precision"))
        self.cbRecall.setText(_translate("frmMASGDC", "Recall"))
        self.cbF1.setText(_translate("frmMASGDC", "f1 score"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5), _translate("frmMASGDC", "Metrics"))
        self.label_12.setText(_translate("frmMASGDC", "$FOLD$ will be replaced by fold number."))
        self.btnOutModel.setText(_translate("frmMASGDC", "..."))
        self.label_36.setText(_translate("frmMASGDC", "Models (per fold/opt)"))
|
# Print the first n Fibonacci numbers (1, 1, 2, 3, ...) in reverse order,
# space-separated on a single line.
first, second = 1, 1
sequence = [1]
count = int(input())
for _ in range(count - 1):
    first, second = second, first + second
    sequence.append(first)
print(' '.join(str(value) for value in reversed(sequence)))
|
import pygame

# Window dimensions.
SCREENWIDTH = 756
SCREENHEIGHT = 650

pygame.init()
screen = pygame.display.set_mode([SCREENWIDTH, SCREENHEIGHT])
pygame.display.set_caption("Create chick")

running = True
chick_img = pygame.image.load("images/chick.png")
BG = pygame.image.load("images/background.png")
chick_lose = pygame.image.load("images/chicklose.png")
chick_height = 51
chick_width = 50
# Parallel lists of chick positions; a click spawns a chick at the cursor.
chick_x = []
chick_y = []

while running:
    screen.blit(BG, (0, 0))
    # Move every chick still above the floor down by 10px; drop the rest.
    survivors = [(x, y + 10) for x, y in zip(chick_x, chick_y)
                 if y <= SCREENHEIGHT - chick_height]
    chick_x = [pos[0] for pos in survivors]
    chick_y = [pos[1] for pos in survivors]
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.MOUSEBUTTONDOWN:
            spot = event.pos
            chick_x.append(spot[0])
            chick_y.append(spot[1])
            print(chick_x)
            print(chick_y)
    # Chicks that just crossed the floor are drawn with the "lose" sprite.
    for x, y in zip(chick_x, chick_y):
        sprite = chick_lose if y > SCREENHEIGHT - chick_height else chick_img
        screen.blit(sprite, (x, y))
    pygame.display.update()
pygame.quit()
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-05-15 13:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Add a unique-together constraint on dimmaterial
    (fiber_content, fiber_construction, yarn_size)."""

    dependencies = [
        ('app_sourcing', '0010_auto_20180515_2108'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='dimmaterial',
            # Set literal instead of set([...]) — identical value, idiomatic (C403).
            unique_together={('fiber_content', 'fiber_construction', 'yarn_size')},
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""《算法导论》 123页
选择算法"""
from a_swap import swap
from p095_quick_sort import quick_sort
def median(lst):
    """Return the lower median of *lst* without mutating it.

    Uses the built-in sorted() instead of copying and running the project's
    quick_sort — same result, less code, and no in-place mutation step.
    """
    ordered = sorted(lst)
    return ordered[(len(ordered) - 1) // 2]
def partition(lst, median_value):
    """Partition *lst* in place around *median_value*.

    After the call, every element <= median_value occupies the prefix
    lst[0..middle_index] (in scan order); returns middle_index, i.e. the
    count of elements <= median_value minus one.

    Fixes vs. the original:
    - the trailing "if lst[middle_index] == median_value: swap(lst,
      middle_index, middle_index)" block swapped an index with itself — a
      no-op — and is removed;
    - the element exchange uses Python tuple assignment instead of the
      project-local swap() helper.
    """
    middle_index = -1
    for i in range(len(lst)):
        if lst[i] <= median_value:
            middle_index += 1
            lst[middle_index], lst[i] = lst[i], lst[middle_index]
    return middle_index
def select(lst, ith):
    """Return the ith smallest element of *lst* (1-based), or None if out of range.

    Median-of-medians (BFPRT) selection. NOTE: partition() reorders *lst*
    in place, so the caller's list may be permuted (same multiset).
    """
    # Robustness: also reject ith < 1 (the original fell through and could
    # index tmp_lst[-1] for ith == 0).
    if not lst or ith < 1 or ith > len(lst):
        return None
    group_size = 5
    group_count = len(lst) // group_size
    if len(lst) % group_size != 0:
        group_count += 1
    if group_count == 1:
        # Small input: sort directly.
        return sorted(lst)[ith - 1]
    # Lower median of each group of 5 (the last slice may be shorter).
    median_values = [median(lst[g * group_size:(g + 1) * group_size])
                     for g in range(group_count)]
    # BUG FIX: the pivot must be the median of the medians, i.e. the
    # ceil(group_count/2)-th smallest with this 1-based API. The original
    # passed (group_count - 1) // 2, which is 0 for group_count == 2 and
    # made the base case return the *maximum* of the medians — still a
    # valid pivot, but it forfeits the linear-time guarantee.
    median_value = select(median_values, (group_count + 1) // 2)
    kth = partition(lst, median_value) + 1  # rank of the pivot within lst
    if kth == ith:
        return median_value
    elif kth > ith:
        return select(lst[:kth - 1], ith)
    else:
        return select(lst[kth:], ith - kth)
if __name__ == '__main__':
    # Print the 1st through len-th order statistics of the sample list.
    sample = [49, 27, 65, 97, 76, 12, 49, 38, 9, 99, 47, 32, 132, 65, 34, 78, 47, 32, 16, 87, 0]
    for rank in range(1, len(sample) + 1):
        print(select(sample, rank))
import pyparsing as pyp
import math
import operator
import datetime
class NumericStringParser(object):
    """Arithmetic/boolean expression parser and evaluator.

    Most of this code comes from the fourFn.py pyparsing example
    http://pyparsing.wikispaces.com/file/view/fourFn.py
    http://pyparsing.wikispaces.com/message/view/home/15549426
    __author__='Paul McGuire'
    All I've done is rewrap Paul McGuire's fourFn.py as a class, so I can use it
    more easily in other places.

    Supports numbers, named math functions, PI/E, comparisons, and/or/xor/not,
    bit shifts, and ``$name`` / ``$name.attr`` variables resolved from
    ``dict_var`` (datetime attribute values are converted to Unix seconds).
    """

    def __init__(self, dict_var=None):
        """Build the grammar and the operator tables.

        dict_var: mapping used to resolve ``$name`` variables.

        BUG FIX: the original signature was ``dict_var={}`` — a mutable
        default argument shared by every instance constructed without one.

        Grammar:
            expop   :: '^'
            multop  :: '*' | '/'
            addop   :: '+' | '-'
            integer :: ['+' | '-'] '0'..'9'+
            atom    :: PI | E | real | fn '(' expr ')' | '(' expr ')'
            factor  :: atom [ expop factor ]*
            term    :: factor [ multop factor ]*
            expr    :: term [ addop term ]*
        """
        self.dict_var = {} if dict_var is None else dict_var
        point = pyp.Literal(".")
        e = pyp.CaselessLiteral("E")
        # Signed real number with optional fraction and exponent.
        fnumber = pyp.Combine(
            pyp.Word("+-" + pyp.nums, pyp.nums) +
            pyp.Optional(point + pyp.Optional(pyp.Word(pyp.nums))) +
            pyp.Optional(e + pyp.Word("+-" + pyp.nums, pyp.nums))
        )
        ident = pyp.Word(pyp.alphas, pyp.alphas + pyp.nums + "_$")
        plus = pyp.Literal("+")
        minus = pyp.Literal("-")
        mult = pyp.Literal("*")
        div = pyp.Literal("/")
        pow_ = pyp.Literal("^")
        lshift = pyp.Literal("<<")
        rshift = pyp.Literal(">>")
        # not_ = pyp.Literal( "not" )
        and_ = pyp.Literal("and")
        or_ = pyp.Literal("or")
        xor_ = pyp.Literal("xor")
        eq = pyp.Literal("==")
        neq = pyp.Literal("!=")
        gt = pyp.Literal(">")
        ge = pyp.Literal(">=")
        lt = pyp.Literal("<")
        le = pyp.Literal("<=")
        lpar = pyp.Literal("(").suppress()
        rpar = pyp.Literal(")").suppress()
        # Logical connectives share add/mul precedence levels with +- and */.
        addop = plus | minus | and_ | or_ | xor_
        multop = mult | div | lshift | rshift
        equop = eq | neq | ge | gt | le | lt
        expop = pow_  # | not_
        pi = pyp.CaselessLiteral("PI")
        # $name or $name.attr lookups into dict_var.
        variables = pyp.Word("$", pyp.alphanums + '.' + '_')
        expr = pyp.Forward()
        equation = pyp.Forward()
        atom = (
            (
                pyp.Optional(pyp.oneOf("- +")) +
                (pi | e | fnumber | ident + lpar + expr + rpar | variables).setParseAction(self.pushFirst)
            ) |
            pyp.Optional(pyp.oneOf("- +")) +
            pyp.Group(lpar + expr + rpar)
        ).setParseAction(self.pushUMinus)
        # by defining exponentiation as "atom [ ^ factor ]..." instead of
        # "atom [ ^ atom ]...", we get right-to-left exponents, instead of
        # left-to-right; that is, 2^3^2 = 2^(3^2), not (2^3)^2.
        factor = pyp.Forward()
        factor << atom + pyp.ZeroOrMore((expop + factor).setParseAction(self.pushFirst))
        term = factor + pyp.ZeroOrMore((multop + factor).setParseAction(self.pushFirst))
        expr << term + pyp.ZeroOrMore((addop + term).setParseAction(self.pushFirst))
        equation << expr + pyp.ZeroOrMore((equop + expr).setParseAction(self.pushFirst))
        self.bnf = equation
        # Map operator symbols to corresponding arithmetic operations.
        epsilon = 1e-12
        self.opn = {
            "+": operator.add,
            "-": operator.sub,
            "*": operator.mul,
            "/": operator.truediv,
            "^": operator.pow,
            "<<": operator.lshift,
            ">>": operator.rshift
        }
        self.equality_opn = {
            "==": operator.eq,
            "!=": operator.ne,
            ">=": operator.ge,
            ">": operator.gt,
            "<=": operator.le,
            "<": operator.lt
        }
        self.logical_opn = {
            "and": operator.and_,
            "or": operator.or_,
            "not": operator.not_,
            "xor": operator.xor,
        }
        self.fn = {
            "sin": math.sin,
            "cos": math.cos,
            "tan": math.tan,
            "acos": math.acos,
            "asin": math.asin,
            "atan": math.atan,
            "sqrt": math.sqrt,
            "abs": abs,
            "trunc": lambda a: int(a),
            "round": round,
            "exp": math.exp,
            "log": math.log,
            "log2": math.log2,
            "Log": math.log10,
            "not": operator.not_,
            # For Python3 compatibility, cmp replaced by ((a > 0) - (a < 0)). See
            # https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons
            "sgn": lambda a: abs(a) > epsilon and ((a > 0) - (a < 0)) or 0
        }
        # RPN stack filled by the parse actions, consumed by evaluateStack().
        self.exprStack = []

    def pushFirst(self, strg, loc, toks):
        """Parse action: push the first matched token onto the RPN stack."""
        self.exprStack.append(toks[0])

    def pushUMinus(self, strg, loc, toks):
        """Parse action: record a leading '-' as a unary-minus marker."""
        if toks and toks[0] == '-':
            self.exprStack.append('unary -')

    def evaluateStack(self, s):
        """Recursively evaluate the RPN stack *s* (consumed from the end).

        Returns None as soon as any operand evaluates to None; unknown
        alphabetic identifiers evaluate to 0.
        """
        op = s.pop()
        if op == 'unary -':
            op1 = self.evaluateStack(s)
            if op1 is None:
                return None
            return 0. - op1
        elif op == 'not':
            op1 = self.evaluateStack(s)
            if op1 is None:
                return None
            return int(self.logical_opn[op](int(op1)))
        elif op in ['>>', '<<']:
            # Shifts require integer operands.
            op2 = self.evaluateStack(s)
            op1 = self.evaluateStack(s)
            if op1 is None or op2 is None:
                return None
            return self.opn[op](int(op1), int(op2))
        elif op in list(self.opn.keys()):
            op2 = self.evaluateStack(s)
            op1 = self.evaluateStack(s)
            if op1 is None or op2 is None:
                return None
            return self.opn[op](op1, op2)
        elif op in list(self.logical_opn.keys()):
            op2 = self.evaluateStack(s)
            op1 = self.evaluateStack(s)
            if op1 is None or op2 is None:
                return None
            return self.logical_opn[op](int(op1), int(op2))
        elif op in list(self.equality_opn.keys()):
            op2 = self.evaluateStack(s)
            op1 = self.evaluateStack(s)
            if op1 is None or op2 is None:
                return None
            return int(self.equality_opn[op](op1, op2))
        elif op == "PI":
            return math.pi  # 3.1415926535
        elif op.startswith('$'):  # custom variables
            op = op[1:]
            split_op = op.split('.')
            key = split_op[0]
            property_name = split_op[1] if len(split_op) > 1 else None
            if property_name is None:
                value = self.dict_var[key]
            else:
                value = getattr(self.dict_var[key], property_name)
            # datetimes are compared/combined as Unix-epoch seconds.
            if isinstance(value, datetime.datetime):
                value = (value - datetime.datetime(1970, 1, 1)).total_seconds()
            return value
        elif op == "E":
            return math.e  # 2.718281828
        elif op in self.fn:
            op1 = self.evaluateStack(s)
            if op1 is None:
                return None
            return self.fn[op](op1)
        elif op[0].isalpha():
            # Unknown identifier: silently evaluates to 0 (original behaviour).
            return 0
        else:
            return float(op)

    def eval(self, num_string, parseAll=True):
        """Parse and evaluate *num_string*; return the numeric result."""
        self.exprStack = []
        results = self.bnf.parseString(num_string, parseAll)
        val = self.evaluateStack(self.exprStack[:])
        return val
if __name__ == "__main__":
    # --- plain scalar variables ---
    dict_var = {"A": 10, "B": 100}
    nsp = NumericStringParser(dict_var)
    print(nsp.eval('$A+$B / 3 '))

    # --- dataclass attribute access via $name.attr ---
    import dataclasses

    @dataclasses.dataclass
    class TestClass:
        name: str
        value_A: float
        valueB: int = 0

    dict_var = {"A_A": TestClass('nameA', 10.01), "B_B": TestClass('nameB', 100, 10)}
    nsp = NumericStringParser(dict_var)
    print(nsp.eval('$A_A.value_A * $A_A.valueB + $B_B.value_A * $B_B.valueB / 3 '))

    # --- plain-class property access ---
    # @dataclasses.dataclass
    class TestClass2:
        def __init__(self, name, value):
            self.name = name
            self.value = value

        @property
        def value2(self):
            return self.value * self.value

    expression = """ 0.5 * $A.value * ( 0.25 * $C.value * $D.value - ( $D.value - 2 * $E.value ) * sqrt( 100 + $E.value * $D.value - $E.value^ 2) ) """
    expression = """ $A.value2 """
    dict_var = {}
    dict_var["A"] = TestClass2('nameA', 10.0)
    dict_var["B"] = TestClass2('nameB', 2.0)
    dict_var["C"] = TestClass2('nameC', 4.0)
    dict_var["D"] = TestClass2('nameD', 3.0)
    dict_var["E"] = TestClass2('nameE', 5.0)
    nsp = NumericStringParser(dict_var)
    print(nsp.eval(expression))

    # --- boolean / comparison / shift operators, checked against Python eval ---
    expressions = [
        '1 or 0',
        '1 or 1',
        '0 or 1',
        '0 or 0',
        '1 and 0',
        '1 and 1',
        '0 and 1',
        '0 and 0',
        '1 xor 0',
        '1 xor 1',
        '0 xor 1',
        '0 xor 0',
        '1 or not(0)',
        '1 or not(1)',
        '0 or not(1)',
        '0 or not(0)',
        '1 and not(0)',
        '1 and not(1)',
        '0 and not(1)',
        '0 and not(0)',
        '1 xor not(0)',
        '1 xor not(1)',
        '0 xor not(1)',
        'not(0)*3.5',
        'not(1)*3.5',
        '5 - 3 > 5 - 3',
        '5 - 3 < 5 - 3',
        '5 - 3 == 5 - 3',
        '5 - 3 != 5 - 3',
        '5 - 3 <= 5 - 3',
        '5 - 3 >= 5 - 3',
        '(1 and (1 or 0)) == (1 or not(1))',
        '2 >> 10',
        '1 << 10',
    ]
    nsp = NumericStringParser(dict_var)
    for e in expressions:
        try:
            # 'xor' is not Python syntax, so eval() may raise — report None then.
            answer = int(eval(e))
        except:
            answer = None
        print(f'{e} = {nsp.eval(e)} (Correct answer: {answer})')
|
class Trap(object):
    """Hold a trap's state and the effect it yields when triggered.

    Known trap kinds:
    * Holzbalken
    * Vereister See
    """

    def __init__(self, effect):
        # Effect payload associated with this trap.
        self.effect = effect

    def snap(self):
        """Trigger the trap. Currently a no-op placeholder."""
        pass
|
# -*- coding: utf-8 -*-
"""
@Time : 2020-09-09 16:59
@Author : QDY
@FileName: 164. 最大间距.py
@Software: PyCharm
"""
"""
给定一个无序的数组,找出数组在排序之后,相邻元素之间最大的差值。
如果数组元素个数小于 2,则返回 0。
示例1:
输入: [3,6,9,1]
输出: 3
解释: 排序后的数组是 [1,3,6,9], 其中相邻元素 (3,6) 和 (6,9) 之间都存在最大差值 3。
示例2:
输入: [10]
输出: 0
解释: 数组元素个数小于 2,因此返回 0。
说明:
你可以假设数组中所有元素都是非负整数,且数值在 32 位有符号整数范围内。
请尝试在线性时间复杂度和空间复杂度的条件下解决此问题。
"""
import math
class Solution:
    def maximumGap(self, nums) -> int:
        """Return the maximum difference between successive elements of
        sorted(nums); 0 when nums has fewer than two elements.

        Bucket-based approach aiming at linear time/space: only each
        bucket's min/max are kept, and only gaps between consecutive
        non-empty buckets are compared.
        """
        n = len(nums)
        if n <= 1: return 0
        # Straightforward O(n log n) version, kept for reference:
        # nums.sort()
        # res = 0
        # for i in range(1,n):
        #     res = max(res,nums[i]-nums[i-1])
        # return res
        # Bucket sort
        max_, min_ = max(nums), min(nums)
        if n == 2: return max_ - min_
        if min_ == max_: return 0
        gap = math.ceil((max_ - min_) / (n - 1))  # split into n-1 buckets, each of width gap
        bucket_min = [float('inf')] * (n - 1)  # record only each bucket's min ...
        bucket_max = [-float('inf')] * (n - 1)  # ... and max
        for i in range(n):
            if nums[i] == max_:  # special case: max_ would index past the last bucket
                bucket_min[-1] = min(bucket_min[-1], max_)
                continue
            # buckets: [min_,min_+gap),[min_+gap,min_+2*gap),...[min_+(n-2)*gap,min_+(n-1)*gap)
            index = (nums[i] - min_) // gap
            bucket_max[index] = max(nums[i], bucket_max[index])
            bucket_min[index] = min(nums[i], bucket_min[index])
        res = 0
        # Candidate gaps: next non-empty bucket's min minus this bucket's max.
        for i in range(n - 1):
            if bucket_max[i] != -float('inf'):
                prev_max = bucket_max[i]
                for j in range(i + 1, n - 1):
                    if bucket_min[j] != float('inf'):
                        res = max(res, bucket_min[j] - prev_max)
                        prev_max = bucket_max[j]
                        break
        return res
|
import json
from collections import defaultdict

# Inspect the pre-split MdB (member of parliament) JSON dump and print a few
# sanity statistics about suspicious forename entries and duplicate surnames.
with open("./resources/plenarprotokolle/group_1/splitted/mdb.json") as f:
    mdb = json.load(f)
print(f"mdb file contains a total of {len(mdb)}")
# Keep only entries that carry extraction debug information.
mdb = {
    k: v
    for k, v in mdb.items()
    if "debug_info" in v}
print(f"mdb file contains a total of with a debug_info key {len(mdb)}")
# Forenames that start lowercase or with '.' are likely extraction artifacts.
mdbs_with_lower_forename = {k: v for k, v in mdb.items() if v["forename"][0].islower()}
print(f"mdb file contains {len(mdbs_with_lower_forename)} with a forename starting with a lower letter")
mdb_dot_start = {k: v for k, v in mdb.items() if v["forename"].startswith(".")}
print(f"mdb file contains {len(mdb_dot_start)} with a forename starting with a '.'")
# Spot check on one specific surname.
mdb_gaulands = {k: v for k, v in mdb.items() if v["surname"] == "Gauland"}
print(f"mdb file contains {len(mdb_gaulands)} gaulands")
# Group records by surname to count ambiguous (shared) surnames.
mdbs_by_surname = defaultdict(list)
for mdb_id, data in mdb.items():
    mdbs_by_surname[data["surname"]].append(data)
mdbs_with_same_surname = {k: v for k, v in mdbs_by_surname.items() if len(v) > 1}
print(f"mdb file contains {len(mdbs_with_same_surname)} mdbs with the same surname")
|
class Solution:
    """Longest palindromic substring via centre expansion (LeetCode 5)."""

    def longestPalindrome(self, s: str) -> str:
        """Return the longest palindromic substring of s.

        The string is interleaved with '#' so even- and odd-length
        palindromes can both be handled by a single odd-centre expansion.
        """
        # "abba" -> "#a#b#b#a#"; empty string stays a lone "#".
        expanded = "#" + "".join(ch + "#" for ch in s)
        # Track the centre and half-length of the best palindrome seen.
        best_center = 0
        best_half = 0
        for center in range(len(expanded)):
            half = self.central(expanded, center)
            if half > best_half:
                best_center = center
                best_half = half
        window = expanded[best_center - best_half: best_center + best_half]
        # Strip the '#' separators to recover the answer.
        return "".join(ch for ch in window if ch != "#")

    def central(self, s, i):
        """Half-length of the longest palindrome in s centred at index i."""
        left, right = i, i
        while left >= 0 and right < len(s) and s[left] == s[right]:
            left -= 1
            right += 1
        return (right - left) // 2 - 1
# Ad-hoc smoke test: "abba" is itself an even-length palindrome.
s = Solution()
print(s.longestPalindrome("abba"))
|
'''
Just a file containing some of the plotting functions used by the notebook
'''
import numpy as np
import matplotlib.pylab as plt
import Node
#function to plot the nodes generated by the algorithm
def plot_nodes_astar(node_list, ymin = None, ymax = None):
L = 0.5
#Add limits if necessary
if ymin != None:
plt.ylim(ymin , ymax)
z = []
x = []
a = []
visited = []
#get z,x,alpha for all nodes
for node in node_list:
state = node.state
visited.append(node.visited)
z.append(state[0])
x.append(state[2])
a.append(state[4])
#calculate pos of mas
z_mass_a = z-L*np.cos(a)
x_mass_a = x-L*np.sin(a)
x_visited = np.array(x)[visited]
z_visited = np.array(z)[visited]
x_mass_visited = np.array(x_mass_a)[visited]
z_mass_visited = np.array(z_mass_a)[visited]
plt.title("Nodes Generated")
plt.ylabel("Z Position")
plt.xlabel("X Position")
plt.axis('equal')
#plot all nodes
plt.scatter(x,z, c = 'C0', label = 'Nodes Not Visited', marker = '.')
#plot visited nodes
plt.scatter(x_visited,z_visited, c = 'C2', label = 'Nodes Visited', marker = '.')
plt.legend(loc = 'best')
#Function for plotting the position of the quadrotor woth suspended payload
def plot_position(result_a,t_a,positions):
z_quad_a = result_a[0,:]
x_quad_a = result_a[2,:]
alpha_a = result_a[4,:]
L = 0.5
z_mass_a = z_quad_a-L*np.cos(alpha_a)
x_mass_a = x_quad_a-L*np.sin(alpha_a)
plt.title("Path Followed by Quadrotor")
plt.plot(x_mass_a, z_mass_a, label = 'Mass', ls ='--', lw = 0.5)
plt.plot(x_quad_a,z_quad_a, label = 'Quadrotor', ls ='--', color = 'tab:blue', lw = 0.5)
for p in positions:
link = plt.Line2D((x_quad_a[p], x_mass_a[p]), (z_quad_a[p], z_mass_a[p]), lw=0.5, color = 'k')
plt.gca().add_line(link)
mass = plt.Circle((x_mass_a[p], z_mass_a[p]), 0.07, fc='k')
plt.gca().add_patch(mass)
plt.xlabel('X Position')
plt.ylabel('Y Position')
#plt.legend(loc = 'best')
#plt.show()
def plot_input_seq(ts, input_seq_a, U, path):
    '''
    Plot the piecewise-constant input sequence as a step plot.

    ts          : sample period in seconds. BUG FIX: the loop below used the
                  undefined global `Tsample`, which raised NameError at
                  runtime; it now uses this parameter.
    input_seq_a : iterable of (vz, vx) command pairs, one per sample
    U           : motion primitives; U[n.input_type].shape[0] is the number
                  of samples a primitive lasts
    path        : nodes whose primitive boundaries are marked with dotted
                  vertical lines
    '''
    # Build step-plot arrays: each command holds for one sample period.
    t = []
    vz = []
    vx = []
    for k, u in enumerate(input_seq_a):
        t.append(k*ts)
        t.append((k+1)*ts)
        vz.append(u[0])
        vz.append(u[0])
        vx.append(u[1])
        vx.append(u[1])
    fig = plt.figure(figsize= [6, 3])
    ax = fig.add_subplot(111)
    fig.subplots_adjust(top=0.85)
    #ax.set_title('Desired Input Sequence')
    ax.set_title(r'Input Commands')
    ax.set_xlabel(r'Time $t_{s}$ (s)')
    ax.set_ylabel('Input')
    #ax.axis([-0.1, 3.5, -5.9, 5.9])
    # Dotted vertical lines at the start and at every primitive boundary.
    t_pos = 0
    plt.plot([0,0],[-6,6], color = 'grey', ls = ':', lw = 0.5)
    for n in path:
        duration = U[n.input_type].shape[0]
        plt.plot([t_pos + ts*duration,t_pos + ts*duration],[-6,6], color = 'grey', ls = ':', lw = 0.5)
        t_pos += ts*duration
    plt.plot(t, vz, label = r"$\dot{z}_{ref}$")
    plt.plot(t, vx, label = r"$\dot{x}_{ref}$")
    plt.legend(loc = 'best')
    plt.tight_layout()
    plt.show()
# Generated by Django 3.0.8 on 2020-12-04 05:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the Cupon.por_programa flag and the Cupon_Programa join table."""

    dependencies = [
        ('aplication', '0004_matricula'),
    ]
    operations = [
        migrations.AddField(
            model_name='cupon',
            name='por_programa',
            # default=False only backfills existing rows;
            # preserve_default=False drops the default from the model after
            # the migration runs.
            field=models.BooleanField(default=False),
            preserve_default=False,
        ),
        # Many-to-many style link between Cupon and Programa.
        migrations.CreateModel(
            name='Cupon_Programa',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cupon_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='aplication.Cupon')),
                ('programa_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='aplication.Programa')),
            ],
            options={
                'verbose_name': 'Cupón de programa',
                'verbose_name_plural': 'Cupones de programa',
            },
        ),
    ]
|
"""
Copyright (C) 2021 Patrick Maloney
"""
import unittest
from python_meteorologist import forecast as fc
class ForecastTest(unittest.TestCase):
    """Integration tests for python_meteorologist.forecast.

    NOTE(review): test_forecast_properties and test_forecast_periods call
    get_forecast(20017), which presumably hits the live weather service —
    they need network access to pass; confirm before running in CI.
    """

    def test_missing_user_agent(self):
        """Forecaster() without a user agent must raise a TypeError."""
        with self.assertRaises(TypeError) as context:
            fc.Forecaster()
        self.assertTrue('User Agent is required.' in str(context.exception))

    def test_forecast_properties(self):
        """Type and non-emptiness checks on the forecast metadata block."""
        forecaster = fc.Forecaster('Test Application, alertingavian@vivaldi.net')
        forecast = forecaster.get_forecast(20017)
        # properties
        # make sure all are of the correct type
        self.assertEqual(type(forecast.properties.updated), str)
        self.assertEqual(type(forecast.properties.generated_at), str)
        self.assertEqual(type(forecast.properties.update_time), str)
        self.assertEqual(type(forecast.properties.valid_times), str)
        self.assertEqual(type(forecast.properties.elevation), float)
        # make sure all are > 0
        self.assertGreater(len(forecast.properties.updated), 0)
        self.assertGreater(len(forecast.properties.generated_at), 0)
        self.assertGreater(len(forecast.properties.update_time), 0)
        self.assertGreater(len(forecast.properties.valid_times), 0)

    def test_forecast_periods(self): # do i really have to do it again for the hourly test. plz no
        """Every forecast period must expose correctly typed, non-empty fields."""
        forecaster = fc.Forecaster('Test Application, alertingavian@vivaldi.net')
        forecast = forecaster.get_forecast(20017)
        # periods list
        for period in forecast.periods:
            # check type
            self.assertEqual(type(period), fc.Period)
        # individual periods
        for period in forecast.periods:
            # number
            self.assertEqual(type(period.number), int, msg=f'Expected type number: int, actual: {type(period.number)}')
            self.assertGreater(period.number, 0)
            # name
            self.assertEqual(type(period.name), str, msg=f'Expected type name: str, actual: {type(period.name)}')
            self.assertGreater(len(period.name), 0)
            # start_time
            self.assertEqual(type(period.start_time), str, msg=f'Expected type start_time: str, actual: '
                             f'{type(period.start_time)}')
            self.assertGreater(len(period.start_time), 0)
            # end_time
            self.assertEqual(type(period.end_time), str, msg=f'Expected type end_time: str, actual: '
                             f'{type(period.end_time)}')
            self.assertGreater(len(period.end_time), 0)
            # is_day_time
            self.assertEqual(type(period.is_daytime), bool, msg=f'Expected type is_daytime: int, actual: '
                             f'{type(period.number)}')
            # temperature
            self.assertEqual(type(period.temperature), int, msg=f'Expected type temperature: int, actual: '
                             f'{type(period.temperature)}')
            # temp_unit (single letter, e.g. "F")
            self.assertEqual(type(period.temp_unit), str, msg=f'Expected type temp_unit: str, actual: '
                             f'{type(period.temp_unit)}')
            self.assertEqual(len(period.temp_unit), 1)
            # wind_speed
            self.assertEqual(type(period.wind_speed), str, msg=f'Expected type wind_speed: str, actual: '
                             f'{type(period.wind_speed)}')
            self.assertGreater(len(period.wind_speed), 0)
            # wind_direction
            self.assertEqual(type(period.wind_direction), str, msg=f'Expected type wind_direction: str, actual: '
                             f'{type(period.wind_direction)}')
            self.assertGreater(len(period.wind_direction), 0) # what happens if wind speed is 0 and there is no dir
            # icon
            self.assertEqual(type(period.icon), str, msg=f'Expected type icon: str, actual: {type(period.icon)}')
            self.assertTrue('http' in period.icon)
            self.assertGreater(len(period.icon), 0)
            # short_forecast
            self.assertEqual(type(period.short_forecast), str, msg=f'Expected type short_forecast: str, actual: '
                             f'{type(period.short_forecast)}')
            self.assertGreater(len(period.short_forecast), 0)
            # long_forecast
            self.assertEqual(type(period.long_forecast), str, msg=f'Expected type long_forecast: str, actual: '
                             f'{type(period.long_forecast)}')
            self.assertGreater(len(period.long_forecast), 0)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
#coding=utf-8
'''
Created on 2017年8月20日
@author: tjx
'''
import os
import re
import time
def add_meta_data(filename,line):
with open(filename, 'r+') as f:
print "Begin to process {0}".format(filename)
content = f.read()
f.seek(0, 0)
f.write(line.rstrip('\r\n') + '\n' + content)
print "Finish to process {0}\n".format(filename)
f.close()
# CSV header to prepend (Chinese column names: plate ids/types, timestamps,
# checkpoint/lane ids, image ids, flags, ...). Runtime data — left verbatim.
line="车牌号id1,车牌号id2,车牌类型1,车牌类型2,号牌种类,识别时间,上传时间,监测点id,进出口类型,车道id,车身颜色,车辆类型,车标,信息卡编码,违法类型,速度,前端id,行驶方向,图片张数,图片id1,图片id2,图片id3,图片id4,图片id5,前牌是否完成,后牌是否完成,前后牌是否一致,置信度,补传标志,方波长度,分区号,视频URL,保留字段,备用字段1,备用字段2,备用字段3"
# NOTE: `dir` shadows the builtin of the same name.
dir="/home/tjx/20170521_25/25"
#dir=os.getcwd()
print dir
beginTime=time.time()
print "The program begins at {0} \n".format(beginTime)
# Walk every file in `dir` and prepend the header unless its first line
# already starts with the first column name.
for filename in os.listdir(dir):
    filedir=dir+'/'+filename
    f=open(filedir)
    firstline=f.readline()
    #do not reset the metadata
    if not re.match("车牌号id1*", firstline):
        f.close()
        add_meta_data(filedir, line)
    else:
        f.close()
endTime=time.time()
print "The program ends at {0}\n".format(endTime)
print "Use {0} ".format(endTime-beginTime)
#df=pd.read_csv("rImport_rr201705011526_01_4.csv")
#df.info()
|
class Pbs:
    """Class to setup PBS runs in various ways."""
    # class variables shared by all instances

    def __init__(self, hostFile='hosts', startDir=''):
        """
        hostFile : name of the file that receives the unique node list
        startDir : submission directory; defaults to the current directory
        """
        import os
        from datetime import datetime
        # instance variables unique to each instance
        self.hostFile = hostFile
        # BUG FIX: self.startDir was previously assigned only when startDir
        # was empty, leaving the attribute unset for an explicit startDir.
        self.startDir = startDir if startDir else os.getcwd()
        self.jobDir = ''
        self.runDir = ''
        self.clusterName = ''
        # Fallback job id when not running under PBS: a timestamp.
        self.jobId = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
        self.numProcess = 1

    def runSetup(self):
        """
        When our job gets launched from queue to running on a node,
        we need to setup some stuff.
        Query information available in the environment under
        PBS_* and BC_* (if available).
        If this is available, override defaults
        """
        import os
        from datetime import datetime
        import utils
        # Copy starting folder to work directory
        # ==============================================
        # Grab directory where submit was performed
        if 'PBS_O_WORKDIR' in os.environ:
            self.startDir = os.environ['PBS_O_WORKDIR']
        # Extract job id numbers by splitting off before the dot
        if 'PBS_JOBID' in os.environ:
            self.jobId = os.environ['PBS_JOBID'].split('.')[0]
        # Where we want to copy the run folder to
        clusterName = utils.detectLocation()
        if 'PBS_JOBNAME' in os.environ:
            # BUG FIX: previously read os.environ['PBS_JOBSNAME'] (typo),
            # which raised KeyError whenever PBS_JOBNAME was set.
            self.jobName = os.environ['PBS_JOBNAME']
        else:
            self.jobName = 'python'
        if clusterName == '':
            self.jobDir = '.'.join([self.jobName, self.jobId])
        else:
            self.jobDir = '.'.join([self.jobName, self.jobId, clusterName])
        # Make a folder next to the startup folder
        self.runDir = os.path.join(self.startDir, '..', self.jobDir)
        os.mkdir(self.runDir)
        # --------------------------------
        # Store the name of the sorted unique PBS assigned hosts
        # (nodes) to the file hosts
        if 'PBS_NODEFILE' in os.environ:
            nodeFile = os.environ['PBS_NODEFILE']
            with open(nodeFile) as f:
                nodes = f.readlines()
        else:
            nodes = []
        self.numProcess = self.getNumProcess(nodes)
        # Get a unique set of the nodes
        uNodes = set(nodes)
        # Write to a file for mpirun to read.
        # BUG FIX: the file must be opened for writing; the previous
        # read-mode open made writelines() fail.
        with open(self.hostFile, 'w') as f:
            f.writelines(uNodes)

    def getNumProcess(self, nodes=None):
        """Return the MPI task count from the environment or the node list."""
        import os
        if nodes is None:
            nodes = []
        if 'BC_MPI_TASKS_ALLOC' in os.environ:
            # Count by input environment variable
            # This method usable on topaz.
            # BUG FIX: environment values are strings; convert to int so the
            # returned type is consistent with the other branches.
            numProcess = int(os.environ['BC_MPI_TASKS_ALLOC'])
        elif 'PBS_NODEFILE' in os.environ:
            # Count by number of nodes (probably duplicates)
            # This method usable on afit
            """
            Depending on PBS setup, nodes can be a duplicated based on number of processors per node
            e.g.
            nodeX and nodeY have 2 cores each and we requested 4, then nodes would be
            nodeX
            nodeX
            nodeY
            nodeY
            """
            # BUG FIX: list.count() requires an argument; the intent is the
            # number of entries (one per allocated processor).
            numProcess = len(nodes)
        else:
            numProcess = 1
        return numProcess
from pylab import *
myfont = matplotlib.font_manager.FontProperties(fname='微软雅黑.ttf')
mpl.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题
class Line(object):
    """One data series for LineChart: an x -> y mapping ordered by x."""

    def __init__(self, label, capacity):
        self.label = label          # legend label
        self.capacity = capacity    # dict mapping x values to y values
        self.x_list = []
        self.y_list = []
        self.sort()

    def sort(self):
        """Fill x_list with ascending keys and y_list with matching values."""
        data = self.capacity
        self.x_list = sorted(data.keys())
        # Append (rather than rebuild) to keep the original accumulation
        # semantics if sort() is ever called again.
        self.y_list.extend(data[key] for key in self.x_list)
class LineChart(object):
    """Collects Line objects and renders them as line/bar charts."""

    def __init__(self, x_label="x", y_label="y", title=""):
        self.lines = []
        self.x_label = x_label
        self.y_label = y_label
        self.title = title

    def addLine(self, line):
        """Register a Line to be drawn."""
        self.lines.append(line)

    def removeLine(self, line):
        """Remove a previously added Line (raises ValueError if absent)."""
        self.lines.remove(line)

    def showLines(self, show_broken=True, show_bar=False, show_value=False, values=None, is_save=False, is_datetime=False, img="figure"):
        """Render all registered lines.

        show_broken : draw connected line plots
        show_bar    : additionally draw bars
        show_value  : annotate each point with values[i][j]
        values      : per-line annotation lists (required if show_value)
        is_save     : also save the figure to `img`
        is_datetime : treat x values as dates
        img         : file name used when is_save is True
        """
        if not self.lines:
            return
        plt.figure(1)
        for i, line in enumerate(self.lines):
            if show_broken:
                # plt.plot(line.x_list,line.y_list)
                if is_datetime:
                    ax = plt.gca()
                    ax.plot_date(line.x_list, line.y_list, 'o-', label='$'+line.label+'$')
                    ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
                    ax.fmt_xdata = DateFormatter('%Y-%m-%d %H:%M:%S')
                else:
                    plt.plot(line.x_list, line.y_list, 'o-', label='$'+line.label+'$')
            if show_bar:
                plt.bar(line.x_list, line.y_list, alpha=.5)
            if show_value:
                value = values[i]
                for j in range(len(line.x_list)):
                    x = line.x_list[j]
                    y = line.y_list[j]
                    # Slightly offset numeric annotations so they do not
                    # sit on top of the marker.
                    if is_datetime == False:
                        plt.annotate(value[j], xy=(x*94/100, y*101/100))
                    else:
                        plt.annotate(value[j], xy=(x, y))
        plt.legend(loc='upper center', bbox_to_anchor=(0.8, 0.8), fancybox=True)
        plt.xlabel(self.x_label, fontproperties=myfont)
        plt.ylabel(self.y_label, fontproperties=myfont)
        plt.title(self.title, fontproperties=myfont)
        # BUG FIX: savefig must run BEFORE show(); once show() returns, the
        # figure has been consumed and an empty image was written.
        if is_save == True:
            plt.savefig(img)
        plt.show()
import signal
import sys

# Import the CUDA-enabled OpenMOC bindings using the module layout that
# matches the running interpreter's major version.
# For Python 2.X.X
if (sys.version_info[0] == 2):
    import openmoc
    import _openmoc_cuda
    from openmoc_cuda import *
# For Python 3.X.X
else:
    import openmoc.openmoc as openmoc
    import _openmoc_cuda
    from openmoc.cuda.openmoc_cuda import *
# Tell Python to recognize CTRL+C and stop the C++ extension module
# when this is passed in from the keyboard
signal.signal(signal.SIGINT, signal.SIG_DFL)
|
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.layout.viewlets import ViewletBase
# International plone.* community sites linked from the slim bar; the site
# currently being visited is filtered out in SlimbarViewlet.update().
SITES_TO_PUBLISH = [{'title': 'plone.de', 'url': 'http://plone.de'},
                    {'title': 'plone.es', 'url': 'http://plone.es'},
                    {'title': 'plone.fr', 'url': 'http://plone.fr'},
                    {'title': 'plone.it', 'url': 'http://plone.it'},
                    {'title': 'plone.jp', 'url': 'http://plone.jp'},
                    {'title': 'plone.nl', 'url': 'http://plone.nl'},
                    {'title': 'plone.org.br', 'url': 'http://plone.org.br'},
                    {'title': 'plone.org.pl', 'url': 'http://plone.org.pl/'},
                    {'title': 'plone.ro', 'url': 'http://plone.ro'},
                    ]


class SlimbarViewlet(ViewletBase):
    """Viewlet rendering the cross-site navigation ("slim") bar."""

    # Page template used to render the bar.
    index = ViewPageTemplateFile('slimbar.pt')

    def update(self):
        """Compute sites to link, excluding the one currently visited."""
        # request.BASE1 is the Zope site-root URL; matching on 'title'
        # assumes the title string appears in the domain — TODO confirm.
        current_domain = self.request.BASE1
        self.sites_to_publish = [el for el in SITES_TO_PUBLISH if
                                 el['title'] not in current_domain]
|
'''
Question: You have two numbers represented by a linked list where each node contains a single digit. The digits are
stored in reverse order, such that the 1's digit is at the head of the list. Write a function that adds the two numbers
and returns the sum as a linked list.
Example:
Input: 7 1 6
5 9 2
Output:2 1 9
FOLLOW UP:
Suppose that the digits are stored in forward order. Repeat above problem
Example:
Input: 6 1 7
2 9 5
Output:9 1 2
'''
class Node:
    """Singly linked list node holding one decimal digit."""

    def __init__(self, data):
        self.data = data  # digit value stored at this node
        self.next = None  # following node; None at the tail
def solution(h1, h2):
    """Add two numbers stored as reversed-digit linked lists.

    Returns the head of a new list holding the sum, least significant digit
    first. BUG FIX: the carry must be added to the column total BEFORE
    splitting into digit and carry; previously `sum % 10 + carry` could
    yield a 10-valued "digit" node (e.g. 46 + 55 produced [1, 10] instead
    of [1, 0, 1]).
    """
    head_sum = Node(-1)  # dummy head; real result starts at head_sum.next
    prev = head_sum
    carry = 0
    while h1 or h2:
        # Column total from whichever lists still have digits.
        if h1 and h2:
            total = h1.data + h2.data
        elif h2:
            total = h2.data
        else:
            total = h1.data
        total += carry
        prev.next = Node(total % 10)
        carry = total // 10
        prev = prev.next
        # Advance whichever lists are non-empty.
        if h1 and h2:
            h1, h2 = (h1.next, h2.next)
        elif h2:
            h2 = h2.next
        else:
            h1 = h1.next
    if carry:
        prev.next = Node(carry)
    return head_sum.next
def solution2(h1, h2):
    """Recursive variant of solution(): add two reversed-digit lists."""
    anchor = Node(-1)  # dummy head the helper appends to
    solution2_helper(h1, h2, anchor, 0)
    return anchor.next
def solution2_helper(h1, h2, prev_node, carry):
    """Recursive worker for solution2: append digit nodes after prev_node."""
    # Done when both lists are exhausted and there is no carry left.
    if not h1 and not h2 and not carry:
        return None
    total = carry if carry else 0
    if h1:
        total += h1.data
    if h2:
        total += h2.data
    # Emit the current digit; the remaining carry propagates rightward.
    prev_node.next = Node(total % 10)
    new_carry = total // 10
    if h1 and h2:
        solution2_helper(h1.next, h2.next, prev_node.next, new_carry)
    elif h2:
        solution2_helper(None, h2.next, prev_node.next, new_carry)
    elif h1:
        solution2_helper(h1.next, None, prev_node.next, new_carry)
def ll_len(head):
    """Return the number of nodes in the list starting at head."""
    n = 0
    node = head
    while node is not None:
        n += 1
        node = node.next
    return n
def pad(head, count):
    """Prepend `count` zero-digit nodes to the list at head; return new head."""
    for _ in range(count):
        zero = Node(0)
        zero.next = head
        head = zero
    return head
def solution_followup(h1, h2):
    """Add two forward-order digit lists; return the forward-order sum."""
    len1, len2 = ll_len(h1), ll_len(h2)
    # Zero-pad the shorter list so both lists are the same length.
    if len1 < len2:
        h1 = pad(h1, len2 - len1)
    elif len1 > len2:
        h2 = pad(h2, len1 - len2)
    # printLL(h1)
    # printLL(h2)
    carry, head = folloup_helper(h1, h2)
    # A final carry becomes a new most-significant digit.
    if carry:
        new_head = Node(carry)
        new_head.next = head
        head = new_head
    return head
def folloup_helper(h1, h2):
    """Recursively add two equal-length forward-order digit lists.

    Returns (carry_out, head_of_sum_list). BUG FIX: the carry coming back
    from the less significant digits must be added to the column total
    BEFORE splitting into digit and carry; previously
    `sum % 10 + prev_carry` could produce a 10-valued "digit" node
    (e.g. 55 + 46 yielded digits [10, 1] instead of 101).
    """
    if not h1 and not h2:
        return (0, None)
    sum = 0
    sum += h1.data if h1 else 0
    sum += h2.data if h2 else 0
    # Recurse to the less significant digits first.
    if h1 and h2:
        prev_carry, prev_digit_object = folloup_helper(h1.next, h2.next)
    elif h2:
        prev_carry, prev_digit_object = folloup_helper(None, h2.next)
    else:
        prev_carry, prev_digit_object = folloup_helper(h1.next, None)
    total = sum + prev_carry
    n = Node(total % 10)
    n.next = prev_digit_object
    return (total // 10, n)
def createLL(ll):
    """Build a linked list from a non-empty sequence; return its head."""
    head = Node(ll[0])
    tail = head
    for value in ll[1:]:
        tail.next = Node(value)
        tail = tail.next
    return head
def printLL(head):
    """Print the list as d1->d2->...-> followed by a newline."""
    node = head
    while node is not None:
        print(node.data, end='->')
        node = node.next
    print()
if __name__ == '__main__':
    # Reverse-order demo (kept disabled; the follow-up below is active):
    # print("Enter numbers representing Linked Lists: (e.g. '7 1 6' and '5 9 2')")
    # ll = list(map(int,input().split()))
    # ll2 = list(map(int,input().split()))
    # h1 = createLL(ll)
    # h2 = createLL(ll2)
    #
    # printLL(h1)
    # printLL(h2)
    #
    # # get sum
    # sum_head = solution2(h1,h2)
    # printLL(sum_head)
    # --- Follow Up ---
    # Read two forward-order digit lists from stdin, add them, print the sum.
    print("\nFOLLOW UP:\nEnter numbers representing theLinked List: (e.g. '6 1 7')\nLL1:")
    ll = list(map(int,input().split()))
    print("LL2:")
    ll2 = list(map(int,input().split()))
    h1 = createLL(ll)
    h2 = createLL(ll2)
    printLL(h1)
    printLL(h2)
    # get sum
    sum_head = solution_followup(h1,h2)
    print("Output:")
    printLL(sum_head)
from tkinter import Tk, Label, Button, IntVar, DISABLED, NORMAL
from itertools import cycle
from sklearn.utils import shuffle
from PIL import ImageTk, Image
import numpy as np
import pandas as pd
from time import sleep
class Bridge:
    """Tkinter-based contract-bridge table.

    Deals the cards, runs the auction (bidding) and the 13-trick play, and
    optionally renders everything in a GUI. Call init() once to build the
    table; the Start/Restart button (or start()) begins a game.
    """

    def __init__(self, gui=True):
        # The Tk root is created even for gui=False because the IntVar
        # instances built in init() need a root window.
        self.root = Tk()
        self.gui = gui
        self.game_is_initialized = False

    def start(self):  # Restarts the game without initializing basic blocks
        """Reset per-game state, shuffle/deal, then run bidding and play.

        NOTE(review): assumes init() has already run (uses self.Buttons,
        self.card_df, self.agents); game_is_initialized is never checked.
        """
        print('Game start')
        #### Bidding system (Non-gui)
        self.last_bid = None
        # Seeded with three placeholder bids so the "three passes end the
        # auction" check in start_bidding() can always look at a 3-long tail.
        self.bidding_array = ["-1_-1","-1_-1","-1_-1"]
        self.NSTricks = 0
        self.WETricks = 0
        if self.gui:
            self.reset_bidding_gui()
        #### Suffle the cards and assign to each player
        for b in self.Buttons.ravel():
            b.config(state=NORMAL)
        self.card_df['played'] = False
        self.card_df = shuffle(self.card_df)
        # Split the shuffled deck into four 13-card hands, each sorted by suit.
        sorted_dfs = []
        for i in range(4):
            tmp_df = self.card_df.iloc[13*i:13*(i+1)]
            sorted_dfs.append(tmp_df.sort_values('suit'))
            # print("player",i,sorted_dfs[-1].index)
        self.card_df = pd.concat(sorted_dfs)
        self.card_df['player'] = [self.player[0]]*13 + [self.player[1]]*13 + [self.player[2]]*13 + [self.player[3]]*13
        # print(self.card_df)
        if self.gui:
            self.reset_card_gui()
        #### Start bidding
        self.start_bidding()
        #### Start game play
        if self.gui:
            self.ResultDisplay.config(text='Game\nis\nON')
        self.start_game_play()

    def reset_bidding_gui(self):
        """Re-enable all bid buttons and clear the bidding history display."""
        for b in self.bidding_buttons.ravel():
            b.config(state=NORMAL)
        self.PassButton.config(state=NORMAL)
        # The first 4 labels are the player-name header row; keep those.
        for b in self.bidding_display.ravel()[4:]:
            b.config(text=' ')
        self.BidDisplay.config(text=f'Bid Won by: \nBid: ')
        self.ResultDisplay.config(text='Complete\nthe bidding')
        self.TrickDisplay.config(text= f'Tricks:\nNS = {self.NSTricks}\nWE = {self.WETricks}')

    def reset_card_gui(self):
        """Show every hand face-up again and disable clicks until play starts."""
        self.clean_middle()
        for b in self.Buttons.ravel():
            b.config(state=DISABLED)
        for d_i, player in enumerate(self.player):
            for i in range(13):
                self.Buttons[d_i, i].config(image=self.card_df.iloc[13*d_i+i]['img'])

    def start_bidding(self):
        """Run the auction until three consecutive passes follow a real bid,
        then determine the final contract, trump suit and bid winner."""
        self.bid_players = cycle(self.agents)
        while set(self.bidding_array[-3:]) != set(['pass']) or self.last_bid is None:
            # print('lstbid',self.last_bid,'barray',self.bidding_array[-3:])
            if self.gui:
                # Block until the Next button advances the variable.
                self.root.wait_variable(self.next_var)
            bid, bid_idx = next(self.bid_players).play_bid(self.last_bid)
            if bid != "pass":
                self.last_bid = bid
            self.bidding_array.append(bid)
            if self.gui:
                self.update_bidding_gui(bid, bid_idx)
        # The winning bid is the 4th from the end (followed by 3 passes).
        self.final_bid = self.bidding_array[-4]
        self.trump = self.bidding_array[-4][-1]
        base_idx = len(self.bidding_array)
        # If a suit was bid by the partner first, the partner becomes declarer,
        # unless the contract is "NT" (No Trump).
        if self.bidding_array[-4][-1] == self.bidding_array[-6][-1] and self.bidding_array[-4][-2:] != 'NT':
            self.BidWinner = self.player[(base_idx+3)%4]
            self.bid_winner_idx = (base_idx+3)%4
            self.current_player = (base_idx+4)%4
        else:
            self.BidWinner = self.player[(base_idx+1)%4]
            self.bid_winner_idx = (base_idx+1)%4
            self.current_player = (base_idx+2)%4
        print('BidWinner',self.BidWinner)
        print('Final bid',self.final_bid)
        if self.gui:
            self.finish_bidding_gui()

    def update_bidding_gui(self, bid, bid_idx):
        """Show the latest bid and disable all now-illegal (lower) bids."""
        self.bidding_display.ravel()[len(self.bidding_array)].config(text=bid)
        if bid != 'pass':
            # Everything up to and including the bid just made is disabled.
            for i in range(bid_idx+1):
                self.bidding_buttons[:,1:].ravel()[i].config(state=DISABLED)

    def finish_bidding_gui(self):
        """Lock the bidding panel and announce the winning contract."""
        for b in self.bidding_buttons.ravel():
            b.config(state=DISABLED)
        self.PassButton.config(state=DISABLED)
        self.BidDisplay.config(text=f'Bid Won by: {self.BidWinner}\nBid: {self.final_bid}')
        self.ResultDisplay.config(text='Complete\nthe bidding')

    def start_game_play(self):
        """Play 13 tricks, count them for NS/WE and decide the winner."""
        # current_player was set to (bid winner + 1) % 4, so index -1 is the
        # declarer (Python's negative indexing makes the wrap-around work).
        self.agents[self.current_player-1].declarer = True
        for i in range(13): # play 13 tricks
            table = []
            # Seats in play order, starting from the trick leader.
            player_idx = [idx%4 for idx in range(self.current_player, self.current_player+4)]
            for j in player_idx:
                cards = self.card_df.iloc[j*13:(j+1)*13]
                unplayed_cards = cards[cards['played']==False].index
                if self.gui:
                    self.root.wait_variable(self.next_var)
                if j == self.current_player:
                    # New trick: clear the middle of the table.
                    self.clean_middle()
                played_card = self.agents[j].play_move(unplayed_cards, table)
                self.card_df.loc[played_card, 'played'] = True
                table.append(played_card)
                print(f'{played_card}', end=' ')
                if self.gui:
                    self.update_card_gui(played_card)
            print()
            # The trick winner leads the next trick.
            self.current_player = player_idx[self.argbest(table)]
            if self.current_player in [0,2]:
                self.NSTricks += 1
            else:
                self.WETricks += 1
            if self.gui:
                self.update_tricks_gui()
        # Decide who won the game: declaring side needs (contract level + 6)
        # tricks to make the contract.
        if self.bid_winner_idx in [0,2]:
            if self.NSTricks >= int(self.final_bid.split('_')[0])+6:
                self.Winner = 'NS'
            else:
                self.Winner = 'WE'
        else:
            if self.WETricks >= int(self.final_bid.split('_')[0])+6:
                self.Winner = 'WE'
            else:
                self.Winner = 'NS'
        print('NS tricks',self.NSTricks)
        print('WE tricks',self.WETricks)
        print('Winner is', self.Winner)
        if self.gui:
            self.ResultDisplay.config(text=f'{self.Winner}\nWon the Game')

    def argbest(self, table):
        """Index (within table) of the card that wins the trick.

        Cards following the led suit rank by face weight; trump cards are
        boosted by +1000 and off-suit discards penalised by -1000.
        """
        weights = [self.get_weight(table[0])]
        suit = table[0][-1]
        for card in table[1:]:
            if card[-1] == suit:
                weights.append(self.get_weight(card))
            else:
                if card[-1] == self.trump:
                    weights.append(self.get_weight(card)+1000)
                else:
                    weights.append(self.get_weight(card)-1000)
        return np.argmax(weights)

    def update_tricks_gui(self):
        """Announce the trick winner and refresh the trick counters."""
        self.ResultDisplay.config(text=self.player[self.current_player]+'\nwon the trick')
        self.TrickDisplay.config(text= f'Tricks:\nNS = {self.NSTricks}\nWE = {self.WETricks}')

    def update_card_gui(self, played_card):
        """Blank the played card in its hand and show it mid-table."""
        idx = self.card_df.index.get_loc(played_card)
        self.Buttons.ravel()[idx].config(image=self.mscaled_img)
        # idx//13 maps the card's row to its owner's seat (0..3).
        self.MidLabels[idx//13].config(image=self.card_df['img'].iloc[idx])

    def init(self): # One time initialization
        """Build the deck dataframe, load images and lay out all widgets.

        Blocks in root.mainloop() at the end, so call it last.
        """
        ############## Main line of code. Set who all are playing this game
        self.agents = [RandomPlayer() for _ in range(4)]
        ###############################################
        # Non-gui components
        ###############################################
        # 52-row deck table; one row per card with suit/face/image/owner/state.
        self.card_df = pd.DataFrame(np.zeros((52,6))*np.nan,
                                    columns=['id', 'suit', 'face', 'img', 'player', 'played'], dtype='object')
        self.card_type = ['C', 'S', 'D', 'H'] # Club, Spade, Diamond, Heart
        self.player = ['S', 'W', 'N', 'E'] # South, West, North, East
        self.card_face = ['A'] + list(map(str, range(2, 10+1))) + ['J', 'Q', 'K'] # A, 1 to 10, J, Q, K
        ###############################################
        # gui components
        ###############################################
        self.root.geometry('1200x800')
        self.bidding_scale = 0.03   # image scale for bid-suit icons
        self.scale = 0.12           # image scale for playing cards
        self.card_path = 'cards/png1/'
        self.middle_card_path = 'cards/png1/gray_back.png'
        self.w, self.h = Image.open(self.card_path+'10C.png').size
        mimg = Image.open(self.middle_card_path)
        # Face-down card image used as a placeholder.
        self.mscaled_img = ImageTk.PhotoImage(mimg.resize((int(self.w*self.scale), int(self.h*self.scale))))
        self.played = IntVar(self.root)
        self.next_var = IntVar(self.root)   # bumped by the Next button
        # Loading all card images (gui)
        ind1 = 0
        for card_type in self.card_type:
            for card_face in self.card_face:
                img = Image.open(f'{self.card_path}{card_face}{card_type}.png')
                scaled_img = ImageTk.PhotoImage(img.resize((int(self.w*self.scale), int(self.h*self.scale))))
                self.card_df.loc[ind1, 'id'] = card_face+card_type
                self.card_df.loc[ind1, 'suit'] = card_type
                self.card_df.loc[ind1, 'face'] = card_face
                self.card_df.loc[ind1, 'img'] = scaled_img
                ind1 += 1
        self.card_df.set_index('id', inplace=True)
        ### Start button (gui)
        self.StartButton = Button(self.root, text='Start/Restart', font=('Arial', 25))
        self.StartButton.place(x=0,y=0)
        self.StartButton.configure(command = self.start)
        ### Next button (gui): advances the wait_variable-driven game loop.
        self.NextButton = Button(self.root, text='Next', font=('Arial', 25))
        self.NextButton.place(x=0,y=50)
        def inc_robo_play():
            self.next_var.set(self.next_var.get()+1)
        self.NextButton.configure(command = inc_robo_play)
        ### Bidding system (gui): 7 levels x (row label + 5 suit buttons).
        rows, columns = 7, 6
        xoffset, yoffset = 10, 100
        xgap = 30
        ygap = 40
        w, h = Image.open(self.card_path+'NT.png').size
        trupt_names = ['Club', 'Diamond', 'Heart', 'Spade', 'NT']
        trupt_imgs = [Image.open(self.card_path+name+'.png') for name in trupt_names]
        trupt_imgs = [ImageTk.PhotoImage(img.resize((int(w*self.bidding_scale),
                                                     int(h*self.bidding_scale)))) for img in trupt_imgs]
        self.bidding_buttons = np.empty((rows,columns), dtype='object')
        ind2 = 0  # NOTE(review): unused
        for row in range(rows):
            for column in range(columns):
                if column==0:
                    # Column 0 holds the bid level label (1..7).
                    self.bidding_buttons[row, column] = Label(self.root, text=str(row+1), font=('Arial',16))
                    self.bidding_buttons[row, column].place(x=xoffset+column*xgap, y=yoffset+row*ygap)
                else:
                    self.bidding_buttons[row, column] = Button(self.root, image=trupt_imgs[column-1])
                    self.bidding_buttons[row, column].place(x=xoffset+column*xgap, y=yoffset+row*ygap)
        # Relies on row/column keeping their final loop values.
        self.PassButton = Button(self.root, text = 'pass')
        self.PassButton.place(x=xoffset+column*xgap-50, y=yoffset+row*ygap+30)
        ##########################################
        ## Bidding Display (gui): header row of seats + up to 9 bid rounds.
        ##########################################
        rows, columns = 10, 4
        xoffset, yoffset = 10, 400
        xgap = 70
        ygap = 20
        self.bidding_display = np.empty((rows,columns), dtype='object')
        for row in range(rows):
            for column in range(columns):
                if row==0:
                    self.bidding_display[row, column] = Label(self.root, text=self.player[column], font=('Arial', 12))
                    self.bidding_display[row, column].place(x=xoffset+column*xgap, y=yoffset+row*ygap)
                else:
                    self.bidding_display[row, column] = Label(self.root, text=" ", font=('Arial', 12))
                    self.bidding_display[row, column].place(x=xoffset+column*xgap, y=yoffset+row*ygap)
        ##############################
        # Cards display (gui)
        ##############################
        self.Buttons = np.empty((4, 13), dtype='object')
        #################### Placing cards in South, West, North, East
        # Per-seat layout: fixed x for W/E columns, fixed y for N/S rows.
        offset = [500, 50, 500, 50]
        gap = [30, 30, 30, 30]
        y = [500, None, 10, None]
        x = [None, 350, None, 1000]
        for d_i in range(4): # d_i = direction index (S, W, N, E)
            idx = np.argsort(self.card_df.iloc[13*d_i:13*(d_i+1)]['suit'].values)+(13*d_i)
            for i, ix in enumerate(idx):
                self.Buttons[d_i, i] = Button(self.root, image=self.card_df.iloc[ix]['img'])
                ### Setting several useful properties
                self.Buttons[d_i, i].place(x=x[d_i] if x[d_i] else offset[d_i]+gap[d_i]*i,
                                           y=y[d_i] if y[d_i] else offset[d_i]+gap[d_i]*i)
                self.Buttons[d_i, i].img = self.mscaled_img
                # Configure to call a function on clicking and return self.
                # NOTE(review): card_button_func is not defined in this class
                # as shown — confirm it exists elsewhere before enabling
                # human play.
                button_func = lambda button=self.Buttons[d_i, i]: self.card_button_func(button)
                self.Buttons[d_i, i].configure(command = button_func)
        ### Labeling four directions
        S = Label(text='S', bg='red', fg='white', font=('Arial', 22))
        N = Label(text='N', bg='red', fg='white', font=('Arial', 22))
        W = Label(text='W', bg='red', fg='white', font=('Arial', 22))
        E = Label(text='E', bg='red', fg='white', font=('Arial', 22))
        S.place(x=offset[0]+gap[0]*6.5, y=y[0]-50)
        N.place(x=offset[2]+gap[2]*6.5, y=y[2]+140)
        W.place(x=x[1]+100, y=offset[1]+gap[1]*6.5)
        E.place(x=x[3]-30, y=offset[3]+gap[3]*6.5)
        ## Placing four cards in the middle (one slot per seat).
        self.MidLabels = [None, None, None, None]
        x = [650, 600, 650, 700]
        y = [300, 250, 200, 250]
        for i in range(4):
            self.MidLabels[i] = Label(self.root, image=self.mscaled_img)
            self.MidLabels[i].place(x=x[i], y=y[i])
        ## Trick board
        self.NSTricks = 0
        self.WETricks = 0
        self.TrickDisplay = Label(self.root, text=f'Tricks:\nNS = {self.NSTricks}\nWE = {self.WETricks}',
                                  font=("Arial", 14), bg='blue', fg='white')
        self.TrickDisplay.place(x=210, y=170)
        ## Result board
        self.ResultDisplay = Label(self.root, text='Press Start',
                                   font=("Arial", 20), bg='black', fg='white')
        self.ResultDisplay.place(x=300, y=560)
        ## Bidding final board
        self.BidDisplay = Label(self.root, text=' ',
                                font=("Arial", 14), bg='blue', fg='white')
        self.BidDisplay.place(x=210, y=100)
        self.game_is_initialized = True
        self.root.mainloop()

    def clean_middle(self):
        """Reset all four middle-of-table slots to the face-down image."""
        for label in self.MidLabels:
            label.configure(image=self.mscaled_img)

    def get_weight(self, card_name): # Get pure weights of cards 2-3---13-14 -> 2-3---K-A
        """Numeric rank of a card id like '10C' or 'QS' (2..14, ace high)."""
        if card_name[0] == 'J':
            return 11
        elif card_name[0] == 'Q':
            return 12
        elif card_name[0] == 'K':
            return 13
        elif card_name[0] == 'A':
            return 14
        else:
            return int(card_name[:-1])
class RandomPlayer:
    """Bridge agent that bids and plays uniformly at random, following the
    led suit whenever possible."""

    def __init__(self, declarer=False):
        # Map suit letter -> 1-based rank, used to linearise (level, suit) bids.
        self.type_dict = {c:i for i,c in enumerate(['C', 'D', 'H', 'S', 'NT'], 1)}
        self.bid_types = ['C', 'D', 'H', 'S', 'NT']
        # Decreasing probabilities of bidding higher levels (7 levels x 5 suits).
        self.card_proba = 1/np.arange(1,8).reshape(-1,1).repeat(5,1)**2
        self.pass_proba = 10  # relative weight of passing
        self.declarer = declarer

    def play_bid(self, last_bid):
        """Return (bid, bid_idx): a random legal raise, or ('pass', None).

        Bids are strings like '3_H'; bid_idx is the bid's position in the
        flattened (level, suit) grid, or None for a pass.
        """
        bid_idx = None
        if last_bid is not None:
            # Only bids strictly above the last one are legal.
            last_card, last_type = last_bid.split('_')
            begin_idx = (int(last_card)-1)*5 + self.type_dict[last_type]
        else:
            begin_idx = 0
        all_proba = np.array([self.pass_proba] + self.card_proba.ravel().tolist()[begin_idx:])
        norm_all_proba = all_proba/np.sum(all_proba)
        choice = np.random.choice(len(norm_all_proba), p=norm_all_proba, size=1)[0]
        if choice == 0:
            bid = 'pass'
        else:
            bid_idx = begin_idx + choice - 1
            bid = str(bid_idx//5+1) +'_'+ self.bid_types[bid_idx%5]
        return bid, bid_idx

    def play_move(self, cards, table, dummy_cards=None):
        """Pick a random card from `cards`, following the led suit if able.

        The declarer and defender branches of the original were literally
        identical, so they are collapsed here (same behaviour, same RNG
        consumption). dummy_cards is accepted but unused.
        """
        if len(table) == 0:
            # Leading the trick: anything goes.
            return np.random.choice(cards)
        led_suit = table[0][-1]
        in_suit = [card for card in cards if card[-1] == led_suit]
        if len(in_suit) == 0:
            return np.random.choice(cards)
        return np.random.choice(in_suit)
|
import requests,time, urllib, urllib2, httplib, json, pymongo
from bs4 import BeautifulSoup
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# Python 2 script: scrapes the top story on meneame.net and logs it to a
# ThingSpeak channel (HTTP POST) and a local MongoDB collection.
req=requests.get('https://www.meneame.net/')
soup=BeautifulSoup(req.text,"html5lib")
# Drill down to the first news item's "clics" (click count) node.
container_clics=soup.body.find_next('div', {'id': 'container'})
newswrap_clics=container_clics.find_next('div',{'id':'newswrap'})
newsummary_clics=newswrap_clics.find_next('div', {'class':'news-summary'})
newsbody_clics=newsummary_clics.find_next('div', {'class':'news-body'})
newsshakeit_clics=newsbody_clics.find_next('div', {'class':'news-shakeit mnm-published'})
clics_string=newsshakeit_clics.find_next('div', {'class':'clics'})
clics_words=clics_string.text.split(" ")
# clics_words[2] is assumed to hold the numeric click count -- TODO confirm
# against the live page markup.
print(clics_words[2])
# Second walk from the top, this time down to the vote ("meneos") counter.
variable_meneos=soup.body.find_next('div', {'id': 'variable'})
wrap_meneos=variable_meneos.find_next('div', {'id': 'wrap'})
container_meneos=wrap_meneos.find_next('div', {'id': 'container'})
newswrap_meneos=container_meneos.find_next('div',{'id':'newswrap'})
newsummary_meneos=newswrap_meneos.find_next('div', {'class':'news-summary'})
newsbody_meneos=newsummary_meneos.find_next('div', {'class':'news-body'})
newsshakeit_meneos=newsbody_meneos.find_next('div', {'class':'news-shakeit mnm-published'})
meneos_string=newsshakeit_meneos.find_next('div', {'class':'votes'})
meneos_words=meneos_string.text.split(" ")
print(meneos_words[1])
# Story title, re-encoded to UTF-8 for printing/storage.
center_content_titulo=newsbody_meneos.find_next('div', {'class':'center-content'})
titulo_string_ascii=center_content_titulo.find_next('a')
titulo_string=titulo_string_ascii.text.encode('utf8')
print(titulo_string)
# Scrape timestamp (local time).
hora=time.strftime("%H:%M:%S")
print(hora)
fecha=time.strftime("%d/%m/%y")
print(fecha)
#WRITE API KEY: UOXSWX0IVFY9O3Y9
#READ API KEY: XPS2IBDDGV9QU4G2
# NOTE(review): API keys are hard-coded in source; consider moving them to
# configuration/environment.
write_key="UOXSWX0IVFY9O3Y9"
# POST the scraped fields to the ThingSpeak update endpoint.
params=urllib.urlencode({'field1':titulo_string,'field2':clics_words[2],'field3':meneos_words[1],'field4':hora, 'field5':fecha,'key':write_key})
headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain"}
conn = httplib.HTTPConnection("api.thingspeak.com:80")
conn.request("POST", "/update", params, headers)
response = conn.getresponse()
data=response.read()
conn.close()
# Persist the same record into the local MongoDB "meneame" database.
mongo_client=pymongo.MongoClient()
mongo_database=mongo_client["meneame"]
col_mongo=mongo_database["Noticia"]
noticia={"Titulo:":titulo_string, "Clics:":clics_words[2],"Meneos:":meneos_words[1], "Hora:": hora, "Fecha": fecha}
col_mongo.insert_one(noticia)
# Uncomment to empty the MongoDB database:
#col_mongo.remove()
#print(col_mongo.find().count())
|
from random import randint

# Number-guessing game: the player has 7 wrong attempts to find a random
# number in [0, 100] (randint's bounds are inclusive).
bingo = randint(0, 100)
n = 0  # wrong guesses so far
loop = True
while loop:
    num = int(input("Input A Number (0-100) = "))
    if num < bingo:
        print("It's too small")
        n += 1
    elif num > bingo:
        print("It's too big")
        n += 1
    else:
        # Fixed: the original tested `n == bingo` (guess COUNT vs target),
        # so a correct guess was almost never recognized as a win.
        print("Bingo")
        loop = False
    if n == 7:
        print("You lose")
        break
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import tensorflow as tf
from tensorflow import keras
from tqdm import tqdm
from tensorflow.keras.utils import plot_model
# Dataset location
# APTOS 2019 Blindness Detection: CSVs of image ids + diagnosis labels, plus
# directories of raw and (optionally) preprocessed (resized) PNGs.
train_csv = "/home/resl/Dev/Datasets/APTOS-2019-Blindness-Detection-Dataset/train.csv"
test_csv = "/home/resl/Dev/Datasets/APTOS-2019-Blindness-Detection-Dataset/test.csv"
train_dir = "/home/resl/Dev/Datasets/APTOS-2019-Blindness-Detection-Dataset/train_images/"
test_dir = "/home/resl/Dev/Datasets/APTOS-2019-Blindness-Detection-Dataset/test_images/"
prep_train_dir = "/home/resl/Dev/Datasets/APTOS-2019-Blindness-Detection-Dataset/train_prep_images/"
prep_test_dir = "/home/resl/Dev/Datasets/APTOS-2019-Blindness-Detection-Dataset/test_prep_images/"
# Prefer the already-resized cache when present; otherwise create the cache
# directories and dump resized copies during the first loading pass.
use_preprocessed = True
dump_preprocessed = True
if use_preprocessed and os.path.isdir(prep_train_dir) and os.path.isdir(prep_test_dir):
    print("Using preprocessed images")
    train_dir = prep_train_dir
    test_dir = prep_test_dir
    dump_preprocessed = False
else:
    if not os.path.isdir(prep_train_dir):
        os.mkdir(prep_train_dir)
    if not os.path.isdir(prep_test_dir):
        os.mkdir(prep_test_dir)
# Dataset Analysis
train_data = pd.read_csv(train_csv)
test_data = pd.read_csv(test_csv)
# Diabetic-retinopathy severity grades 0-4.
class_labels = ["No DR", "Mild", "Moderate", "Severe", "Proliferative DR"]
print("Training Size: ", len(train_data))
print("Testing Size: ", len(test_data))
plt.suptitle("Class Distribution")
# NOTE(review): value_counts() orders by frequency, not by class index, so
# the bars may not line up with the xtick labels -- confirm intended.
plt.bar(range(5), train_data.diagnosis.value_counts())
plt.xticks(range(5), class_labels, rotation=50)
plt.ylabel("Samples")
plt.show()
# Some Visualizations
# Target image shape fed to the network: 128x128 RGB.
im_shape = (128, 128, 3)
def load_image(path, dim=None):
    """Open the image at *path*; optionally resize it to *dim* (w, h) using LANCZOS."""
    img = Image.open(path)
    return img if dim is None else img.resize(dim, resample=Image.LANCZOS)
num_classes = len(class_labels)
num_samples = 5
# Show num_samples random examples per severity class in a single grid.
figure = plt.figure()
for i, label in enumerate(class_labels):
    samples = train_data.loc[train_data.diagnosis==i, 'id_code'].sample(num_samples,).reset_index(drop=True)
    for j in range(num_samples):
        im = load_image(train_dir + str(samples[j]) + ".png", (im_shape[0], im_shape[1]))
        # Grid is num_classes x num_classes; works because num_samples == num_classes (= 5).
        fig_ax = figure.add_subplot(num_classes, num_classes, i+5*j+1)
        fig_ax.set_xticks([], [])
        fig_ax.set_yticks([], [])
        if j==0:
            fig_ax.set_title(class_labels[i])
        fig_ax.imshow(im)
plt.show()
# Load every training image (resized to im_shape) into one big array.
images = np.empty(shape=(len(train_data.index), im_shape[0], im_shape[1], 3))
labels = np.empty(shape=(len(train_data.index),))
for i, row in tqdm(train_data.iterrows()):
    im = load_image(train_dir + str(row.id_code) + ".png", (im_shape[0], im_shape[1]))
    images[i] = np.array(im)
    labels[i] = row.diagnosis
    if dump_preprocessed:
        # Cache the resized image so later runs skip the expensive resize.
        im.save(prep_train_dir + str(row.id_code) + ".png")
# First 20% as validation split, remainder for training (no shuffling here).
validation_images = images[:int(len(images)*0.2)]
validation_labels = labels[:int(len(images)*0.2)]
train_images = images[int(len(images)*0.2):]
train_labels = labels[int(len(images)*0.2):]
del(images)
print(train_images.shape, train_labels.shape)
print(validation_images.shape, validation_labels.shape)
# Time (original+saving preprocessed) : 6 min
# Time (preprocessed): 2 s (Awesome!!)
# CNN: 5 conv layers (2 with max-pooling), then two 2048-unit dense layers
# and a softmax over the 5 severity classes.
model = keras.Sequential([
    keras.layers.Conv2D(
        input_shape= im_shape,
        filters=48,
        kernel_size=11,
        padding="same",
        strides=4,
        activation=tf.nn.relu,
        name="conv-1",
    ),
    keras.layers.MaxPool2D(
        pool_size=(3,3),
        strides=1,
        padding="same",
        name="max-pool-1",
    ),
    keras.layers.Conv2D(
        filters=128,
        kernel_size=5,
        padding="same",
        strides=4,
        activation=tf.nn.relu,
        name="conv-2",
    ),
    keras.layers.MaxPool2D(
        pool_size=(3,3),
        strides=1,
        padding="same",
        name="maxpool-2",
    ),
    keras.layers.Conv2D(
        filters=192,
        kernel_size=3,
        # padding="same",
        # strides=4,
        activation=tf.nn.relu,
        name="conv-3",
    ),
    keras.layers.Conv2D(
        filters=192,
        kernel_size=3,
        # padding="same",
        # strides=4,
        activation=tf.nn.relu,
        name="conv-4",
    ),
    keras.layers.Conv2D(
        filters=128,
        kernel_size=3,
        # padding="same",
        # strides=4,
        activation=tf.nn.relu,
        name="conv-5",
    ),
    keras.layers.Flatten(),
    keras.layers.Dense(2048, activation=tf.nn.relu),
    keras.layers.Dense(2048, activation=tf.nn.relu),
    keras.layers.Dense(5, activation=tf.nn.softmax)
])
# Integer labels -> sparse categorical crossentropy.
model.compile(
    optimizer="adam",
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"]
)
model.build()
model.summary()
plot_model(model, to_file="model.png", show_shapes=True)
model.fit(train_images, train_labels, epochs=20)
model.evaluate(validation_images, validation_labels)
# # test
# images = np.empty(shape=(len(test_data.index), im_shape[0], im_shape[1], 3))
# for i, row in tqdm(test_data.iterrows()):
# im = load_image(test_dir + str(row.id_code) + ".png", (im_shape[0], im_shape[1]))
# images[i] = np.array(im)
# if dump_preprocessed:
# im.save(prep_test_dir + str(row.id_code) + ".png")
#
# test_images = images
# # Time:
# # Original + Dumping: 1 min
# # Preprocessed: 1 s
# predictions = model.predict(test_images)
# # create submission
# test_data.reset_index()
# with open("submissions.csv", "w") as f:
# f.write("id_code, diagnosis\n")
# for i in range(len(predictions)):
# f.write(test_data.id_code[i] + "," + str(np.argmax(predictions[i])) + "\n")
# test_data.id_code[0]
# model.save("basic.h5")
|
# Use this to find the theoretically perfect level of TIR1 expressions
from matplotlib import pyplot as plt
import numpy as np
import math
from scipy.integrate import odeint
import random
def find_index_from_time(t_obs, time, start_index=0):
    """Return the largest index i >= start_index with t_obs[i] <= time.

    Scans forward from start_index; saturates at the last index when *time*
    exceeds every remaining entry.
    """
    i = start_index
    while i + 1 < len(t_obs) and t_obs[i + 1] <= time:
        i += 1
    return i
def resample_observations(t_obs_in, s_obs_in, t_obs_out):
    """Resample (t_obs_in, s_obs_in) onto the grid t_obs_out (zero-order hold).

    For each output time, picks the last input sample at or before it; the
    search cursor only moves forward, so t_obs_out must be non-decreasing.
    """
    resampled = []
    cursor = 0
    for target_time in t_obs_out:
        cursor = find_index_from_time(t_obs_in, target_time, start_index=cursor)
        resampled.append(s_obs_in[cursor])
    return resampled
def gen_next_event_time(rate):
    """Draw an exponentially distributed waiting time for a Poisson process of *rate*."""
    return random.expovariate(rate)
def calc_dillution(time_of_phase, initial_volume, final_volume):
    """Exponential growth/dilution rate k such that V(t) = V0 * exp(k*t)."""
    growth_ratio = final_volume / initial_volume
    return math.log(growth_ratio) / time_of_phase
def assymetrical_cell_division(s0,t_obs_out,params):
    """Simulate auxin influx and TIR1/RFP/BFP expression in one growing cell.

    s0        -- initial species tuple (AUXIN, mRNA_TIR1, TIR1, mRNA_RFP, RFP,
                 mRNA_BFP, BFP, VOLUME).
    t_obs_out -- output time grid; the simulation runs from t_obs_out[0] to
                 t_obs_out[-1] in fixed 1-second steps.
    params    -- model constants (see unpacking below); induction_level is the
                 external auxin concentration switched on after 20 minutes.

    Returns (observations resampled onto t_obs_out, RFP at induction time,
    RFP at t = 65 min).
    NOTE(review): rfp_start / rfp_end are only assigned when t crosses 60*20 /
    equals exactly 60*65; with a shorter t_obs_out the final return raises
    UnboundLocalError -- confirm callers always simulate >= 65 min.
    """
    #--0--# Unpack parameters and species variables
    permeability_IAAH, fIAAH_w, conc_out, fIAAH_c, pm_thickness, k_TIR1rnaexpression, k_RFPrnaexpression, k_BFPrnaexpression, k_rnadecay, k_translation, k_leakydegrad, k_degrad, percentage_sd,avogadro, kdil, induction_level = params
    AUXIN, mRNA_TIR1, TIR1, mRNA_RFP, RFP, mRNA_BFP, BFP, VOLUME = s0
    #--0--#
    # create arrays for output
    s_obs=[]
    t_obs=[]
    # read in start time and end time
    t_init=t_obs_out[0]
    t_final=t_obs_out[-1]
    t=t_init
    t_obs.append(t)
    s_obs.append(s0)
    induction= "false"
    while t < t_final:
        types=['influx','TIR1_transcription','TIR1_rna_decay','TIR1_translation','RFP_transcription','RFP_rna_decay','RFP_translation',"leaky_degradation",'RFP_degradation','BFP_transcription','BFP_rna_decay','BFP_translation','growth']
        # Per-second propensity of each event type.
        rate_influx = (permeability_IAAH/pm_thickness)*((fIAAH_w*conc_out - fIAAH_c*AUXIN))#*(4.83597586205*(VOLUME**(2/3)))))/pm_thickness)#/VOLUME
        rate_TIR1_transcription = k_TIR1rnaexpression
        rate_TIR1_rna_decay = k_rnadecay*mRNA_TIR1
        rate_TIR1_translation = k_translation*mRNA_TIR1
        rate_RFP_transcription = k_RFPrnaexpression
        rate_RFP_rna_decay =k_rnadecay*mRNA_RFP
        rate_RFP_translation = k_translation*mRNA_RFP
        rate_leaky_degradation = k_leakydegrad*RFP*TIR1
        rate_RFP_degradation = k_degrad*RFP*AUXIN*TIR1
        rate_BFP_transcription = k_BFPrnaexpression
        rate_BFP_rna_decay =k_rnadecay*mRNA_BFP
        rate_BFP_translation =k_translation*mRNA_BFP
        rate_growth = VOLUME * kdil
        rates=[rate_influx,rate_TIR1_transcription,rate_TIR1_rna_decay,rate_TIR1_translation,rate_RFP_transcription,rate_RFP_rna_decay,rate_RFP_translation,rate_leaky_degradation,rate_RFP_degradation,rate_BFP_transcription,rate_BFP_rna_decay,rate_BFP_translation,rate_growth]
        # Clamp negative rates (e.g. net auxin efflux) to zero.
        position=0
        for i in rates:
            if i < 0:
                rates[position]=0
            position += 1
        rate_all_events=sum(rates)
        if rate_all_events <= 0:
            break
        # Deterministic 1 s step; the stochastic (Gillespie) waiting time is
        # intentionally disabled below.
        #next_event=gen_next_event_time(rate_all_events)
        next_event=1
        t+=1
        # Euler updates; the VOLUME*avogadro factors cancel, leaving
        # rate * next_event in concentration units.
        AUXIN+= (rates[0]*VOLUME*avogadro*next_event)/(VOLUME*avogadro)
        mRNA_TIR1+=(rates[1]*VOLUME*avogadro*next_event)/(VOLUME*avogadro)
        mRNA_TIR1-= (rates[2]*VOLUME*avogadro*next_event)/(VOLUME*avogadro)
        TIR1+= (rates[3]*VOLUME*avogadro*next_event)/(VOLUME*avogadro)
        mRNA_RFP+= (rates[4]*VOLUME*avogadro*next_event)/(VOLUME*avogadro)
        mRNA_RFP-= (rates[5]*VOLUME*avogadro*next_event)/(VOLUME*avogadro)
        RFP+= (rates[6]*VOLUME*avogadro*next_event)/(VOLUME*avogadro)
        RFP-= (rates[7]*VOLUME*avogadro*next_event)/(VOLUME*avogadro)
        RFP-= (rates[8]*VOLUME*avogadro*next_event)/(VOLUME*avogadro)
        mRNA_BFP+= ((rates[9]*VOLUME*avogadro*next_event))/(VOLUME*avogadro)
        mRNA_BFP-= ((rates[10]*VOLUME*avogadro*next_event))/(VOLUME*avogadro)
        BFP+= ((rates[11]*VOLUME*avogadro*next_event))/(VOLUME*avogadro)
        event_dilution = (rates[12]*next_event)
        # Growth dilution: rescale every concentration by VOLUME/(VOLUME+dV).
        AUXIN-= AUXIN-((AUXIN*VOLUME)/(VOLUME+event_dilution))
        mRNA_TIR1-= mRNA_TIR1-((mRNA_TIR1*VOLUME)/(VOLUME+event_dilution))
        TIR1-= TIR1-((TIR1*VOLUME)/(VOLUME+event_dilution))
        mRNA_RFP-= mRNA_RFP-((mRNA_RFP*VOLUME)/(VOLUME+event_dilution))
        RFP-= RFP-((RFP*VOLUME)/(VOLUME+event_dilution))
        mRNA_BFP-= mRNA_BFP-((mRNA_BFP*VOLUME)/(VOLUME+event_dilution))
        BFP-= BFP-((BFP*VOLUME)/(VOLUME+event_dilution))
        VOLUME+= event_dilution
        # Switch external auxin on once after 20 minutes; record RFP baseline.
        if t >= 60*20 and induction == "false":
            rfp_start=RFP
            conc_out=induction_level
            induction="true"
        # Record RFP 45 min after induction (relies on integer 1 s steps).
        if t == 60*65:
            rfp_end=RFP
        s = (AUXIN, mRNA_TIR1, TIR1, mRNA_RFP, RFP, mRNA_BFP, BFP, VOLUME)
        t_obs.append(t)
        s_obs.append(s)
    s_obs_out=resample_observations(t_obs,s_obs,t_obs_out)
    return np.array(s_obs_out), rfp_start, rfp_end
# Seed both RNGs so simulation runs are reproducible.
# Fixed: the original did `np.random.seed = 1` / `random.seed = 1`, which
# REPLACES the seed functions with the integer 1 instead of calling them,
# so the runs were never actually seeded.
np.random.seed(1)
random.seed(1)
# Initial conditions: species concentrations and starting volume (litres).
AUXIN0 = 0
mRNA_TIR10 = 0
TIR10 = 0
mRNA_RFP0 = 1.8*9.09364077747e-10
RFP0 = 0.378*1.8
mRNA_BFP0 = 1.8*9.09364077747e-10
BFP0 = 0.378*1.8
VOLUME0 = 30*(10**(-15))
# Physical / kinetic constants (sources noted inline).
permeability_IAAH = .389 # um*s^-1
fIAAH_w = 0.25
conc_out = 0 # Taken out of the AID2 paper Assuming an average intracellular concentration of auxin of 23.8 umol per litre, look at system 1 notes, 0.03808
fIAAH_c = 0.0004
pm_thickness = 0.0092 #um https://bionumbers.hms.harvard.edu/bionumber.aspx?s=n&v=0&id=108569#:~:text=%22The%20average%20thickness%20of%20the,al.%2C%201994).%22
k_TIR1rnaexpression = 2.37e-12 # based on average expression rate of RNAs in yeast
k_RFPrnaexpression = 4e-12 # based on average expression rate of RNAs in yeast
k_BFPrnaexpression = 4e-12 # based on average expression rate of RNAs in yeast
k_rnadecay = 0.00240676104375 # per second, https://elifesciences.org/articles/32536
k_translation = 52769.9671394 # um per liter per mRNA per second
k_leakydegrad = 0.000150354734602 # umolar per second
k_degrad = 4.1718216004e-09 # umolar per second
percentage_sd = 0.00636 # Standard deviation of Sc BY4741: https://www.sciencedirect.com/science/article/pii/S2468501120300201
avogadro = 6.022*(10**23)
# Mother 0 G1
# G1 phase: 91 minutes; volume growth 30 fl -> 60 fl fixes the dilution rate.
G1_length = 91*60
t_G1 = np.linspace(0,G1_length,G1_length)
G1_initial_volume=30*(10**(-15))
G1_final_volume=60*(10**(-15))
kdil = calc_dillution(G1_length,G1_initial_volume,G1_final_volume)
s0 = (AUXIN0, mRNA_TIR10, TIR10, mRNA_RFP0, RFP0, mRNA_BFP0, BFP0, VOLUME0)
params= (permeability_IAAH, fIAAH_w, conc_out, fIAAH_c, pm_thickness, k_TIR1rnaexpression, k_RFPrnaexpression, k_BFPrnaexpression, k_rnadecay, k_translation, k_leakydegrad, k_degrad, percentage_sd,avogadro, kdil)
#params= (permeability_IAAH, fIAAH_w, conc_out, fIAAH_c, pm_thickness, permeability_IAA, Nu, kdil, 0,k_PIN2expression,percentage_sd,avogadro,k_rnadecay)
#s_obs=assymetrical_cell_division(s0,t_G1,params)
#results = s_obs[:]
# Sweep TIR1 mRNA expression from 1e-13 to 1e-11 in 1e-14 steps; for each
# level, first equilibrate without auxin, then induce and record the RFP drop.
k_TIR1rnaexpression=1e-13
s0 = (AUXIN0, mRNA_TIR10, TIR10, mRNA_RFP0, RFP0, mRNA_BFP0, BFP0, VOLUME0)
rfp_starts=[]
rfp_ends=[]
expression_levels=[]
step=0
#for i in range(10):
while k_TIR1rnaexpression <= 1e-11:
    # Calibration run: no induction (induction_level = 0).
    induction_level=0
    params=(permeability_IAAH, fIAAH_w, conc_out, fIAAH_c, pm_thickness, k_TIR1rnaexpression, k_RFPrnaexpression, k_BFPrnaexpression, k_rnadecay, k_translation, k_leakydegrad, k_degrad, percentage_sd,avogadro, kdil,induction_level)
    calibrate=[]
    round_results=[]
    cal,dum,dummy = assymetrical_cell_division(s0,t_G1,params)
    calibrate.append(cal)
    calibrate = np.array(calibrate)
    # Measurement run: start from the calibrated steady state, with auxin.
    induction_level=750
    params=(permeability_IAAH, fIAAH_w, conc_out, fIAAH_c, pm_thickness, k_TIR1rnaexpression, k_RFPrnaexpression, k_BFPrnaexpression, k_rnadecay, k_translation, k_leakydegrad, k_degrad, percentage_sd,avogadro, kdil,induction_level)
    mRNA_TIR10=np.average(calibrate[:][...,1], axis=0)[-1]
    TIR10=np.average(calibrate[:][...,2], axis=0)[-1]
    mRNA_RFP0=np.average(calibrate[:][...,3], axis=0)[-1]
    RFP0=np.average(calibrate[:][...,4], axis=0)[-1]
    rfp_start_list=[]
    rfp_end_list=[]
    s0 = (AUXIN0, mRNA_TIR10, TIR10, mRNA_RFP0, RFP0, mRNA_BFP0, BFP0, VOLUME0)
    gen,a,b=assymetrical_cell_division(s0,t_G1,params)
    rfp_start_list.append(a)
    rfp_end_list.append(b)
    rfp_starts.append(sum(rfp_start_list)/len(rfp_start_list))
    rfp_ends.append(sum(rfp_end_list)/len(rfp_end_list))
    expression_levels.append(k_TIR1rnaexpression)
    k_TIR1rnaexpression+=1e-14
    step+=1
    print("step: ",step)
# The optimum is the expression level with the largest induced RFP drop.
rfp_differences = []
zip_rfps=zip(rfp_starts,rfp_ends)
for i, j in zip_rfps:
    rfp_differences.append(i-j)
optimum_expression=expression_levels[rfp_differences.index(max(rfp_differences))]
print(max(rfp_differences))
print(optimum_expression)
fig=plt.figure(figsize=(12,8))
ax1=fig.add_subplot(1,1,1)
ax1.plot(expression_levels, rfp_differences,'k',label="Drop in RFP concentration")
ax1.set_xlabel("Expression level of TIR1 mRNA, in umol per litre")
ax1.set_ylabel("Drop in RFP concentration over 45 mins, in umol per litre")
ax1.legend()
plt.show()
|
# -*- coding: utf-8 -*-
import re

# Print the key of the first mapping entry whose value list contains the
# (lower-cased) student id.
a = {7: ['s001', 's002', 's027']}
for idx in a:
    if 'S027'.lower() in a[idx]:
        print(idx)
        break
print(a)
b = []
# Fixed: b is empty, so the original `c = b[0]` always raised IndexError
# before the final print could run; fall back to None for an empty list.
c = b[0] if b else None
print(c)
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-15 18:35
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the card app.

    Creates the Chatter and EternalCard tables, then links each Chatter row
    to an EternalCard via a cascading foreign key.
    """
    # Auto-generated by Django 1.10.5 -- treat as immutable history; schema
    # changes belong in a new migration, not in edits to this file.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Chatter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('chatter_type', models.CharField(max_length=100)),
                ('chatter_parent', models.CharField(max_length=500)),
                ('chatter_content', models.CharField(max_length=3000)),
                ('chatter_source', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='EternalCard',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('set', models.CharField(max_length=100)),
                ('name', models.CharField(max_length=100)),
                ('text', models.CharField(max_length=500)),
                ('cost', models.IntegerField(default=0)),
                ('influence', models.CharField(max_length=200)),
                ('colors', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=200), size=None)),
                ('rarity', models.CharField(max_length=100)),
                ('attack', models.IntegerField(default=0)),
                ('health', models.IntegerField(default=0)),
                ('type', models.CharField(max_length=200)),
                ('subtypes', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=200), size=None)),
                ('num', models.IntegerField(default=0)),
                ('aliases', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=200), size=None)),
            ],
        ),
        migrations.AddField(
            model_name='chatter',
            name='eternalcard',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='card.EternalCard'),
        ),
    ]
|
# Sprite Animation
# Running Bunny
# This program draws an animated spiral sprite.
import simplegui
import math
import random
# Global Variables
canvas_width = 200
canvas_height = 200
# 3-frame bunny sprite sheet, 100x100 px per tile, laid out horizontally.
image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/week8-bunny_sprite.png")
image_size = [100, 100]
image_center = [50, 50]
num_tiles = 3
cur_tile = 0
# This image doesn't look good when run at 60 fps, so
# the delay variable slows it down to 6 fps
delay = 10
# Time keeps track of how many frames have passed since
# the last tile incrementation.
time = 0
def draw(canvas):
    """Advance the animation clock and draw the current sprite tile, centered."""
    global cur_tile, time
    time += 1
    # After `delay` frames, step to the next tile (wrapping around).
    if time == delay:
        time = 0
        cur_tile = (cur_tile + 1) % num_tiles
    # Source center slides right by one tile-width per tile index.
    src_center = [image_center[0] + cur_tile * image_size[0], image_center[1]]
    dest_center = [canvas_width // 2, canvas_height // 2]
    canvas.draw_image(image, src_center, image_size, dest_center, image_size)
# Frame
# NOTE(review): the title says "Bouncing Sounds" but this program animates a
# bunny sprite -- likely copied from another example; confirm intended title.
frame = simplegui.create_frame("Bouncing Sounds", canvas_width, canvas_height)
# Register Event Handlers
frame.set_draw_handler(draw)
frame.set_canvas_background("Fuchsia")
# Start
frame.start()
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 - 2013 Vikasa Infinity Anugrah <http://www.infi-nity.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from tools.translate import _
from tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
import time
import re
# Indonesian number words for 0-19 ('Nol' = zero).
to_19 = [
'Nol',
'Satu',
'Dua',
'Tiga',
'Empat',
'Lima',
'Enam',
'Tujuh',
'Delapan',
'Sembilan',
'Sepuluh',
'Sebelas',
'Dua Belas',
'Tiga Belas',
'Empat Belas',
'Lima Belas',
'Enam Belas',
'Tujuh Belas',
'Delapan Belas',
'Sembilan Belas'
]
# Tens words for 20-90 (index 0 = 20).
tens = [
'Dua Puluh',
'Tiga Puluh',
'Empat Puluh',
'Lima Puluh',
'Enam Puluh',
'Tujuh Puluh',
'Delapan Puluh',
'Sembilan Puluh'
]
# Whole-thousand words 1000-9000.
# NOTE(review): not referenced elsewhere in this module -- confirm whether
# importers use it before removing.
ribu = [
'Seribu',
'Dua Ribu',
'Tiga Ribu',
'Empat Ribu',
'Lima Ribu',
'Enam Ribu',
'Tujuh Ribu',
'Delapan Ribu',
'Sembilan Ribu'
]
# Scale (power-of-1000) names: denom[n] names 1000**n.
denom = [
'',
'Ribu',
'Juta',
'Miliar',
'Triliun',
'Kuadriliun',
'Kuintiliun',
'Sextillion',
'Septillion',
'Octillion',
'Nonillion',
'Decillion',
'Undecillion',
'Duodecillion',
'Tredecillion',
'Quattuordecillion',
'Sexdecillion',
'Septendecillion',
'Octodecillion',
'Novemdecillion',
'Vigintillion'
]
# Month/day name tables; the month lists carry a '' placeholder at index 0 so
# month numbers 1-12 index directly.
month_long = ['', 'Januari', 'Februari', 'Maret', 'April', 'Mei', 'Juni', 'Juli', 'Agustus', 'September', 'Oktober', 'November', 'Desember']
month_short = ['', 'Jan', 'Feb', 'Mar', 'Apr', 'Mei', 'Jun', 'Jul', 'Agt', 'Sep', 'Okt', 'Nov', 'Des']
dow_long = ['Minggu', 'Senin', 'Selasa', 'Rabu', 'Kamis', 'Jumat', 'Sabtu']
# English -> Indonesian month-name maps, used by translateToID().
month_long_en2id = {
'January': 'Januari',
'February': 'Februari',
'March': 'Maret',
'April': 'April',
'May': 'Mei',
'June': 'Juni',
'July': 'Juli',
'August': 'Agustus',
'September': 'September',
'October': 'Oktober',
'November': 'November',
'December': 'Desember',
}
month_short_en2id = {
'Jan': 'Jan',
'Feb': 'Feb',
'Mar': 'Mar',
'Apr': 'Apr',
'May': 'Mei',
'Jun': 'Jun',
'Jul': 'Jul',
'Aug': 'Agt',
'Sep': 'Sep',
'Oct': 'Okt',
'Nov': 'Nov',
'Dec': 'Des',
}
def _convert_nn(val):
    """Spell a number below 100 in Indonesian (title case)."""
    if val < 20:
        return to_19[val]
    for pos, tens_word in enumerate(tens):
        # tens[pos] covers the decade [20 + 10*pos, 30 + 10*pos).
        if val < 30 + 10 * pos:
            ones = val % 10
            if ones:
                return tens_word + ' ' + to_19[ones]
            return tens_word
def _convert_nnn(val):
    """Spell a number below 1000: 'Seratus'/'<X> Ratus' plus the sub-hundred part."""
    hundreds, below = divmod(val, 100)
    word = ''
    if hundreds == 1:
        word = 'Seratus'
    elif 1 < hundreds < 10:
        word = to_19[hundreds] + ' Ratus'
    # Separate hundreds from the remainder with a single space.
    if word and below > 0:
        word = word + ' '
    if below > 0:
        word = word + _convert_nn(below)
    return word
def _convert_nnnn(val):
    """Spell a number below 10000: 'Seribu'/'<X> Ribu' plus the sub-thousand part."""
    thousands, below = divmod(val, 1000)
    word = ''
    if thousands == 1:
        word = 'Seribu'
    elif 1 < thousands < 10:
        word = to_19[thousands] + ' Ribu'
    # Separate thousands from the remainder with a single space.
    if word and below > 0:
        word = word + ' '
    if below > 0:
        word = word + _convert_nnn(below)
    return word
def indonesian_number(val):
    """Spell an arbitrary non-negative integer in Indonesian, recursing per 1000-group."""
    if val < 100:
        return _convert_nn(val)
    if val < 1000:
        return _convert_nnn(val)
    if val < 10000:
        return _convert_nnnn(val)
    # Find the largest power of 1000 that does not exceed val.
    for group in range(1, len(denom) - 1):
        if 1000 ** (group + 1) > val:
            unit = 1000 ** group
            leading = val // unit
            remainder = val - leading * unit
            spelled = _convert_nnn(leading) + ' ' + denom[group]
            if remainder > 0:
                spelled = spelled + ' ' + indonesian_number(remainder)
            return spelled
def amount_to_text_id(number, currency):
    """Spell *number* as an Indonesian currency amount.

    e.g. 1.50 with currency 'Rupiah' -> 'Satu Rupiah dan Lima Puluh Sen'.
    """
    whole_part, cent_part = ('%.2f' % number).split('.')
    words = indonesian_number(int(whole_part)) + ' ' + currency
    cents = int(cent_part)
    if cents > 0:
        words += ' dan ' + indonesian_number(cents) + ' Sen'
    return words
def number_to_text(number):
    """Spell a decimal number: integer part, then 'koma' and the two-digit fraction."""
    whole_part, frac_part = ('%.2f' % number).split('.')
    words = indonesian_number(int(whole_part))
    if int(frac_part) > 0:
        words += ' koma ' + indonesian_number(int(frac_part))
    return words
def number_to_day(number):
    """Map 0-6 to the Indonesian day name (Minggu..Sabtu), clamping larger input.

    Fixed: the clamp used min(number, len(dow_long)), but len(dow_long) is
    itself out of range, so number_to_day(7) raised IndexError; clamp to the
    last valid index instead.
    """
    number = int(number)
    number = min(number, len(dow_long) - 1)
    return dow_long[number]
def number_to_month(number):
    """Map 1-12 to the full Indonesian month name, clamping larger input.

    Fixed: the clamp used min(number, len(month_long)), an out-of-range index
    that made values >= 13 raise IndexError; clamp to the last valid index.
    """
    number = int(number)
    number = min(number, len(month_long) - 1)
    return month_long[number]
def number_to_mth(number):
    """Map 1-12 to the abbreviated Indonesian month name, clamping larger input.

    Fixed: the clamp used min(number, len(month_short)), an out-of-range index
    that made values >= 13 raise IndexError; clamp to the last valid index.
    """
    number = int(number)
    number = min(number, len(month_short) - 1)
    return month_short[number]
def translateToID(value, what="month_short"):
    """Replace English month names in *value* with their Indonesian equivalents.

    what selects the table: "month_short" for abbreviations, anything else
    for full month names. The input is stripped of surrounding whitespace.
    """
    mapping = month_short_en2id if what == "month_short" else month_long_en2id
    pattern = re.compile(r'\b(' + '|'.join(mapping.keys()) + r')\b')
    return pattern.sub(lambda match: mapping[match.group()], value.strip())
def formatDate(value=False, format='%Y-%m-%d'):
    """Format *value* per *format*, translating month names to Indonesian.

    value may be False (use now), a struct_time, or a string in the server
    date/datetime format (chosen by string length); anything else is coerced
    to str and parsed the same way. Returns '' when parsing fails.
    """
    if value is False:
        # No value specified, assume NOW
        _rv = time.strftime(format)
    elif isinstance(value, float):
        # value is in seconds after epoch format
        # NOTE(review): floats have no strftime attribute, so this branch
        # raises AttributeError if ever taken -- confirm intended input types.
        _rv = value.strftime(format)
    elif isinstance(value, time.struct_time):
        # value is in struct_time format
        _rv = value.strftime(format)
    elif isinstance(value, (str, basestring)):
        # value is in string
        # Length alone distinguishes date from datetime server format.
        parse_format = (len(value) == len(time.strftime(DEFAULT_SERVER_DATE_FORMAT))) and \
            DEFAULT_SERVER_DATE_FORMAT or DEFAULT_SERVER_DATETIME_FORMAT
        _rv = time.strftime(format, time.strptime(value, parse_format))
    else:
        try:
            # a string-compatible format, assumed string
            # DO NOT combine this section with the first one as this check must be performed after float check
            value = str(value)
            parse_format = (len(value) == len(time.strftime(DEFAULT_SERVER_DATE_FORMAT))) and \
                DEFAULT_SERVER_DATE_FORMAT or DEFAULT_SERVER_DATETIME_FORMAT
            _rv = time.strftime(format, time.strptime(value, parse_format))
        except:
            # unrecognized format, return empty string
            _rv = ''
    # Translate the month
    # Only translate when the requested format actually contains a month name.
    if _rv:
        if bool(re.compile('%b').findall(format)):
            _rv = translateToID(_rv, what="month_short")
        elif bool(re.compile('%B').findall(format)):
            _rv = translateToID(_rv, what="month_long")
    return _rv
#-------------------------------------------------------------
# Generic functions
#-------------------------------------------------------------
# Dispatch table: language code -> amount-to-text converter ('id' only so far).
_translate_funcs = {'id': amount_to_text_id}
#TODO: we should use the country AND language (ex: septante VS soixante dix)
#TODO: we should use en by default, but the translation func is yet to be implemented
def amount_to_text(nbr, lang='id', currency='Rupiah'):
    """
    Converts an integer to its textual representation, using the language set in the context if any.

    Dispatches through _translate_funcs on *lang*; unknown languages log a
    warning via the OpenERP netsvc logger and fall back to 'en' (which is
    not in the table, so the final lookup raises KeyError -- see TODO below).
    The absolute value of *nbr* is converted; sign is discarded.
    Example:
        1654: thousands six cent cinquante-quatre.
    """
    import netsvc
    # if nbr > 10000000:
    #     netsvc.Logger().notifyChannel('translate', netsvc.LOG_WARNING, _("Number too large '%d', can not translate it"))
    #     return str(nbr)
    if not (lang in _translate_funcs):
        netsvc.Logger().notifyChannel('translate', netsvc.LOG_WARNING, _("no translation function found for lang: '%s'" % (lang,)))
        #TODO: (default should be en) same as above
        lang = 'en'
    return _translate_funcs[lang](abs(nbr), currency)
if __name__=='__main__':
    # Python 2 self-test harness.
    # NOTE(review): int_to_text is not defined anywhere in this module
    # (likely meant number_to_text or amount_to_text), so running this block
    # raises NameError -- confirm the intended function.
    from sys import argv
    lang = 'nl'
    if len(argv) < 2:
        for i in range(1, 200):
            print i, ">>", int_to_text(i, lang)
        for i in range(200, 999999, 139):
            print i, ">>", int_to_text(i, lang)
    else:
        print int_to_text(int(argv[1]), lang)
|
import cv2
import numpy as np
import hand_detection_module as hdm
import fingers as fing
# Virtual-painter main loop: track the index/middle fingers via the local
# hand-detection helper modules and draw on a canvas overlaid on the webcam.
cam = cv2.VideoCapture(0)
cam.set(3,640)  # property 3 = CAP_PROP_FRAME_WIDTH
cam.set(4,480)  # property 4 = CAP_PROP_FRAME_HEIGHT
detector = hdm.Handdetector(mindetection=0.5)
# Persistent drawing layer, composited onto each camera frame.
imgcanvas = np.zeros((480,640,3),np.uint8)
# Landmark ids of the five fingertips.
tips = [4,8,12,16,20]
draw_col = (255,0,255)  # current brush color (BGR)
brush_thick = 15
eraser_thick = 100
# Previous pen position; (0, 0) means "pen just lifted".
xp, yp = 0,0
while True:
    succ, frame = cam.read()
    detector.findHands(frame)
    pos,b_box = detector.findPosition(frame,Draw=False)
    fin = detector.fingerspos(pos,tips)
    # Toolbar: color swatches along the top of the frame.
    # NOTE(review): the first rectangle's corners share y=125, so it
    # degenerates to a line -- possibly meant (0,0)-(1280,125); confirm.
    cv2.rectangle(frame, (0,125), (1280,125), (0,255,0),cv2.FILLED)
    cv2.rectangle(frame, (30,115), (115,20), (255,0,255),cv2.FILLED)
    cv2.rectangle(frame, (150,115), (240,20), (0,255,0),cv2.FILLED)
    cv2.rectangle(frame, (300,115), (390,20), (0,0,255),cv2.FILLED)
    cv2.rectangle(frame, (450,115), (540,20), (0,0,0),cv2.FILLED)
    if len(pos)!=0:
        #Index and Middle Fingers
        x1, y1 = pos[8][1:]
        x2, y2 = pos[12][1:]
        #Selection Mode
        # Two fingers up: hover the toolbar to pick a color (black = eraser).
        if fin==2:
            xp,yp = 0,0
            if y1 < 115:
                if 30<x1<115:
                    draw_col = (255,0,255)
                    #cv2.rectangle(frame, (x1,y1-15), (x2,y2+15), draw_col,cv2.FILLED)
                    print("Purple")
                elif 150<x1<240:
                    draw_col = (0,255,0)
                    #cv2.rectangle(frame, (x1,y1-15), (x2,y2+15), draw_col,cv2.FILLED)
                    print("Green")
                elif 300<x1<390:
                    draw_col = (0,0,255)
                    #cv2.rectangle(frame, (x1,y1-15), (x2,y2+15), draw_col,cv2.FILLED)
                    print("Red")
                elif 450<x1<540:
                    draw_col = (0,0,0)
                    #cv2.rectangle(frame, (x1,y1-15), (x2,y2+15), draw_col,cv2.FILLED)
                    print("Eraser")
            cv2.rectangle(frame, (x1,y1-25), (x2,y2+25), draw_col,cv2.FILLED)
        #Drawing Mode
        # One finger up: draw a line segment from the previous pen position.
        if fin==1:
            if xp==0 and yp==0:
                xp,yp=x1,y1
            cv2.line(frame, (xp,yp), (x1,y1), draw_col,brush_thick)
            if draw_col==(0,0,0):
                cv2.line(imgcanvas, (xp,yp), (x1,y1), draw_col,eraser_thick)
            else:
                cv2.line(imgcanvas, (xp,yp), (x1,y1), draw_col,brush_thick)
            xp,yp = x1,y1
    # Composite: mask out drawn pixels from the frame, then OR in the canvas.
    imgGray = cv2.cvtColor(imgcanvas, cv2.COLOR_BGR2GRAY)
    _, imginv = cv2.threshold(imgGray, 50, 255, cv2.THRESH_BINARY_INV)
    imginv = cv2.cvtColor(imginv, cv2.COLOR_GRAY2BGR)
    frame = cv2.bitwise_and(frame, imginv)
    frame = cv2.bitwise_or(frame, imgcanvas)
    #frame = cv2.addWeighted(frame, 1, imgcanvas, 1, 5)
    cv2.imshow("Camera", frame)
    #cv2.imshow("Canvas",imgcanvas)
    # Enter (13) or 'q' (113) quits.
    key = cv2.waitKey(1)
    if key==13 or key==113:
        break
|
from collections import defaultdict
import pandas as pd
import math
# Map schedule abbreviations to full franchise names so the stats and
# schedule data sources join on the same team key.
teams = {'ANA': 'Anaheim Ducks', 'ARI': 'Arizona Coyotes', 'BOS': 'Boston Bruins', 'BUF': 'Buffalo Sabres', 'CGY': 'Calgary Flames', 'CAR': 'Carolina Hurricanes', 'CHI': 'Chicago Blackhawks', 'COL': 'Colorado Avalanche', 'CBJ': 'Columbus Blue Jackets', 'DAL': 'Dallas Stars', 'DET': 'Detroit Red Wings', 'EDM': 'Edmonton Oilers', 'FLA': 'Florida Panthers', 'L.A': 'Los Angeles Kings', 'MIN': 'Minnesota Wild', 'MTL': 'Montreal Canadiens', 'NSH': 'Nashville Predators', 'N.J': 'New Jersey Devils', 'NYI': 'New York Islanders', 'NYR': 'New York Rangers', 'OTT': 'Ottawa Senators', 'PHI': 'Philadelphia Flyers', 'PIT': 'Pittsburgh Penguins', 'S.J': 'San Jose Sharks', 'STL': 'St. Louis Blues', 'T.B': 'Tampa Bay Lightning', 'TOR': 'Toronto Maple Leafs', 'VAN': 'Vancouver Canucks', 'WSH': 'Washington Capitals', 'WPG': 'Winnipeg Jets'}
# ELO model constants.
home_factor = 35.0  # home-ice advantage, in rating points
velocity_of_change = 8.0  # base K-factor
regular_importance = 1.0  # regular-season game weight
playoff_importance = 1.5  # playoff game weight
# Every team starts at 1500; separate tables for goals and expected goals.
elo = defaultdict(lambda: 1500.0)
elo_xG = defaultdict(lambda: 1500.0)
def expectation(home, away, elos):
    """Home-team win probability from the logistic ELO curve, with home-ice bonus."""
    rating_gap = elos[away] - elos[home] - home_factor
    return 1.0 / (1.0 + 10 ** (rating_gap / 400))
def delta_elo(home, away, elos, margin, outcome, expectation, playoff=False):
    """Return the home team's Elo change for one game.

    The update is K * margin-of-victory multiplier * (outcome - expectation),
    where K scales playoff games up via ``playoff_importance``.
    """
    importance = playoff_importance if playoff else regular_importance
    k = importance * velocity_of_change
    # Margin-of-victory multiplier, dampened by the pre-game rating gap so
    # expected blowouts do not inflate ratings; never below 1.
    rating_gap = elos[home] - elos[away] + home_factor
    mov = max(1, math.log(abs(margin - .85 * rating_gap / 100) + math.e - 1))
    return k * mov * (outcome - expectation)
# Per-game team stats; one row per (Date, Home team).
stats = pd.read_csv("Corsica_Team.Stats_2015-2017.csv", parse_dates=['Date'], infer_datetime_format=True)
# Normalize team codes to full names so they join against the schedules.
stats = stats.replace({"Team": teams})
stats = stats.rename(columns = {'Team': 'Home'})
# Three seasons of schedules, each tagged regular-season vs playoff.
schedule2015rs = pd.read_csv("2015rs.csv", parse_dates=['Date'], infer_datetime_format=True)
schedule2015rs["Playoff"] = False
schedule2015po = pd.read_csv("2015po.csv", parse_dates=['Date'], infer_datetime_format=True)
schedule2015po["Playoff"] = True
schedule2016rs = pd.read_csv("2016rs.csv", parse_dates=['Date'], infer_datetime_format=True)
schedule2016rs["Playoff"] = False
schedule2016po = pd.read_csv("2016po.csv", parse_dates=['Date'], infer_datetime_format=True)
schedule2016po["Playoff"] = True
schedule2017rs = pd.read_csv("2017rs.csv", parse_dates=['Date'], infer_datetime_format=True)
schedule2017rs["Playoff"] = False
schedule = pd.concat([schedule2015rs, schedule2015po, schedule2016rs, schedule2016po, schedule2017rs])
# Attach opponent/playoff info to each stats row, then fix a stable
# processing order (chronological) for the sequential rating updates.
games = pd.merge(stats, schedule, how='left', on=['Date', 'Home'])
games = games.rename(columns = {'Visitor': 'Away'})
games = games.sort_values(by=["Date", "Home"])
# Output schema: pre-game ratings, win expectations, result, and post-game
# ratings for both the goal-based and the xG-based Elo systems.
result_columns = ['Date', 'Home', 'Home_ELO', 'Home_ELO_xG', 'Away', 'Away_ELO', 'Away_ELO_xG', 'Expectation', 'Expectation_xG', 'G+/-', 'xG+/-', 'Points', 'Points_xG', 'Home_ELO_new', 'Home_ELO_xG_new', 'Away_ELO_new', 'Away_ELO_xG_new']
results = []
for index, row in games.iterrows():
    game = {}
    # Pre-game state and win expectations (stored as percentages).
    game['Date'] = row['Date']
    game['Home'] = row['Home']
    game['Home_ELO'] = elo[row['Home']]
    game['Home_ELO_xG'] = elo_xG[row['Home']]
    game['Away'] = row['Away']
    game['Away_ELO'] = elo[row['Away']]
    game['Away_ELO_xG'] = elo_xG[row['Away']]
    game['Expectation'] = expectation(row['Home'], row['Away'], elo) * 100.0
    game['Expectation_xG'] = expectation(row['Home'], row['Away'], elo_xG) * 100.0
    game['G+/-'] = row['G+/-']
    game['xG+/-'] = row['xG+/-']
    # Score the forecast ("points") and encode the result as 1/0.5/0 for a
    # home win / tie / loss on goal differential.
    if (row['G+/-'] == 0.0):
        game['Points'] = 100.0 - (abs(50.0 - game['Expectation']) * 2.0)
        game['Points_xG'] = 100.0 - (abs(50.0 - game['Expectation_xG']) * 2.0)
        outcome = 0.5
    elif (row['G+/-'] > 0.0):
        game['Points'] = game['Expectation']
        game['Points_xG'] = game['Expectation_xG']
        outcome = 1.0
    else:
        game['Points'] = 100.0 - game['Expectation']
        game['Points_xG'] = 100.0 - game['Expectation_xG']
        outcome = 0.0
    # Same outcome coding for the expected-goals differential.
    if (row['xG+/-'] == 0.0):
        outcome_xG = 0.5
    elif (row['xG+/-'] > 0.0):
        outcome_xG = 1.0
    else:
        outcome_xG = 0.0
    # Zero-sum rating update: the home team gains exactly what the away
    # team loses, in each system independently.
    delta = delta_elo(row['Home'], row['Away'], elo, row['G+/-'], outcome, game['Expectation']/100, row['Playoff'])
    delta_xG = delta_elo(row['Home'], row['Away'], elo_xG, row['xG+/-'], outcome_xG, game['Expectation_xG']/100, row['Playoff'])
    elo[row['Home']] = elo[row['Home']] + delta
    elo[row['Away']] = elo[row['Away']] - delta
    elo_xG[row['Home']] = elo_xG[row['Home']] + delta_xG
    elo_xG[row['Away']] = elo_xG[row['Away']] - delta_xG
    game['Home_ELO_new'] = elo[row['Home']]
    game['Home_ELO_xG_new'] = elo_xG[row['Home']]
    game['Away_ELO_new'] = elo[row['Away']]
    game['Away_ELO_xG_new'] = elo_xG[row['Away']]
    results.append(game)
# DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0; build
# the frame once from the accumulated row dicts instead of appending.
results_df = pd.DataFrame(results, columns=result_columns)
results_df.to_csv("elos.csv")
|
from necrobot.botbase import cmd_seedgen
from necrobot.botbase import cmd_admin
from necrobot.botbase.botchannel import BotChannel
from necrobot.race import cmd_racemake
from necrobot.race import cmd_racestats
# from necrobot.speedrun import cmd_speedrun
# from necrobot.ladder import cmd_ladder
from necrobot.botbase import cmd_color, cmd_role
from necrobot.user import cmd_user
from necrobot.test import cmd_test
class MainBotChannel(BotChannel):
    """BotChannel for the bot's main channel.

    Registers the commands available there; ladder/speedrun commands are
    currently disabled (left commented out, matching the disabled imports
    at the top of the file).
    """

    def __init__(self):
        BotChannel.__init__(self)
        # Roughly grouped by command module; each command is bound to this
        # channel instance.
        self.channel_commands = [
            cmd_admin.Die(self),
            # cmd_admin.Reboot(self),
            cmd_color.ColorMe(self),
            # cmd_ladder.ForceRanked(self),
            # cmd_ladder.Ranked(self),
            # cmd_ladder.Rating(self),
            # cmd_ladder.Unranked(self),
            cmd_racemake.Make(self),
            cmd_racemake.MakeCondor(self),
            cmd_racemake.MakePrivate(self),
            cmd_racestats.Fastest(self),
            cmd_racestats.MostRaces(self),
            cmd_racestats.Stats(self),
            cmd_role.AddCRoWRole(self),
            cmd_role.RemoveCRoWRole(self),
            cmd_seedgen.RandomSeed(self),
            # cmd_speedrun.Submit(self),
            cmd_test.TestDebugMembers(self),
            cmd_user.DailyAlert(self),
            cmd_user.RaceAlert(self),
            cmd_user.RTMP(self),
            cmd_user.SetInfo(self),
            cmd_user.SetPronouns(self),
            cmd_user.Timezone(self),
            cmd_user.Twitch(self),
            cmd_user.ViewPrefs(self),
            cmd_user.UserInfo(self),
        ]
|
class Solution:
    def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:
        """Insert newInterval into sorted non-overlapping intervals, merging
        any overlaps, and return the merged list.

        Fix over the original: the caller's data is no longer mutated. The
        original appended newInterval to `intervals` in place and rewrote
        the end bounds of its sub-lists; this version builds entirely new
        [start, end] pairs.
        """
        merged: List[List[int]] = []
        for start, end in sorted(intervals + [newInterval]):
            # Touching intervals (prev end == start) are merged too,
            # matching the original `merged[-1][1] < x[0]` test.
            if merged and merged[-1][1] >= start:
                merged[-1][1] = max(merged[-1][1], end)
            else:
                merged.append([start, end])
        return merged
#Copyright 2006 DR0ID <dr0id@bluewin.ch> http://mypage.bluewin.ch/DR0ID
#
#
#
"""
Allow to draw some gradients relatively easy.
"""
__author__ = "$Author: DR0ID $"
__version__= "$Revision: 18 $"
__date__ = "$Date: 2006-10-03 14:01:03 +0200 (Di, 03 Okt 2006) $"
import pygame
import math
def gradient(surface,
             startpoint,
             endpoint,
             startcolor,
             endcolor,
             Rfunc = (lambda x:x),
             Gfunc = (lambda x:x),
             Bfunc = (lambda x:x),
             Afunc = (lambda x:1),
             type = "line",
             mode = None ):
    '''
    Draw a colour gradient on *surface* between *startpoint* and *endpoint*.

    surface   : surface to draw on
    startpoint: (x,y) point on surface
    endpoint  : (x,y) point on surface
    startcolor: (r,g,b,a) color at startpoint
    endcolor  : (r,g,b,a) color at endpoint
    Rfunc     : function y = f(x) with startcolor = f(0) and endcolor = f(1),
                where 0 is at startpoint and 1 at endpoint
    Gfunc     : --- " ---
    Bfunc     : --- " ---
    Afunc     : --- " ---
    these functions are evaluated in the range 0 <= x <= 1 and 0 <= y=f(x) <= 1
    type      : "line", "circle" or "rect"
    mode      : "+", "-", "*", None (how the pixels are drawn)
    returns   : surface with the color characteristics, w,h = (d, 256) where
                d = length of endpoint-startpoint
    '''
    # Distance and orientation of the gradient axis.
    dx = endpoint[0]-startpoint[0]
    dy = endpoint[1]-startpoint[1]
    d = int(round(math.hypot(dx, dy)))
    angle = math.degrees( math.atan2(dy, dx) )
    # Per-channel colour interpolator over the distance d.
    # (class name retains its original typo: ColortInterpolator)
    color = ColortInterpolator(d, startcolor, endcolor, Rfunc, Gfunc, Bfunc, Afunc)
    if type=="line":
        # Paint vertical scan lines on an oversized surface, then rotate it
        # to match the gradient axis.
        h = int(2.*math.hypot(*surface.get_size()))
        bigSurf = pygame.Surface((d, h)).convert_alpha()
        bigSurf.fill((0,0,0,0))
        bigSurf.set_colorkey((0,0,0, 0))
        for x in range(d):
            pygame.draw.line(bigSurf, color.eval(x), (x,0), (x,h), 1)
        bigSurf = pygame.transform.rotozoom(bigSurf, -angle, 1)
        bigSurf.set_colorkey((0,0,0, 0))
        rect = bigSurf.get_rect()
        srect = pygame.Rect(rect)
        # Shift so the gradient starts at startpoint (center is midway).
        dx = d/2. * math.cos(math.radians(angle))
        dy = d/2. * math.sin(math.radians(angle))
        rect.center = startpoint
        rect.move_ip(dx, dy)
    elif type=="circle":
        # Concentric filled circles, largest first, centred on startpoint.
        bigSurf = pygame.Surface((2*d, 2*d)).convert_alpha()
        bigSurf.fill((0,0,0,0))
        for x in range(d, 0, -1):
            pygame.draw.circle(bigSurf, color.eval(x), (d,d), x)
        rect = bigSurf.get_rect()
        srect = pygame.Rect(rect)
        rect.center = (startpoint[0], startpoint[1])
    elif type=="rect":
        # Concentric filled squares, largest first, rotated to the axis.
        bigSurf = pygame.Surface((2*d, 2*d)).convert_alpha()
        bigSurf.fill((0,0,0,0))
        c = bigSurf.get_rect().center
        for x in range(d,-1,-1):
            r = pygame.Rect(0,0,2*x,2*x)
            r.center = c
            pygame.draw.rect(bigSurf, color.eval(x), r)
        bigSurf = pygame.transform.rotozoom(bigSurf, -angle, 1)
        bigSurf.set_colorkey((0,0,0, 0))
        rect = bigSurf.get_rect()
        srect = pygame.Rect(rect)
        rect.center = startpoint
    else:
        raise NameError("type must be one of \"line\",\"circle\" or \"rect\"")
    if mode is None:
        surface.blit(bigSurf, rect, srect)
    else:
        # NOTE(review): pygame.color.add/multiply/subtract are not present
        # in current pygame releases -- confirm against the pygame version
        # this file targets before relying on the blend modes.
        if mode=="+":
            cf = pygame.color.add
        elif mode=="*":
            cf = pygame.color.multiply
        elif mode=="-":
            cf = pygame.color.subtract
        else:
            raise NameError("type must be one of \"+\", \"*\", \"-\" or None")
        # Per-pixel blend, clipped to the surface's clip area.
        irect = surface.get_clip().clip(rect)
        for x in range(irect.left, irect.left+irect.width):
            for y in range(irect.top, irect.top+irect.height):
                surface.set_at((x,y), cf(surface.get_at((x,y)), bigSurf.get_at((x-rect.left, y-rect.top)) ) )
    del bigSurf
    # Build the returned "characteristics" plot: one curve per channel
    # (R red, G green, B blue, A white) over the gradient distance.
    char = pygame.Surface((d+1, 257))
    char.fill((0,0,0))
    ox = 0
    oldcol = color.eval(0)
    for x in range(d):
        col = color.eval(x)
        pygame.draw.line(char, (255,0,0), (x, 256-col[0]), (ox, 256-oldcol[0]))
        pygame.draw.line(char, (0,255,0), (x, 256-col[1]), (ox, 256-oldcol[1]))
        pygame.draw.line(char, (0,0,255), (x, 256-col[2]), (ox, 256-oldcol[2]))
        pygame.draw.line(char, (255,255,255), (x, 256-col[3]), (ox, 256-oldcol[3]))
        ox = x
        oldcol = col
    return char
class ColortInterpolator(object):
    '''
    ColorInterpolator(distance, color1, color2, rfunc, gfunc, bfunc, afunc)

    Interpolates a colour over *distance*, with an independent shaping
    function per channel (a = alpha).  If either colour has no alpha
    component, alpha is held constant at 255.
    '''
    def __init__(self, distance, color1, color2, rfunc, gfunc, bfunc, afunc):
        object.__init__(self)
        channels = zip(color1[:3], color2[:3], (rfunc, gfunc, bfunc))
        interpolators = [FunctionInterpolator(c1, c2, distance, fn)
                         for c1, c2, fn in channels]
        self.rInterpolator, self.gInterpolator, self.bInterpolator = interpolators
        if len(color1) == 4 and len(color2) == 4:
            a1, a2 = color1[3], color2[3]
        else:
            a1 = a2 = 255
        self.aInterpolator = FunctionInterpolator(a1, a2, distance, afunc)
    def eval(self, x):
        '''
        eval(x) -> [r, g, b, a]

        Colour at position 0 <= x <= distance (not actually bound to
        this interval).
        '''
        return [channel.eval(x) for channel in (self.rInterpolator,
                                                self.gInterpolator,
                                                self.bInterpolator,
                                                self.aInterpolator)]
class FunctionInterpolator(object):
    '''
    FunctionInterpolator(startvalue, endvalue, trange, func)

    Interpolates y = func(x) over [0, trange] so that roughly
    startvalue = f(0) and endvalue = f(trange), clamping results
    to the byte range [0, 255].
    '''
    def __init__(self, startvalue, endvalue, trange, func):
        object.__init__(self)
        # shaping function
        self.func = func
        # y-scaling; forced to 1.0 when start == end (original behaviour)
        self.a = endvalue - startvalue
        if self.a == 0:
            self.a = 1.
        # x-scaling: map [0, trange] onto [0, 1] for func
        self.b = 1. / abs(trange) if trange != 0 else 1.
        # x-displacement (unused, kept for the eval formula)
        self.c = 0
        # y-displacement: the clamped start value
        self.d = min(max(startvalue, 0), 255)
    def eval(self, x):
        '''
        eval(x) -> int

        Value at position x, rounded and clamped to [0, 255].
        '''
        raw = self.a * self.func(self.b * (x + self.c)) + self.d
        return int(round(min(max(raw, 0), 255)))
#------------------------------------------------------------------------------
def genericFxyGradient(surf, clip, color1, color2, func, intx, yint, zint=None):
    """
    genericFxyGradient(surf, clip, color1, color2, func, intx, yint, zint=None)

    Some sort of heightfield drawer: colours the *clip* area of *surf* by
    evaluating z = func(x, y) and mapping z linearly onto the colour range
    color1..color2 via  color = a*func(b*(x+c), d*(y+e)) + f.

    surf   : surface to draw on
    clip   : rect on surf to draw in
    color1 : start color
    color2 : end color
    func   : function z = func(x,y)
    intx   : interval in x direction where the function is evaluated
    yint   : interval in y direction where the function is evaluated
    zint   : if not None, same form as yint/intx; if None the max and min
             values of func are taken as the z-interval
    """
    # make sure that x1<x2 and y1<y2 and z1<z2
    w,h = clip.size
    x1 = min(intx)
    x2 = max(intx)
    y1 = min(yint)
    y2 = max(yint)
    if zint: # if user gave us a z interval, then use it
        z1 = min(zint)
        z2 = max(zint)
    else: # look for extrema of function (not the best algorithm)
        # NOTE(review): this samples func at pixel indices (i, j), not at
        # the scaled (x, y) coordinates used below -- confirm intended.
        z1 = func(x1,y1)
        z2 = z1
        for i in range(w):
            for j in range(h):
                r = func(i,j)
                z1 = min(z1, r)
                z2 = max(z2, r)
    x1 = float(x1)
    x2 = float(x2)
    y1 = float(y1)
    y2 = float(y2)
    z1 = float(z1)
    z2 = float(z2)
    # Promote RGB colours to RGBA with full opacity.
    if len(color1)==3:
        color1 = list(color1)
        color1.append(255)
    if len(color2)==3:
        color2 = list(color2)
        color2.append(255)
    # calculate stretching and displacement variables
    a = ((color2[0]-color1[0])/(z2-z1), \
         (color2[1]-color1[1])/(z2-z1), \
         (color2[2]-color1[2])/(z2-z1), \
         (color2[3]-color1[3])/(z2-z1) ) # stretching in z direction
    b = (x2-x1)/float(w) # stretching in x direction
    d = (y2-y1)/float(h) # stretching in y direction
    f = ( color1[0]-a[0]*z1, \
          color1[1]-a[1]*z1, \
          color1[2]-a[2]*z1, \
          color1[3]-a[3]*z1 )# z displacement
    c = x1/b
    e = y1/d
    surff = pygame.surface.Surface((w,h)).convert_alpha()
    # generate values pixel by pixel
    for i in range(h):
        for j in range(w):
            val = func(b*(j+c), d*(i+e))
            # clip color to the valid byte range
            color = ( max(min(a[0]*val+f[0],255),0), \
                      max(min(a[1]*val+f[1],255),0), \
                      max(min(a[2]*val+f[2],255),0), \
                      max(min(a[3]*val+f[3],255),0) )
            surff.set_at( (j,i), color )
    surf.blit(surff, clip)
|
#!/usr/bin/env python3
"""
Script to make daily values from the IMERG 30-minute images
It sums the precipitation, converts to mm, and averages the QIND
The -8888000 (undetect) are set to 0
"""
import os
import gdal
import numpy as np
def absolute_file_paths(directory):
    """Recursively collect paths of TIFF files under *directory*.

    Returns every file whose name ends in 'tif', joined with its
    containing directory, in os.walk order.  Despite the name, paths are
    absolute only if *directory* itself is absolute.
    """
    return [
        os.path.join(dirpath, name)
        for dirpath, _, filenames in os.walk(directory)
        for name in filenames
        if name.endswith('tif')
    ]
input_path = '/media/bram/Data/thesis/data_analysis/opera/2tiff_p/2015'
output_path = '/media/bram/Data/thesis/data_analysis/opera/3tiff_p_d/2015'
months = os.listdir(input_path)
# Directory layout: input_path/<month>/<day>/<96 quarter-hour GeoTIFFs>.
for month_dir in months:
    month_path = os.path.join(input_path, month_dir)
    days = os.listdir(month_path)
    for day in days:
        days_path = os.path.join(month_path, day)
        filenames = absolute_file_paths(days_path)
        # Only process complete days (96 x 15 minutes) not yet written out.
        # NOTE(review): the [62:-11] slice hard-codes the input_path length.
        if len(filenames) == 96 and not os.path.exists(output_path+filenames[0][62:-11]+'.tif'):
            first_file = gdal.Open(filenames[0])
            first_array = np.array(first_file.GetRasterBand(1).ReadAsArray())
            if '_q' in input_path:
                # Quality (QIND) input: per-pixel count of valid samples.
                count = [np.zeros(len(x)) for x in first_array]
                count += first_array > 0
            else:
                # Precipitation input: treat 'undetect' (-8888000) as zero.
                first_array[first_array == -8888000] = 0
            for filename in filenames[1:]:
                file = gdal.Open(filename)
                array = np.array(file.GetRasterBand(1).ReadAsArray())
                if '_q' not in input_path:
                    array[array == -8888000] = 0
                    # Accumulate, propagating nodata (-9999000) pixels.
                    for i in range(len(array)):
                        for j in range(len(array[i])):
                            if array[i][j] != -9999000 and first_array[i][j] != -9999000:
                                first_array[i][j] += array[i][j]
                            else:
                                first_array[i][j] = -9999000
                if '_q' in input_path:
                    # Sum valid quality samples and count them per pixel.
                    for i, row in enumerate(array):
                        for j, value in enumerate(row):
                            if value >= 0:
                                count[i][j] += 1
                                if first_array[i][j] < 0:
                                    first_array[i][j] = value
                                else:
                                    first_array[i][j] += value
            if '_q' in input_path:
                # Average the quality index over the valid samples.
                for i, row in enumerate(count):
                    for j, value in enumerate(row):
                        if value != 0 and first_array[i][j] != -9999000:
                            first_array[i][j] /= value
            else:
                # Rates summed over 96 quarter-hours -> daily total (x 0.25 h).
                first_array[first_array != -9999000] *= 0.25
            driver = gdal.GetDriverByName('GTiff')
            # NOTE(review): names the output after `filename` (last file of
            # the loop) while the existence check above used filenames[0].
            dst_ds = driver.CreateCopy(output_path+filename[62:-11]+'.tif', first_file)
            dst_band = dst_ds.GetRasterBand(1)
            dst_band.WriteArray(first_array)
            dst_band.FlushCache()
            dst_band.ComputeStatistics(False)
|
import tensorflow as tf
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
import math
import json
import sys
import keras
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Flatten, Activation, add
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras import initializers
from keras.engine import Layer, InputSpec
from keras import backend as K
from keras.utils import np_utils
from keras.optimizers import *
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import dataset
import argparse
import time
from datetime import timedelta
def build_dataset(data_directory, img_width):
    """Load images from *data_directory* resized to *img_width*.

    Returns (features, one-hot labels, number of classes).  The original
    carried redundant aliases (feature/label, sample_count/train_size);
    behaviour and output are unchanged.
    """
    X, y, tags = dataset.dataset(data_directory, int(img_width))
    nb_classes = len(tags)
    print("train size : {}".format(len(y)))
    label = np_utils.to_categorical(y, nb_classes)
    return X, label, nb_classes
def build_model(SHAPE, nb_classes, bn_axis, seed=None):
    """Build a VGG19-style CNN for *nb_classes* classes.

    The comment in the original ("We can't use ResNet50 directly") was
    misleading -- the architecture is VGG19, not ResNet.  The final
    block5 max-pool stays disabled (as in the original) to avoid a
    negative dimension on small inputs.

    bn_axis is accepted for interface compatibility but unused (no batch
    normalization layers in this network).
    """
    if seed:
        np.random.seed(seed)

    def _vgg_block(x, filters, n_convs, block_name, pool=True):
        # One VGG block: n_convs 3x3 same-padding ReLU convolutions,
        # optionally followed by a 2x2 stride-2 max-pool.  Layer names
        # match the original hand-written ones exactly.
        for i in range(1, n_convs + 1):
            x = Conv2D(filters, (3, 3),
                       activation='relu',
                       padding='same',
                       name='{}_conv{}'.format(block_name, i))(x)
        if pool:
            x = MaxPooling2D((2, 2), strides=(2, 2),
                             name='{}_pool'.format(block_name))(x)
        return x

    input_layer = Input(shape=SHAPE)
    x = _vgg_block(input_layer, 64, 2, 'block1')
    x = _vgg_block(x, 128, 2, 'block2')
    x = _vgg_block(x, 256, 4, 'block3')
    x = _vgg_block(x, 512, 4, 'block4')
    x = _vgg_block(x, 512, 4, 'block5', pool=False)
    # Classifier head.
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    x = Dense(nb_classes, activation='softmax', name='predictions')(x)
    model = Model(input_layer, x)
    return model
def main():
    """CLI entry point: train the VGG19-style net on <input>/train, evaluate
    on <input>/test, and append binary-classification metrics to the output
    report file."""
    start_time = time.monotonic()
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-i', '--input',
                        help='an input directory of dataset', required=True)
    parser.add_argument('-d', '--dimension',
                        help='a image dimension', type=int, default=48)
    parser.add_argument('-c', '--channel',
                        help='a image channel', type=int, default=3)
    parser.add_argument('-e', '--epochs',
                        help='num of epochs', type=int, default=10)
    parser.add_argument('-b', '--batch_size',
                        help='num of batch_size', type=int, default=64)
    # parser.add_argument('-o', '--optimizer',
    #                     help='choose the optimizer (rmsprop, adagrad, adadelta, adam, adamax, nadam)', default="adam")
    parser.add_argument('-o', '--output',
                        help='a result file', type=str, default="output_result_vgg19.txt")
    args = parser.parse_args()
    # dimensions of our images.
    img_width, img_height = args.dimension, args.dimension
    channel = args.channel
    epochs = args.epochs
    batch_size = args.batch_size
    SHAPE = (img_width, img_height, channel)
    # NOTE(review): K.image_dim_ordering() is deprecated Keras API (newer
    # releases use K.image_data_format()); bn_axis is also unused by
    # build_model.
    bn_axis = 3 if K.image_dim_ordering() == 'tf' else 1
    data_directory = args.input
    period_name = data_directory.split('/')
    print("loading dataset")
    X_train, Y_train, nb_classes = build_dataset(
        "{}/train".format(data_directory), args.dimension)
    X_test, Y_test, nb_classes = build_dataset(
        "{}/test".format(data_directory), args.dimension)
    print("number of classes : {}".format(nb_classes))
    model = build_model(SHAPE, nb_classes, bn_axis)
    model.compile(optimizer=Adam(lr=1.0e-4),
                  loss='categorical_crossentropy', metrics=['accuracy'])
    # Fit the model
    model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs)
    # Save Model or creates a HDF5 file
    model.save('{}epochs_{}batch_vgg19_model_{}.h5'.format(
        epochs, batch_size, data_directory.replace("/", "_")), overwrite=True)
    # del model # deletes the existing model
    # Evaluate on the held-out test split.
    predicted = model.predict(X_test)
    y_pred = np.argmax(predicted, axis=1)
    Y_test = np.argmax(Y_test, axis=1)
    cm = confusion_matrix(Y_test, y_pred)
    report = classification_report(Y_test, y_pred)
    # NOTE(review): this 2x2 unpacking assumes exactly two classes.
    tn = cm[0][0]
    fn = cm[1][0]
    tp = cm[1][1]
    fp = cm[0][1]
    # NOTE(review): zero cells are clamped to 1 to avoid division by zero,
    # which slightly distorts the reported metrics.
    if tp == 0:
        tp = 1
    if tn == 0:
        tn = 1
    if fp == 0:
        fp = 1
    if fn == 0:
        fn = 1
    TPR = float(tp)/(float(tp)+float(fn))
    FPR = float(fp)/(float(fp)+float(tn))
    accuracy = round((float(tp) + float(tn))/(float(tp) +
                     float(fp) + float(fn) + float(tn)), 3)
    specitivity = round(float(tn)/(float(tn) + float(fp)), 3)
    sensitivity = round(float(tp)/(float(tp) + float(fn)), 3)
    mcc = round((float(tp)*float(tn) - float(fp)*float(fn))/math.sqrt(
        (float(tp)+float(fp))
        * (float(tp)+float(fn))
        * (float(tn)+float(fp))
        * (float(tn)+float(fn))
    ), 3)
    # Append this run's metrics to the (cumulative) report file.
    f_output = open(args.output, 'a')
    f_output.write('=======\n')
    f_output.write('{}epochs_{}batch_vgg19\n'.format(
        epochs, batch_size))
    f_output.write('TN: {}\n'.format(tn))
    f_output.write('FN: {}\n'.format(fn))
    f_output.write('TP: {}\n'.format(tp))
    f_output.write('FP: {}\n'.format(fp))
    f_output.write('TPR: {}\n'.format(TPR))
    f_output.write('FPR: {}\n'.format(FPR))
    f_output.write('accuracy: {}\n'.format(accuracy))
    f_output.write('specitivity: {}\n'.format(specitivity))
    f_output.write("sensitivity : {}\n".format(sensitivity))
    f_output.write("mcc : {}\n".format(mcc))
    f_output.write("{}".format(report))
    f_output.write('=======\n')
    f_output.close()
    end_time = time.monotonic()
    print("Duration : {}".format(timedelta(seconds=end_time - start_time)))
if __name__ == "__main__":
    main()
|
"""This is a trivial example of a gitrepo-based profile; The profile source code and other software, documentation, etc. are stored in in a publicly accessible GIT repository (say, github.com). When you instantiate this profile, the repository is cloned to all of the nodes in your experiment, to `/local/repository`.
This particular profile is a simple example of using a single raw PC. It can be instantiated on any cluster; the node will boot the default operating system, which is typically a recent version of Ubuntu.
Instructions:
Wait for the profile instance to start, then click on the node in the topology and choose the `shell` menu item.
"""
# Import the Portal object.
import geni.portal as portal
# Import the ProtoGENI library.
import geni.rspec.pg as pg
# Create a portal context.
pc = portal.Context()
# Create a Request object to start building the RSpec.
request = pc.makeRequestRSpec()
# Add a raw PC to the request.
# NOTE(review): the original called request.RawPC("node") three times,
# registering three nodes that all share the client id "node" (an invalid
# RSpec) while only the last binding received the install service.  The
# docstring and comments describe a single raw PC, so one node is created.
node = request.RawPC("node")
# Install and execute a script that is contained in the repository.
node.addService(pg.Execute(shell="sh", command="/local/repository/silly.sh"))
# Print the RSpec to the enclosing page.
pc.printRequestRSpec(request)
|
# 8 Score, challenge, conclusion
import pygame
import time
import random
pygame.init()
# Window and sprite geometry.
display_width = 800
display_height = 600
car_width = 80
# RGB colour constants.
black = (0,0,0)
white = (255,255,255)
red = (255,0,0)
green = (0,255,0)
blue = (0,0,255)
road = (160, 160, 160)
obstacles = []
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('Race Game')
clock = pygame.time.Clock()
# NOTE(review): absolute path ties the game to the original Raspberry Pi;
# consider a path relative to this file.
carImg = pygame.image.load("/home/pi/gametuts/images/car_01.png")
def obstacles_avoided(count):
    """Render the dodge counter in the top-left corner of the display."""
    counter_font = pygame.font.SysFont(None, 25)
    label = counter_font.render("Dodged: "+str(count), True, blue)
    gameDisplay.blit(label, (10, 10))
def roadlines(roady):
    """Draw one white lane-divider segment at vertical offset roady."""
    lane_x = display_width/2 - 5
    pygame.draw.rect(gameDisplay, white, [lane_x, roady, 10, 100])
def roadlines2(roady):
    """Draw the second lane-divider segment.

    The body was a byte-for-byte duplicate of roadlines(); it now delegates
    so the drawing code exists in one place.  Kept as a separate name
    because the game loop calls both functions.
    """
    roadlines(roady)
# Define obstacle object
def obstacle(obx, oby, obw, obh, colour):
    """Draw a solid rectangular obstacle of the given size and colour."""
    bounds = [obx, oby, obw, obh]
    pygame.draw.rect(gameDisplay, colour, bounds)
def car(x, y):
    """Blit the player's car sprite with its top-left corner at (x, y)."""
    gameDisplay.blit(carImg, (x, y))
def crash():
    # Show the crash banner; message_display() also restarts the game loop.
    message_display('You crashed')
def message_display(text):
    """Show *text* centred on screen for 3 seconds, then restart the game.

    NOTE(review): calling game_loop() from here recurses rather than
    returning, so each crash deepens the call stack.
    """
    largeText = pygame.font.Font('freesansbold.ttf', 90)
    TextSurf, TextRect = text_objects(text, largeText)
    TextRect.center = ((display_width/2), (display_height/2))
    gameDisplay.blit(TextSurf, TextRect)
    pygame.display.update()
    time.sleep(3)
    game_loop()
def text_objects(text, font):
    """Render *text* in black with *font*; return (surface, bounding rect)."""
    rendered = font.render(text, True, black)
    return rendered, rendered.get_rect()
def game_loop():
    """Main game loop: steer the car, scroll the road, drop obstacles,
    detect crashes, and count dodged obstacles."""
    # Car start position and horizontal velocity.
    x = (display_width * 0.45)
    y = (display_height * 0.8)
    x_change = 0
    passed = 0
    # Defining obstacle start parameters
    ob_startx = random.randrange(0, display_width)
    ob_starty = -600
    ob_speed = 10
    ob_width = random.randrange(30, 100)
    ob_height = random.randrange(30, 100)
    # Roadlines
    roadline_startx = (display_width/2) - 5
    roadline_starty = 0
    roadline_speed = 30
    roadline2_starty = -300
    gameExit = False
    while not gameExit:
        # Arrow keys steer; releasing either arrow stops the car.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    x_change = -5
                elif event.key == pygame.K_RIGHT:
                    x_change = 5
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                    x_change = 0
        x += x_change
        gameDisplay.fill(road)
        # Two lane-divider segments scroll downward, offset by 300 px.
        roadlines(roadline_starty)
        roadline_starty += roadline_speed
        roadlines2(roadline2_starty)
        roadline2_starty += roadline_speed
        # Creates new object for obstacle
        obstacle(ob_startx, ob_starty, ob_width, ob_height, black)
        ob_starty += ob_speed
        car(x,y)
        obstacles_avoided(passed)
        # Frame boundaries
        if x > display_width - car_width or x < 0:
            crash()
        # Whenever the obstacle is off screen, re-define variables
        if ob_starty > display_height:
            ob_starty= 0 - ob_height
            ob_startx = random.randrange(0, display_width)
            ob_width = random.randrange(30, 100)
            ob_height = random.randrange(30, 100)
            passed += 1
            # Difficulty ramp: each dodge widens the next obstacle by 50%.
            ob_width = ob_width * 1.5
        if roadline_starty > display_height:
            roadline_starty = 0
        if roadline2_starty > display_height:
            roadline2_starty = 0
        # Collision detecting with obstacles
        # NOTE(review): only the car's left/right edges are tested, so a car
        # wider than the obstacle straddling it would not register a hit.
        if y < ob_starty + ob_height:
            print('y crossover')
            if x > ob_startx and x < ob_startx + ob_width or x + car_width > ob_startx and x + car_width < ob_startx + ob_width:
                print('x crossover')
                crash()
        pygame.display.update()
        clock.tick(60)
# Start the game, then clean up once the loop exits.
game_loop()
pygame.quit()
quit()
|
from wtforms import Form, StringField, IntegerField,FileField
from wtforms.validators import Length, NumberRange, DataRequired, Regexp
class SearchForm(Form):
    # Book search: keyword q (1-30 chars, required) plus a 1-based page
    # number capped at 30.
    q = StringField(validators=[Length(min=1, max=30), DataRequired()])
    page = IntegerField(validators=[NumberRange(min=1, max=30)], default=1)
class DriftForm(Form):
    # Book-gifting ("drift") request: recipient contact and delivery
    # details.  Labels and validation messages are user-facing Chinese
    # strings (recipient name, mobile, message, address).
    recipient_name = StringField('收件人姓名', validators=[DataRequired(), Length(min=2, max=20)])
    # Mobile numbers: 11 digits starting with 1 (mainland China format).
    mobile = StringField('手机号', validators=[DataRequired(), Regexp('^1[0-9]{10}$', 0, '请输入正确的手机号')])
    message = StringField('留言')
    address = StringField('地址', validators=[DataRequired(), Length(min=2, max=20)])
class LengForm(Form):
    # Manual book-entry form: title and 13-digit ISBN are mandatory; the
    # remaining descriptive fields (author, publisher, summary, image)
    # are optional.
    title=StringField(validators=[Length(min=1, max=30), DataRequired(message='书名不能为空')])
    isbn = StringField(validators=[Length(min=13, max=13), DataRequired(message='书号不能为空')])
    author=StringField('作者')
    publisher=StringField('出版社')
    summary=StringField('简介')
    image=StringField("上传图片")
|
from datetime import datetime
import logging
from django.core.management.base import BaseCommand
from aids.services.contacts import extract_aids_contact_info
class Command(BaseCommand):
    """
    Find the emails and phone numbers of aids contacts.
    """

    def handle(self, *args, **options):
        start_time = datetime.now()
        logger = logging.getLogger("console_log")
        # --verbosity 2 (or more) switches on debug-level output.
        verbosity = int(options["verbosity"])
        logger.setLevel(logging.DEBUG if verbosity > 1 else logging.INFO)
        extract_aids_contact_info()
        end_time = datetime.now()
        logger.info(f"Command ran in {end_time - start_time}.")
|
from selenium import webdriver
import time
# Create a new instance of the Firefox driver
driver = webdriver.Firefox()
# Global pass/fail tally; currently unused (see commented summary at EOF).
passing_flag=0
def check_page(driver, keyword):
#find all iframes on the page
iframes = driver.find_elements_by_tag_name("iframe")
frames = []
failing_counter = 0
for i in iframes:
attrs = driver.execute_script('var items = {}; for (index = 0; index < arguments[0].attributes.length; ++index) { items[arguments[0].attributes[index].name] = arguments[0].attributes[index].value }; return items;', i)
#print attrs
if attrs.has_key(u'title'):
title = attrs[u'title']
if title == '3rd party ad content':
frames.append(i)
# print attrs
# print "Found", len(frames), "ad frames"
ad_number = 0
for a in frames:
driver.switch_to.frame(a)
iframes = driver.find_elements_by_tag_name("iframe")
adframe=None
src=""
for iframe in iframes:
attrs = driver.execute_script('var items = {}; for (index = 0; index < arguments[0].attributes.length; ++index) { items[arguments[0].attributes[index].name] = arguments[0].attributes[index].value }; return items;', iframe)
#print attrs
if attrs.has_key(u'src') and attrs.has_key(u'width') and attrs.has_key(u'height'):
src = attrs[u'src']
width = int(attrs[u'width'])
height = int(attrs[u'height'])
if width > 0 and height > 0:
# print attrs
adframe=iframe
ad_number = ad_number + 1
if(adframe!=None):
# print "Checking ad", src
#print "Checking ad", width, "x", height
driver.switch_to.frame(adframe)
#attrs = driver.execute_script('var items = {}; for (index = 0; index < arguments[0].attributes.length; ++index) { items[arguments[0].attributes[index].name] = arguments[0].attributes[index].value }; return items;', iframes[0])
#print attrs
page_source = driver.page_source
if page_source.find(keyword) > 0:
print "Checking ad", width, "x", height, ": ", "Found keyword", keyword, "passed"
else:
print "Checking ad", width, "x", height, ": ","NOT found keyword", keyword, "failed"
# passing_flag = passing_flag + 1
failing_counter = failing_counter +1
driver.switch_to.default_content()
print "FOUND", ad_number, "adds on the page"
#files = [ 'bydu.txt', 'dexcom.txt' ]
files = [ 'dexcom.txt' ]
#keywords = [ "BYDUREON", "Dexcom" ]
keywords = [ "Dexcom" ]
# Each file lists page URLs (one per line); keywords is parallel to files.
for file in files:
    f = open(file)
    pages = []
    for line in f:
        line = line.strip()
        if line != "":
            pages.append(line)
    keyword = keywords[files.index(file)]
    for page in pages:
        # NOTE(review): check_page keeps its failure count in its own local
        # and does not assign it here, so this stays 0 and the "Test
        # Failed" branch below can never fire.
        failing_counter = 0
        # go to page
        print "checking page", page
        driver.get(page)
        # Give ads time to load before inspecting the frames.
        time.sleep(30)
        check_page(driver, keyword)
        if failing_counter !=0:
            print "Test Failed in ", keyword, " :", failing_counter, " times"
driver.quit()
#if passing_flag == 0:
#    print "TEST PASSED SUCCESSFULLY"
#else:
#    print "TEST FAILED ", passing_flag, " Times"
from flask import jsonify, Blueprint, url_for
from flask_restful import Resource, Api, reqparse, inputs, fields, marshal, marshal_with
from passlib.apps import custom_app_context as pwd_context
from application import models
from application import (
db, JWTManager, jwt_required, create_access_token,
get_jwt_identity, jwt
)
# Serialization schema for CDD records returned by the API.
cdd_fields = {
    'id': fields.Integer,
    'name': fields.String,
    'address': fields.String,
    'latitude': fields.Float,
    'longitude': fields.Float
}
# Shared request parser for create/update: name and address are mandatory,
# coordinates are optional.
cdd_request_parser = reqparse.RequestParser()
cdd_request_parser.add_argument(
    'name',
    required=True,
    help='No name provided'
)
cdd_request_parser.add_argument(
    'address',
    required=True,
    help='No address provided'
)
cdd_request_parser.add_argument(
    'latitude'
)
cdd_request_parser.add_argument(
    'longitude'
)
class CDD(Resource):
    """Single-CDD resource: read, update, and delete by primary key."""

    def __init__(self):
        self.reqparse = cdd_request_parser
        super().__init__()

    @marshal_with(cdd_fields)
    def get(self, id):
        # .one() raises if the id is unknown.
        return models.CDD.query.filter_by(id=id).one()

    def put(self, id):
        payload = self.reqparse.parse_args()
        models.CDD.query.filter_by(id=id).update(payload)
        db.session.commit()
        headers = {'Location' : url_for('resources.cdds.cdd', id=id)}
        return (id, 200, headers)

    def delete(self, id):
        record = models.CDD.query.filter_by(id=id).one()
        db.session.delete(record)
        db.session.commit()
        headers = {'Location' : url_for('resources.cdds.cdds')}
        return ('', 204, headers)
class CDDs(Resource):
    """Collection resource: list every CDD and create new ones."""

    def __init__(self):
        self.reqparse = cdd_request_parser
        super().__init__()

    def get(self):
        records = models.CDD.query.all()
        return {'cdds' : [marshal(record, cdd_fields) for record in records]}

    def post(self):
        payload = self.reqparse.parse_args()
        record = models.CDD(**payload)
        db.session.add(record)
        db.session.commit()
        # Point the client at the newly created resource.
        location = url_for('resources.cdds.cdd', id=record.id)
        return (record.id, 201, {
            'Location':location
        })
# Blueprint wiring: /api/v1/cdd/<id> -> CDD, /api/v1/cdds -> CDDs.
cdds_api = Blueprint('resources.cdds', __name__)
api = Api(cdds_api)
api.add_resource(
    CDD,
    '/api/v1/cdd/<int:id>',
    endpoint='cdd'
)
api.add_resource(
    CDDs,
    '/api/v1/cdds',
    endpoint='cdds'
)
# -*- coding: utf-8 -*-
import traceback
import random
import config
from s3 import S3, FakeFile
from mp_tasks import Tasks
from utils import login, get_accounts
from logwrapper import logger
def create_account(suffix, session):
    """Create one test account via the ezs3 JSON CGI; failures are logged, not raised."""
    name = config.ACCOUNT_PREFIX + str(suffix)
    url = 'https://' + config.HOST + ':8080/cgi-bin/ezs3/json/add_user'
    params = {
        'user_id': name,
        'display_name': name,
        'email': name + '@test.com',
        'password': 'test',
        'confirm_password': 'test',
    }
    try:
        # Self-signed cert on the appliance, hence verify=False.
        session.get(url, params=params, verify=False)
    except Exception as err:
        logger.error('create account [{}] failed'.format(name))
        logger.error(err)
def create_bucket(suffix, username, s3):
    """Create one bucket for the given user; failures are logged with a traceback."""
    # Bucket names must be unique cluster-wide, even across different users,
    # so the owner's name is baked into the bucket name.
    name = username + config.BUCKET_PREFIX + str(suffix)
    try:
        s3.create_bucket(name)
    except Exception as err:
        logger.error('create bucket [{}] failed, account [{}]'.format(name, s3.access_key))
        logger.error(err)
        logger.error(traceback.format_exc())
def create_object(suffix, s3, bucket, size):
    """Upload one fake file of `size` bytes into `bucket`; failures are logged."""
    payload = FakeFile(size)
    name = bucket.name + config.OBJECT_PREFIX + str(suffix)
    try:
        s3.upload(bucket, payload, name)
    except Exception as err:
        logger.error('create file [{}] failed, bucket [{}], account [{}]'.format(
            name, bucket.name, s3.access_key
        ))
        logger.error(err)
# Shared worker pool; CONCURRENCY bounds how many tasks run at once.
tasks = Tasks(config.CONCURRENCY)
def prepare():
    """Seed the cluster with test accounts, then buckets, then objects.

    Each phase is fanned out through the shared `tasks` pool and joined
    before the next phase starts, since buckets need accounts and objects
    need buckets.
    """
    session = login()
    logger.info('Create accounts ...')
    for i in range(config.ACCOUNT_QUANTITY):
        tasks.add_task((create_account, i, session))
    tasks.join()
    users = get_accounts(session)
    logger.info('Create buckets ...')
    s3_list = []
    for access_key, secret_key, uid in users:
        s3 = S3(access_key, secret_key, config.HOST, uid)
        s3_list.append(s3)
        for i in range(config.BUCKET_PER_ACCOUNT):
            tasks.add_task((create_bucket, i, uid, s3))
    tasks.join()
    logger.info('Create objects ...')
    # xrange: this script targets Python 2 (the bundled libs are python2.7).
    for s3 in s3_list:
        for i in xrange(config.BUCKET_PER_ACCOUNT):
            # Bucket name must match the scheme used in create_bucket().
            bucket = s3.get_bucket(s3.uid + config.BUCKET_PREFIX + str(i))
            for j in xrange(config.OBJECT_PER_BUCKET):
                size = random.randint(config.MIN_FILE_SIZE, config.MAX_FILE_SIZE)
                tasks.add_task((create_object, j, s3, bucket, size))
    tasks.join()
    tasks.close()
# Script entry point: prefer the bundled Python 2.7 libs over system packages.
if __name__ == '__main__':
    import os
    import sys
    cwd = os.path.dirname(__file__)
    local_lib = os.path.join(cwd, 'libs/lib/python2.7/site-packages')
    if local_lib not in sys.path:
        sys.path.insert(0, local_lib) # make sure using local 'requests' and 'boto' module instead of system's default
    prepare()
    sys.exit()
|
# -*- coding: utf-8 -*-
import click
import glob
import pandas as pd
import math
import json
from urllib.request import urlopen
import glob
import numpy as np
#Build pipe:
def STILT_converter(df,min_year,max_year,save_base_name):
    """
    Convert expanded TRI release rows into the two CSVs the STILT pipeline needs.

    Inputs:
        df            -- dataframe with LATITUDE, LONGITUDE, StackHeight,
                         CHEMICAL, 'Release (lbs/year)' and YEAR columns.
        min_year      -- minimum year of TRI releases (inclusive)
        max_year      -- maximum year of TRI releases (inclusive)
        save_base_name-- prefix for the two outputs:
                         <base>_IDMAPPING.csv (join back after the STILT run)
                         <base>_RUN.csv (input to STILT itself)

    Returns nothing; both files are written to disk.
    """
    key_cols = ['lati', 'long', 'zagl', 'YEAR']
    # Window the years, keep only the relevant columns, and rename to the
    # lati/long/zagl names STILT expects.
    in_window = (df.YEAR >= min_year) & (df.YEAR <= max_year)
    base_df = df[in_window][['LATITUDE', 'LONGITUDE', 'StackHeight', 'CHEMICAL',
                             'Release (lbs/year)', 'YEAR']]
    base_df = base_df.rename(columns={'LATITUDE': 'lati', 'LONGITUDE': 'long',
                                      'CHEMICAL': 'Chemical', 'StackHeight': 'zagl'})
    # STILT only simulates particles per location/height/year (concentration is
    # irrelevant), so deduplicate on those keys and assign a sequential run id.
    unique_runs = base_df.drop_duplicates(key_cols)
    unique_runs = unique_runs.drop(columns=['Chemical', 'Release (lbs/year)'])
    unique_runs = unique_runs.sort_values(by='YEAR').reset_index(drop=True)
    stilt_run_id = unique_runs.reset_index().rename(columns = {'index':'id'})
    # Re-attach the run id to every (chemical, amount) row so toxicity can be
    # re-joined after the STILT run.
    stilt_trace_mapping = base_df.merge(stilt_run_id, on=key_cols).sort_values(by='id')
    #save the files
    stilt_trace_mapping.to_csv(str(save_base_name + '_IDMAPPING.csv'),index=False)
    stilt_run_id.to_csv(str(save_base_name + '_RUN.csv'),index = False)
#Click represents a package to easily interface between the terminal and python
#Note all click commands must be in line with the function they are wrapping
@click.command()
@click.argument('tri_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
@click.argument('min_year')
@click.argument('max_year')
def main(tri_filepath, output_filepath, min_year, max_year):
    """ Takes raw TRI data, selects out data from the min-max year and saves in format compatible with stilt.
    """
    #Load TRI data:
    tri_df = pd.read_csv(tri_filepath).drop(columns=['Unnamed: 0'])
    #This separates fugitive and stack releases - setting the stack height of the release for fugitive releases to 0
    #.copy() makes each subset independent of tri_df: assigning into a bare
    #boolean slice triggers pandas' SettingWithCopyWarning and the column
    #write may silently not stick.
    fug = tri_df[tri_df['51-FUGITIVEAIR']>0].copy()
    fug['StackHeight']=0
    fug = fug.rename(columns = {'51-FUGITIVEAIR':'Release (lbs/year)'})
    fug = fug.drop(columns = ['52-STACKAIR'])
    stack = tri_df[tri_df['52-STACKAIR']>0].copy()
    stack = stack.rename(columns = {'52-STACKAIR':'Release (lbs/year)'})
    stack = stack.drop(columns = ['51-FUGITIVEAIR'])
    #Concatenate the results together
    stack_fug_df = pd.concat([stack,fug])
    #Convert into the STILT format
    STILT_converter(stack_fug_df,int(min_year),int(max_year),output_filepath)
if __name__ == '__main__':
main() |
# Generated by Django 2.2.13 on 2020-07-07 12:31
from django.contrib.postgres.operations import CreateExtension
import django.contrib.gis.db.models.fields
from django.db import models, migrations
class Migration(migrations.Migration):
    """Enable PostGIS and add bbox/centroid/geom geometry columns to Country."""
    dependencies = [
        ('api', '0074_auto_20200701_0939'),
    ]
    operations = [
        # The PostGIS extension must exist before any geometry columns
        # can be created in this database.
        CreateExtension('postgis'),
        migrations.AddField(
            model_name='country',
            name='bbox',
            field=django.contrib.gis.db.models.fields.PolygonField(blank=True, null=True, srid=4326),
        ),
        migrations.AddField(
            model_name='country',
            name='centroid',
            field=django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326),
        ),
        migrations.AddField(
            model_name='country',
            name='geom',
            field=django.contrib.gis.db.models.fields.MultiPolygonField(blank=True, null=True, srid=4326),
        ),
    ]
|
#============================================================================================#
# Copyright: JarvisLee
# Date: 2020/11/25
# File Name: Trainer.py
# Description: This file is used to training the model.
#============================================================================================#
# Importing the necessary library.
import os
import time
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as tud
import scipy.spatial
from tqdm import tqdm
from visdom import Visdom
from Utils.Config import argParse
from Utils.DataPreprocessor import dataLoader
from Model.SkipGramModel import SkipGramNN
# Getting the configurator.
Cfg = argParse()
# Setting the current time (used to namespace checkpoint and log files).
if Cfg.currentTime != -1:
    currentTime = Cfg.currentTime
else:
    currentTime = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
# Creating the model directory (one sub-directory per run timestamp).
if not os.path.exists(Cfg.modelDir):
    os.mkdir(Cfg.modelDir)
if not os.path.exists(Cfg.modelDir + f'/{currentTime}'):
    os.mkdir(Cfg.modelDir + f'/{currentTime}')
# Creating the log directory.
if not os.path.exists(Cfg.logDir):
    os.mkdir(Cfg.logDir)
# Setting the device and random seed.
if torch.cuda.is_available():
    # Setting the device.
    device = 'cuda'
    # Fixing the device (GPUID == -1 means "let torch pick").
    if Cfg.GPUID != -1:
        torch.cuda.set_device(Cfg.GPUID)
    # Fixing the random seed.
    np.random.seed(Cfg.seed)
    torch.cuda.manual_seed(Cfg.seed)
else:
    # Setting the device.
    device = 'cpu'
    # Fixing the random seed.
    np.random.seed(Cfg.seed)
    torch.manual_seed(Cfg.seed)
# Defining the evaluation method.
def evaluator(embeddingMatrix, testingWord, itos, stoi):
    """Return the 30 nearest words (by cosine distance) to `testingWord`.

    Out-of-vocabulary queries fall back to the '<unk>' token. Note the
    query word itself is included in the result (distance 0).
    """
    # Map the word to its row in the embedding matrix.
    idx = stoi.get(testingWord, stoi.get('<unk>'))
    query = embeddingMatrix[idx]
    # Cosine distance between the query embedding and every row.
    distances = np.array([scipy.spatial.distance.cosine(row, query)
                          for row in embeddingMatrix])
    # Smallest distance first -> most similar words first.
    return [itos[i] for i in distances.argsort()[:30]]
# Defining the training method.
def trainer(trainSet):
    """Train the skip-gram model over `trainSet` for Cfg.epoches epochs.

    Logs hyper-parameters and per-epoch loss to a timestamped file, draws
    the loss curve in visdom, and checkpoints the model after every epoch.
    """
    # Creating the logging.
    logging.basicConfig(filename = Cfg.logDir + f'/logging-{currentTime}.txt', filemode = 'a', level = logging.INFO, format = '%(asctime)s %(levelname)s %(message)s', datefmt = '%Y-%m-%d %H:%M:%S %p')
    # Logging the hyper-parameter information once, up front.
    logging.info(f'''
        Context Size: {Cfg.cs}
        Negative Sampling: {Cfg.ns}
        Vocabulary Size: {Cfg.vs}
        Embedding Size: {Cfg.es}
        Learning Rate: {Cfg.lr}
        Weight Decay: {Cfg.wd}
        Batch Size: {Cfg.bs}
        Epoches: {Cfg.epoches}
        Random Seed: {Cfg.seed}
        GPU ID: {Cfg.GPUID}
        Model Directory: {Cfg.modelDir}
        Log Directory: {Cfg.logDir}
        Dataset Directory: {Cfg.dataDir}
    ''')
    # Creating the visdom.
    vis = Visdom(env = 'SkipGramModel')
    # Creating the graph.
    lossGraph = vis.line(X = [0], Y = [0], opts = dict(legend = ['TrainingLoss'], xlabel = 'Epoches', ylabel = 'Loss', title = f'Training and Evaluating Loss - {currentTime}'), name = 'TrainingLoss')
    # Creating the model.
    model = SkipGramNN(Cfg.vs, Cfg.es)
    # Sending the model to the correct device.
    model.to(device)
    # Setting the optimizer.
    optimizer = optim.Adam(model.parameters(), lr = Cfg.lr, weight_decay = Cfg.wd)
    # Initializing the previous cost.
    # NOTE(review): previousCost is never read or updated below -- dead state.
    previousCost = 0
    # Setting the training loss history (one mean loss per epoch).
    trainLosses = []
    # Training the model.
    for epoch in range(Cfg.epoches):
        # Initializing the per-batch losses for this epoch.
        trainLoss = []
        # Setting the loading bar.
        with tqdm(total = len(trainSet), desc = f'Epoch {epoch + 1}/{Cfg.epoches}', unit = 'batches', dynamic_ncols = True) as pbars:
            # Getting the training data.
            for i, (centerWords, positiveWords, negativeWords) in enumerate(trainSet):
                # Sending the center words into the corresponding device.
                centerWords = centerWords.to(device)
                # Sending the positive words into the corresponding device.
                positiveWords = positiveWords.to(device)
                # Sending the negative words into the corresponding device.
                negativeWords = negativeWords.to(device)
                # Getting the loss (the model's forward returns the loss directly).
                loss = model(centerWords, positiveWords, negativeWords)
                # Storing the loss.
                trainLoss.append(loss.item())
                # Clearing the previous gradient.
                optimizer.zero_grad()
                # Applying the backward propagation.
                loss.backward()
                # Updating the parameters.
                optimizer.step()
                # Updating the loading bar.
                pbars.update(1)
                # Updating the training information.
                pbars.set_postfix_str(' - Train Loss %.4f' % (np.mean(trainLoss)))
            # Closing the loading bar.
            pbars.close()
        # Storing the epoch's mean training loss.
        trainLosses.append(np.mean(trainLoss))
        # Logging the information.
        logging.info('Epoch [%d/%d] -> Training: Loss [%.4f]' % (epoch + 1, Cfg.epoches, np.mean(trainLoss)))
        # Redrawing the full loss curve so far.
        vis.line(
            X = [k for k in range(1, len(trainLosses) + 1)],
            Y = trainLosses,
            win = lossGraph,
            update = 'new',
            name = 'TrainingLoss'
        )
        # Checkpointing the model after every epoch.
        torch.save(model.state_dict(), Cfg.modelDir + f'/{currentTime}/SkipGram-Epoch{epoch + 1}.pt')
        # Providing the hint for saving model.
        logging.info("Model Saved")
# Script entry point: interactive loop for training or evaluating the model.
if __name__ == "__main__":
    # Getting the necessary components of data generator.
    vocab, text, itos, stoi, wordFreq = dataLoader.generatorComponents(Cfg.dataDir, Cfg.vs)
    # Generating the training set.
    trainSet = tud.DataLoader(dataLoader(text, itos, stoi, wordFreq, Cfg.cs, Cfg.ns), batch_size = Cfg.bs, shuffle = True)
    cmd = input("Please input the command ('T' for training, 'E' for evaluating, 'Exit()' for quit): ")
    while cmd != 'Exit()':
        if cmd == 'T':
            # Training the model.
            trainer(trainSet)
            cmd = input("Please input the command ('T' for training, 'E' for evaluating, 'Exit()' for quit): ")
        elif cmd == 'E':
            try:
                # Loading the parameters of the final epoch's checkpoint.
                params = torch.load(Cfg.modelDir + f'/{currentTime}/SkipGram-Epoch{Cfg.epoches}.pt')
                # Getting the testing words.
                word = input("Please input a word ('Exit()' for quit): ")
                # Evaluating words until the user quits the inner loop.
                while word != 'Exit()':
                    # Getting the embedding parameters.
                    embeddingMatrix = params['inputEmbedding.weight'].cpu().numpy()
                    # Converting the testing word into lowercase.
                    word = str.lower(word)
                    # Printing the testing result.
                    print("The similar words of " + word + " are : " + " ".join(evaluator(embeddingMatrix, word, itos, stoi)))
                    # Getting another testing word.
                    word = input("Please input a word ('Exit()' for quit): ")
            # Fixed: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Exception still covers the
            # expected missing-checkpoint / bad-key errors.
            except Exception:
                # Giving the hint.
                print("Please training a model first!!!")
                # Fall through to the training branch on the next iteration.
                word = 'T'
            cmd = word
        else:
            cmd = input("Invalid Input! Please input the command ('T' for training, 'E' for evaluating, 'Exit()' for quit): ")
import os
import testinfra.utils.ansible_runner
# Resolve the molecule-generated inventory so these tests run on every host.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('all')
def test_installed_packages(host):
    """Verify the baseline package set is installed on each host."""
    required = (
        "lsof",
        "git",
        "telnet",
        "ntp",
        "iftop",
        "unzip",
        "net-tools",
        "curl",
    )
    for pkg_name in required:
        assert host.package(pkg_name).is_installed
|
from rest_framework import serializers
from .models import Professor, Departamento, Curso, Disciplina, Turma, Avaliacao
class ProfessorSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes every Professor field; related objects appear as hyperlinks."""
    class Meta:
        model = Professor
        fields = '__all__'
class DepartamentoSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes every Departamento field; related objects appear as hyperlinks."""
    class Meta:
        model = Departamento
        fields = '__all__'
class CursoSerializer(serializers.ModelSerializer):
    """Serializes every Curso field (plain primary-key relations)."""
    class Meta:
        model = Curso
        fields = '__all__'
class DisciplinaSerializer(serializers.ModelSerializer):
    """Serializes every Disciplina field (plain primary-key relations)."""
    class Meta:
        model = Disciplina
        fields = '__all__'
class TurmaSerializer(serializers.ModelSerializer):
    """Serializes every Turma field (plain primary-key relations)."""
    class Meta:
        model = Turma
        fields = '__all__'
class AvaliacaoSerializer(serializers.ModelSerializer):
    """Serializes every Avaliacao field (plain primary-key relations)."""
    class Meta:
        model = Avaliacao
        fields = '__all__'
#!/usr/bin/env python
##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
from __future__ import absolute_import, division, print_function, unicode_literals
# Global registries mapping a profiler name (and optionally a usage tag) to
# its factory callable; populated via registerProfiler().
profilers = {}
profilersByUsage = {}
def registerProfiler(name, profiler, usage=None):
    """Register a profiler factory under `name`; optionally also index it by `usage`."""
    global profilers, profilersByUsage
    profilers[name] = profiler
    if usage:
        profilersByUsage[usage] = profiler
def getProfiler(name, id=None, **kwargs):
    """Instantiate the profiler registered under `name`, or None if unknown."""
    # Read-only access; `global` kept for symmetry with registerProfiler.
    global profilers
    if name in profilers:
        return profilers[name](id, **kwargs)
    return None
def getProfilerByUsage(usage, id=None, **kwargs):
    """Instantiate the profiler indexed by `usage`, or None if none was registered."""
    global profilersByUsage
    if usage in profilersByUsage:
        return profilersByUsage[usage](id, **kwargs)
    return None
|
from abc import ABC, abstractmethod
class Computadoras(ABC):
    """Abstract base for computer products in this class hierarchy."""
    @abstractmethod
    def __init__(self):
        # NOTE(review): this stores the Tipo *class object* itself, not an
        # instance -- confirm whether `Tipo()` was intended. (Tipo is defined
        # later in the module, which is fine at call time.)
        self.tipo = Tipo
        self.caracteristicas = None
class PC(Computadoras):
    # NOTE(review): this print runs once at class-definition (import) time,
    # not per instance. PC also never overrides the abstract __init__, so it
    # cannot be instantiated as written.
    print("Dipositivo de gama alta")
class Laptop(Computadoras):
    # NOTE(review): print executes at import time, not per instance; the
    # abstract __init__ is never implemented, so Laptop is not instantiable.
    print("Modelo perteneciente a Apple")
class Tipo:
    # NOTE(review): executes once at import time; likely meant as a
    # docstring or method body.
    print("Caracteristica/ tipo de maquina")
class CasaEscritorio(Tipo):
    """Desktop variant of Tipo."""
    def grande(self):
        # Desktop machines are stationary.
        print("Solo puede estar estatica")
class Portatil(Tipo):
    # NOTE(review): executes once at import time, unlike CasaEscritorio.grande
    # which is a method -- the two subclasses are inconsistent.
    print("Facíl de traer a todos lados")
|
#!/usr/bin/env python
# coding=utf-8
import os
import sys
# Run from the product root and make its packages importable before the
# project-local imports below are executed.
os.chdir('/usr/local/bluedon/')
if '/usr/local/bluedon/' not in sys.path:
    sys.path.append('/usr/local/bluedon/')
import json
import time
import psutil
from db.config import fetchone_sql as fetch3306
from utils.log_logger import rLog_dbg, rLog_err
from reportlog.log_clear import log_clear_release_disk
LOGNAME = 'logclear_cron_task'
# Thin wrappers that tag every debug/error record with this module's log name.
LOG_DBG = lambda x: rLog_dbg(LOGNAME, x)
LOG_ERR = lambda x: rLog_err(LOGNAME, x)
def cron_task():
    """Stop the logclear systemd timer, run the appropriate cleanup, restart.

    The timer is restarted in `finally` so it comes back even if cleanup
    raises. In the daily 00:00-00:10 window an age-based cleanup runs;
    otherwise logs are only cleared when /var usage crosses the threshold.
    """
    # stop service by systemctl first
    LOG_DBG('stop service at %s' % time.ctime())
    os.system('systemctl stop bdad_logclear_cron_task.timer')
    try:
        tnow = time.localtime()
        if tnow.tm_hour == 0 and tnow.tm_min <= 10:
            max_keep = update_max_keep()
            LOG_DBG('run log_clear_release_disk [clear (%s) days ago]' % max_keep)
            LOG_DBG('run log_clear_release_disk [keep=True]')
            log_clear_release_disk(keep=True)
            return
        disk_threshold = update_disk_threshold()
        disk_usage = psutil.disk_usage('/var').percent
        if disk_usage >= disk_threshold:
            LOG_DBG('run log_clear_release_disk [u(%s) >= m(%s)]' % (disk_usage, disk_threshold))
            LOG_DBG('run log_clear_release_disk [disk is full]')
            LOG_DBG('run log_clear_release_disk [x_days_ago = 0 keep=False]')
            log_clear_release_disk(x_days_ago=0, keep=False)
            return
        else:
            LOG_DBG('run log_clear_release_disk [disk is not full (%s)%%]' % disk_usage)
    finally:
        # restart service (fixed log-message typo: 'statr' -> 'start')
        LOG_DBG('start service at %s' % time.ctime())
        os.system('systemctl start bdad_logclear_cron_task.timer')
def update_disk_threshold():
    """Read the disk-usage threshold (percent) from m_tbconfig; default 90."""
    threshold = 90
    try:
        row = fetch3306('SELECT sValue FROM m_tbconfig WHERE sName="UseFullRate";')
        parsed = json.loads(row['sValue'])
        threshold = int(parsed.get('iDisk', 90))
    except Exception as err:
        # Keep the default if the DB row is missing or malformed.
        LOG_ERR('ERROR updating disk_threshold: %s' % err)
    LOG_DBG('disk_threshold = %s' % threshold)
    return threshold
def update_max_keep():
    """Read the retention window (days) from m_tbconfig; default 30."""
    keep_days = 30
    try:
        row = fetch3306("SELECT sValue FROM m_tbconfig WHERE sName='logConfig';")
        parsed = json.loads(row['sValue'])
        # memoryTime is stored in months; convert to days.
        keep_days = int(parsed['memoryTime']) * 30
    except Exception as err:
        LOG_ERR('ERROR getting max_keep: %s' % err)
    LOG_DBG('max_keep = %s' % keep_days)
    return keep_days
def check_service():
    """Ensure the logclear systemd unit files are installed, then restart the timer."""
    SERVICE_PATH = '/usr/lib/systemd/system'
    LOCAL_SERVICE_PATH = '/usr/local/bluedon/conf/systemctl'
    service_file = 'bdad_logclear_cron_task.service'
    timer_file = 'bdad_logclear_cron_task.timer'
    # (removed a stray `pass` statement that sat between the assignments)
    SYSTEM_PATH = lambda x : os.path.join(SERVICE_PATH, x)
    LOCAL_PATH = lambda x : os.path.join(LOCAL_SERVICE_PATH, x)
    # check service file
    if not os.path.exists(SYSTEM_PATH(service_file)):
        try:
            # copy service file from local path to system path
            # NOTE(review): os.system does not raise OSError on failure --
            # it returns an exit status; consider checking the return value.
            os.system('/usr/bin/cp %s %s' % (LOCAL_PATH(service_file),
                                             SYSTEM_PATH(service_file)))
        except OSError:
            LOG_ERR('%s does not exists' % LOCAL_PATH(service_file))
    else:
        LOG_DBG('%s already exists' % LOCAL_PATH(service_file))
    # check timer file
    if not os.path.exists(SYSTEM_PATH(timer_file)):
        try:
            # copy timer file from local path to system path
            os.system('/usr/bin/cp %s %s' % (LOCAL_PATH(timer_file),
                                             SYSTEM_PATH(timer_file)))
        except OSError:
            LOG_ERR('%s does not exists' % LOCAL_PATH(timer_file))
    else:
        LOG_DBG('%s already exists' % LOCAL_PATH(timer_file))
    # reset service status
    os.system('systemctl stop bdad_logclear_cron_task.timer')
    os.system('systemctl daemon-reload')
    os.system('systemctl start bdad_logclear_cron_task.timer')
# Run the cleanup once when invoked directly (the systemd timer calls this).
if __name__ == '__main__':
    cron_task()
|
# We use the GeoManager as the main object manager for Permit
from django.contrib.gis.db.models import GeoManager
# We use the SearchManager as a secondary manager
from djorm_pgfulltext.models import SearchManager
# Other imports required by these managers are:
from djorm_pgfulltext.fields import VectorField
from django.contrib.gis.db import models
class Permit(models.Model):
    '''Stores information about a single permit'''
    # The region field stores the geometric shape(s) of the permit
    region = models.MultiPolygonField(srid=4326, null=True)
    # All of these are non-required fields pulled from KML data
    name = models.CharField(max_length=1024, null=True)
    comment = models.CharField(max_length=1024, null=True)
    category = models.CharField(max_length=1024, null=True)
    proj_id = models.CharField(max_length=1024, null=True)
    link = models.CharField(max_length=1024, null=True)
    status = models.CharField(max_length=1024, null=True)
    description = models.TextField(null=True)
    # This manager is what allows us to make GIS queries (such as
    # contains, overlap, bbcontains, etc.). It must be the main
    # manager on this model type or we cannot make these queries.
    # NOTE(review): GeoManager was removed in Django 2.0 (plain managers
    # support GIS lookups there) -- revisit if this project upgrades.
    objects = GeoManager()
    # In order to support full text search, we have a SECOND model
    # that allows for that access pattern. Attempts to use the GIS
    # mixin that is available in the pgfulltext module failed
    # miserably, so we go with this route for now. The main drawback
    # is that we must manually update the search fields on save
    # because this is not the default manager. That's not too terrible,
    # however, because we only ever save from one place inside kmlutils.py.
    search_index = VectorField()
    text = SearchManager(
        # List all the fields that you want indexed
        fields = ('name', 'comment', 'category', 'proj_id', 'link', 'status'),
        # This may be redundant now.
        auto_update_search_field = False
    )
    def to_small_dict(self):
        '''Return a subset of the data useful for display in the UI.

        Note: raises AttributeError when `region` is NULL, since the
        centroid is computed from it.
        '''
        centroid = self.region.centroid
        return {
            'centroid': [ centroid.y, centroid.x ],
            'category': self.category,
            'comment': self.comment,
            'proj_id': self.proj_id,
            'status': self.status,
            'name': self.name,
            'link': self.link,
            'id': self.id
        }
    def __str__(self):
        # NOTE(review): name is nullable, so this can return None -- confirm
        # callers tolerate that.
        return self.name
|
from django.conf.urls import url
from commodity.views import commodity_list, comcategory, speedFood, recharge, hongbao, city, village, tidings, detail
# Route table for the commodity app; `name=` values are the (Chinese)
# user-facing page titles used for reversing.
urlpatterns = [
    url('^commodity_list/$',commodity_list,name='琳琅的店'),
    # NOTE(review): '_{1}' is just a literal underscore, and '(?P<order>\d?)'
    # makes the order digit optional -- confirm an empty order is intended.
    url('^comcategory/(?P<class_id>\d*)_{1}(?P<order>\d?)$',comcategory,name='商品分类'),
    url('^detail/(?P<id>\d+)/$',detail,name='商品详情'),
    url('^speedFood/$',speedFood,name='飞速零食'),
    url('^recharge/$',recharge,name='充值'),
    url('^hongbao/$',hongbao,name='红包'),
    url('^city/$',city,name='城市定位'),
    url('^village/$',village,name='学校'),
    url('^tidings/$',tidings,name='消息'),
]
"""
The seq2science configuration/preprocessing is split into four parts:
* generic: all logic not related to any specific workflows
* workflows: all logic related to specific workflows
* explain: all logic necessary to make an explanation of what has/will be done
* logging: all logic related to logging to stdout/file
"""
# Order matters: generic configuration first, then workflow-specific logic,
# then the explain/logging layers that report on the resulting configuration.
include: "../rules/configuration_generic.smk"
include: "../rules/configuration_workflows.smk"
include: "../rules/configuration_explain.smk"
include: "../rules/configuration_logging.smk"
|
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
# Load Kobe's per-season stats and plot points-per-season as a line chart.
df = pd.read_csv("Kobe_stats.csv")
data = pd.DataFrame()
# Parse the season label into a datetime so the x-axis sorts chronologically.
data["Season"] = pd.to_datetime(df["Season"])
data["PTS"] = df["PTS"]
sns.set()
sns.relplot(x="Season", y="PTS", data=data, kind="line")
# NOTE(review): passing bare year strings to xlim on a datetime axis relies
# on matplotlib's unit conversion -- confirm with the installed version.
plt.xlim("1995", "2015")
plt.show()
|
""" Simulation classes that handle variations of discrete-time replicator dynamics
Classes:
:py:class:`DiscreteReplicatorDynamics`
implements generic discrete time replicator dynamics
Functions:
:py:func:`initial_set_handler`
Default handler for 'initial set' events
:py:func:`generation_report_handler`
Default handler for 'generation' events
:py:func:`stable_state_handler`
Default handler for 'stable state' and 'force stop' events
"""
import numpy as np
import simulations.dynamics.replicator_fastfuncs as fastfuncs
from simulations.simulation import Simulation
def _create_caches(this, *args):
    """ Handler to wrap around :py:meth:`DiscreteReplicatorDynamics._create_caches`

    Builds the profile/payoff caches, then denormalizes frequently-used
    sizes into numpy scalars/arrays for the compiled step functions.
    """
    this._create_caches()
    this._num_profiles = this._profiles_cache.shape[0]
    this._sample_profile = this._profiles_cache[0]
    this._profile_size = this._profiles_cache.shape[1]
    this._background_rate = np.float64(this.background_rate)
    this._effective_zero = np.float64(this.effective_zero)
    if this._one_or_many == DiscreteReplicatorDynamics.TYPE_ONE:
        this._num_types = np.arange(len(this.types))
        this._num_types_2 = np.arange(len(this.types) + 1)
        # NOTE(review): np.int was removed in NumPy 1.24 -- np.int / dtype=np.int
        # below need plain int on modern NumPy; left as-is pending a
        # dependency-version check.
        this._interaction_arity = np.int(this.interaction_arity)
    elif this._one_or_many == DiscreteReplicatorDynamics.TYPE_MANY:
        this._num_types = np.zeros([len(this.types), max(len(type) for type in this.types)], dtype=np.int)
        this._num_types_2 = np.zeros([len(this.types) + 1, max(len(type) for type in this.types)], dtype=np.int)
        this._num_pops = np.arange(len(this.types))
class DiscreteReplicatorDynamics(Simulation):
    """ Implements an abstract discrete-time replicator dynamics

    Keyword Parameters:
        effective_zero
            The effective zero value for floating-point comparisons
            (default 1e-10)
        types
            A list of names for the possible types (used to calculate
            dimensionality, defaults to the return value of
            :py:meth:`~DiscreteReplicatorDynamics._default_types`)
        background_rate
            The natural rate of reproduction (parameter in the dynamics,
            default 0.)

    Methods to Implement:
        :py:meth:`~simulations.base.Base._add_listeners`
            Adds listeners to various events
        :py:meth:`~DiscreteReplicatorDynamics._default_types`
            Returns the default value for types when no keyword parameter
            is provided to :py:meth:`~DiscreteReplicatorDynamics.__init__`.
        :py:meth:`~DiscreteReplicatorDynamics._null_population`
            Returns a population that won't be equal to any starting population
        :py:meth:`~DiscreteReplicatorDynamics._pop_equals`
            Returns whether two populations are identical or not
        :py:meth:`~DiscreteReplicatorDynamics._random_population`
            Returns a random starting population
        :py:meth:`~DiscreteReplicatorDynamics._step_generation`
            Returns the next generation, given the current one

    Events:
        force stop(this, genct, finalgen, prevgen, firstgen)
            emitted when the generation iteration is broken by a forced stop
            condition (instead of stable state event)
        generation(this, genct, thisgen, lastgen)
            emitted when a generation is complete
        initial set(this, initial_pop)
            emitted when the initial population is set up
        stable state(this, genct, finalgen, prevgen, firstgen)
            emitted when a stable state is reached
    """
    # Discriminates single-population vs multi-population dynamics; compared
    # against self._one_or_many in _create_caches and _step_generation.
    TYPE_ONE = 1
    TYPE_MANY = 2
    def __init__(self, *args, **kwdargs):
        """ Handles several keyword parameters and sends the rest up the inheritance chain.

        Keyword Parameters:
            effective_zero
                The effective zero value for floating-point comparisons
                (default 1e-10)
            types
                A list of names for the possible types (used to calculate
                dimensionality, defaults to the return value of
                :py:meth:`~DiscreteReplicatorDynamics._default_types`)
            background_rate
                The natural rate of reproduction (parameter in the dynamics,
                default 0.)
        """
        super(DiscreteReplicatorDynamics, self).__init__(*args, **kwdargs)
        self.result_data = None
        self.force_stop = False
        if 'effective_zero' in kwdargs and kwdargs['effective_zero']:
            self.effective_zero = float(kwdargs['effective_zero'])
        else:
            self.effective_zero = 1e-10
        if 'types' in kwdargs and kwdargs['types']:
            self.types = kwdargs['types']
        else:
            self.types = self._default_types()
        if 'background_rate' in kwdargs and kwdargs['background_rate']:
            self.background_rate = float(kwdargs['background_rate'])
        else:
            self.background_rate = 0.
        # All of the caches below are populated lazily by the module-level
        # _create_caches handler, fired on the 'initial set' event.
        self._profiles_cache = None
        self._payoffs_cache = None
        self._one_or_many = None
        self._effective_zero = None
        self._background_rate = None
        self._num_profiles = None
        self._num_types = None
        self._num_types_2 = None
        self._profile_size = None
        self._interaction_arity = None
        self._num_pops = None
        self._sample_profile = None
        self.on('initial set', _create_caches)
    def _add_default_listeners(self):
        """ Sets up default event listeners

        Handlers:
            - stable state - :py:func:`stable_state_handler`
            - force stop - :py:func:`stable_state_handler`
            - initial set - :py:func:`initial_set_handler`
            - generation - :py:func:`generation_report_handler`
        """
        super(DiscreteReplicatorDynamics, self)._add_default_listeners()
        self.add_listener('stable state', stable_state_handler)
        self.add_listener('force stop', stable_state_handler)
        self.on('initial set', initial_set_handler)
        self.on('generation', generation_report_handler)
    def _default_types(self):
        """ Returns a default type object for the population(s)
        (should implement)
        """
        return []
    def _random_population(self):
        """ Generate a random population of appropriate
        dimensionality (should implement)
        """
        return ()
    def _null_population(self):
        """ Generates a population guaranteed to compare falsely with a random
        population (should implement)
        """
        return ()
    def _step_generation(self, pop):
        """ Step one population or list of populations to the next generation

        Parameters:
            pop
                The population or list of populations to send to the next generation
        """
        # x_i(t+1) = (a + u(e^i, x(t)))*x_i(t) / (a + u(x(t), x(t)))
        # a is background (lifetime) birthrate
        # Defensive: rebuild caches if the 'initial set' handler never ran.
        if self._profiles_cache is None or self._payoffs_cache is None:
            _create_caches(self)
        if self._one_or_many == self.TYPE_ONE:
            return fastfuncs.one_dimensional_step(pop,
                                                  self._profiles_cache,
                                                  self._sample_profile,
                                                  self._payoffs_cache,
                                                  self._num_types,
                                                  self._num_types_2,
                                                  self._interaction_arity,
                                                  self._background_rate,
                                                  self._effective_zero,
                                                  self._num_profiles,
                                                  self._profile_size)
        if self._one_or_many == self.TYPE_MANY:
            return fastfuncs.n_dimensional_step(pop,
                                                self._profiles_cache,
                                                self._sample_profile,
                                                self._payoffs_cache,
                                                self._num_types,
                                                self._num_types_2,
                                                self._background_rate,
                                                self._effective_zero,
                                                self._num_pops,
                                                self._num_profiles,
                                                self._profile_size)
    def _run(self, initial_pop=None):
        """ Actually run the simulation

        Parameters:
            initial_pop
                (optional) initial population. Randomizes if not provided.
        """
        if initial_pop is None:
            initial_pop = self._random_population()
        this_generation = initial_pop
        self.emit('initial set', self, initial_pop)
        last_generation = self._null_population()
        generation_count = 0
        last_equal = 0
        # Iterate until the step function reports convergence (flag == 1) or
        # an external force_stop is requested.
        while last_equal != 1 and not self.force_stop:
            generation_count += 1
            last_generation = this_generation.copy()
            tmp = self._step_generation(last_generation)
            # The step result packs a convergence flag in front of the new
            # population; unwrap a possible 1-element array into a scalar.
            last_equal = tmp[0:1][0]
            try:
                last_equal = last_equal[0]
            except IndexError:
                pass
            this_generation = tmp[1:].copy()
            self.emit('generation',
                      self,
                      generation_count,
                      this_generation,
                      last_generation)
        if self.force_stop:
            self.emit('force stop',
                      self,
                      generation_count,
                      this_generation,
                      last_generation,
                      initial_pop)
        else:
            self.emit('stable state',
                      self,
                      generation_count,
                      this_generation,
                      last_generation,
                      initial_pop)
        return (generation_count,
                initial_pop,
                this_generation,
                self.result_data)
def stable_state_handler(this, genct, thisgen, lastgen, firstgen):
    """ Print out a report when a stable state (or force stop) is reached.

    Parameters:
        this
            a reference to the simulation (must expose .out and .force_stop)
        genct
            the number of generations
        thisgen
            the stable state population
        lastgen
            the previous population
        firstgen
            the initial population
    """
    # Use the stream API instead of the Python-2-only `print >>` statement,
    # so this module works on both Python 2 and 3.
    this.out.write("=" * 72 + "\n")
    if this.force_stop:
        fstr = "Force stop! ({0} generations)"
    else:
        fstr = "Stable state! ({0} generations)"
    this.out.write(fstr.format(genct) + "\n")
def initial_set_handler(this, initial_pop):
    """ Handles the 'initial set' event by default for discrete
    replicator dynamics

    Parameters:
        this
            a reference to the simulation (must expose .out)
        initial_pop
            the initial population
    """
    # Stream write replaces the py2-only `print >>` statements; the original
    # also emitted a trailing blank line, hence the double newline.
    this.out.write("Initial State: {0}\n\n".format(initial_pop))
def generation_report_handler(this, genct, thisgen, lastgen):
    """ Print out a report of the current generation

    Parameters:
        this
            a reference to the simulation (must expose .out)
        genct
            the generation number
        thisgen
            the current population
        lastgen
            the previous population
    """
    # Stream writes replace the py2-only `print >>` statements so this
    # module works on both Python 2 and 3; output is byte-identical.
    out = this.out
    out.write("-" * 72 + "\n")
    out.write("Generation {0}:\n".format(genct))
    out.write("\t{0}\n".format(thisgen))
    out.write("\n")
    # Flush so progress is visible even when out is block-buffered.
    out.flush()
|
import torch
import torch.nn as nn
import torchvision.models as models
import random
# Seed python's RNG for reproducibility (torch/cuda seeds are not set here).
random.seed(0)
# Prefer the GPU whenever torch can see one.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class EncoderCNN(nn.Module):
    """ResNet-152 feature extractor emitting a fixed-size spatial grid of
    image features for the attention decoder."""
    def __init__(self, encoded_image_size=14):
        super(EncoderCNN, self).__init__()
        # NOTE(review): `pretrained=True` downloads weights at construction
        # time and is deprecated in newer torchvision (use `weights=`).
        resnet = models.resnet152(pretrained=True)
        # delete the last fc layer and pool layer.
        modules = list(resnet.children())[:-2]
        self.resnet = nn.Sequential(*modules)
        # Resize any input's feature map to a fixed spatial size.
        self.adaptive_pool = nn.AdaptiveAvgPool2d(
            (encoded_image_size, encoded_image_size))
    def forward(self, images):
        # The backbone is frozen here: no gradients flow through ResNet.
        with torch.no_grad():
            features = self.resnet(images)
        # (batch_size, 2048, encoded_image_size, encoded_image_size)
        features = self.adaptive_pool(features)
        # (batch_size, encoded_image_size, encoded_image_size, 2048)
        features = features.permute(0, 2, 3, 1)
        return features
class Attention(nn.Module):
    """Additive (Bahdanau-style) attention over encoded image regions."""

    def __init__(self, encoder_dim, decoder_dim, attention_dim):
        """
        :param encoder_dim: feature size of encoded images
        :param decoder_dim: size of decoder's RNN
        :param attention_dim: size of the attention network
        """
        super(Attention, self).__init__()
        # Project encoder features and the decoder state into a shared
        # attention space, then score each pixel with a single linear unit.
        self.encoder_att = nn.Linear(encoder_dim, attention_dim)
        self.decoder_att = nn.Linear(decoder_dim, attention_dim)
        self.full_att = nn.Linear(attention_dim, 1)
        self.relu = nn.ReLU()
        # Normalize scores across the pixel dimension.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, encoder_out, decoder_hidden):
        """
        Forward propagation.

        :param encoder_out: encoded images, (batch_size, num_pixels, encoder_dim)
        :param decoder_hidden: previous decoder output, (batch_size, decoder_dim)
        :return: (attention-weighted encoding, attention weights)
        """
        projected_enc = self.encoder_att(encoder_out)       # (B, P, A)
        projected_dec = self.decoder_att(decoder_hidden)    # (B, A)
        scores = self.full_att(
            self.relu(projected_enc + projected_dec.unsqueeze(1))
        ).squeeze(2)                                        # (B, P)
        alpha = self.softmax(scores)                        # (B, P)
        # Weighted sum of pixel features under the attention distribution.
        weighted = (encoder_out * alpha.unsqueeze(2)).sum(dim=1)  # (B, E)
        return weighted, alpha
class DecoderRNNAtt(nn.Module):
    """Attention-based LSTM caption decoder ("Show, Attend and Tell").

    At every step the decoder attends over the encoder's pixel grid, gates
    the context vector, and feeds [embedding; context] into an LSTM cell.
    """

    def __init__(self,
                 attention_size,
                 embed_size,
                 hidden_size,
                 vocab_size,
                 num_layers,
                 feature_size=2048,
                 dropout=0.22,
                 max_seq_length=40):
        """
        :param attention_size: size of the attention network
        :param embed_size: word embedding dimension
        :param hidden_size: LSTM hidden state dimension
        :param vocab_size: vocabulary size
        :param num_layers: unused; kept for interface compatibility
        :param feature_size: channel dimension of the encoder features
        :param dropout: dropout applied to input embeddings during training
        :param max_seq_length: hard cap on generated caption length
        """
        super(DecoderRNNAtt, self).__init__()
        self.attention_size = attention_size
        self.feature_size = feature_size
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.max_seq_length = max_seq_length
        # linear layer to find initial hidden state
        self.init_h = nn.Linear(feature_size, hidden_size)
        # linear layer to find initial cell state
        self.init_c = nn.Linear(feature_size, hidden_size)
        # dropout
        self.dropout = nn.Dropout(dropout)
        # attention network
        self.attention = Attention(feature_size, hidden_size, attention_size)
        # embedding
        self.embed = nn.Embedding(vocab_size, embed_size)
        # linear layer to create a sigmoid-activated gate
        self.f_beta = nn.Linear(hidden_size, feature_size)
        self.sigmoid = nn.Sigmoid()
        # lstm cell consuming [embedding; attention context]
        self.lstm = nn.LSTMCell(embed_size + feature_size,
                                hidden_size,
                                bias=True)
        # projection from hidden state to vocabulary logits
        self.linear = nn.Linear(hidden_size, vocab_size)
        self.reset_parameters()
        self.init_weights()

    def reset_parameters(self):
        """Xavier-init all matrices, zero-init all vectors (biases)."""
        for p in self.parameters():
            if p.data.ndimension() >= 2:
                nn.init.xavier_uniform_(p.data)
            else:
                nn.init.zeros_(p.data)

    def init_weights(self):
        """
        Initializes some parameters with values from the uniform distribution,
        for easier convergence. Overrides the embedding/output init above.
        """
        self.embed.weight.data.uniform_(-0.1, 0.1)
        self.linear.bias.data.fill_(0)
        self.linear.weight.data.uniform_(-0.1, 0.1)

    def init_hidden_state(self, feature):
        """
        Create the initial hidden and cell states from the mean image feature.

        :param feature: encoded images (batch_size, num_pixels, encoder_dim)
        :return: hidden state, cell state, each (batch_size, decoder_dim)
        """
        mean_feature = feature.mean(dim=1)
        h = self.init_h(mean_feature)
        c = self.init_c(mean_feature)
        return h, c

    def forward_step(self, embedded, states):
        """Run one LSTM cell step; return (hidden, (h, c))."""
        h_t, c_t = states
        h_t, c_t = self.lstm(embedded, (h_t, c_t))
        return h_t, (h_t, c_t)

    def forward(self, captions, lengths, features, teacher_forcing_ratio=0.8):
        """Decode captions with scheduled teacher forcing.

        :param captions: (batch_size, max_len) token ids, sorted by length desc
        :param lengths: caption lengths used for packing
        :param features: encoder output (batch, H, W, feature_size)
        :param teacher_forcing_ratio: probability of feeding the ground-truth
            token instead of the previous prediction at each step
        :return: (logits over the packed steps, alphas (batch, max_len, num_pixels))
        """
        batch_size = captions.size(0)
        feature_size = features.size(-1)
        # Flatten image to (batch_size, num_pixels, encoder_dim).
        features = features.view(batch_size, -1, feature_size)
        num_pixels = features.size(1)
        # Embeddings of the ground-truth input tokens.
        embeddings = self.embed(captions)
        embeddings = self.dropout(embeddings)
        pack_padded_sequence = nn.utils.rnn.pack_padded_sequence
        packed = pack_padded_sequence(embeddings, lengths, batch_first=True)
        # (batch_size, decoder_dim)
        h_t, c_t = self.init_hidden_state(features)
        alphas = torch.zeros(batch_size, max(lengths), num_pixels).to(device)
        hiddens = []
        predicted = captions[:, 0:1]
        # Step through the packed batch: at step i only the first b_sz
        # sequences are still active (inputs are sorted by decreasing length).
        for i, b_sz in enumerate(packed.batch_sizes):
            attention_weighted_encoding, alpha = self.attention(
                features[:b_sz], h_t[:b_sz])
            # Gating scalar, (batch_size_t, encoder_dim).
            gate = self.sigmoid(self.f_beta(h_t[:b_sz]))
            attention_weighted_encoding = gate * attention_weighted_encoding
            if random.random() < teacher_forcing_ratio:
                # Teacher forcing: feed the ground-truth token.
                emb = embeddings[:b_sz, i, :]
            else:
                # Free running: feed the previous step's own prediction.
                emb = self.embed(predicted)[:b_sz, 0, :]
            inputs = torch.cat([emb, attention_weighted_encoding], dim=1)
            h_t, c_t = h_t[:b_sz, :], c_t[:b_sz, :]
            hidden, (h_t, c_t) = self.forward_step(inputs, (h_t, c_t))
            hiddens.append(hidden)
            alphas[:b_sz, i, :] = alpha
            output = self.linear(hidden)
            _, predicted = output.max(1)
            predicted = predicted.unsqueeze(1)
        hiddens = torch.cat(hiddens, 0)
        outputs = self.linear(hiddens)
        return outputs, alphas

    def sample(self, features, start_token, end_token, k=5):
        """Generate a caption for one image's features using beam search.

        :param features: encoder output for a single image
        :param start_token: id of the <start> token
        :param end_token: id of the <end> token
        :param k: beam width
        :return: (1, seq_len) LongTensor of token ids (best-scoring sequence)
        """
        feature_size = features.size(-1)
        features = features.view(1, -1, feature_size)
        num_pixels = features.size(1)
        # Treat the beam as a batch of k identical images.
        # (k, num_pixels, encoder_dim)
        features = features.expand(k, num_pixels, feature_size)
        # (k, 1)
        k_prev_words = torch.LongTensor([[start_token]] * k).to(device)
        seqs = k_prev_words
        top_k_scores = torch.zeros(k, 1).to(device)
        # Lists to store completed sequences and scores.
        complete_seqs = list()
        complete_seqs_scores = list()
        # Start decoding.
        step = 1
        h_t, c_t = self.init_hidden_state(features)
        while True:
            # (s, embed_dim); s <= k is the number of live beams.
            embeddings = self.embed(k_prev_words).squeeze(1)
            # (s, encoder_dim), (s, num_pixels)
            awe, _ = self.attention(features, h_t)
            # Gating scalar, (s, encoder_dim).
            gate = self.sigmoid(self.f_beta(h_t))
            awe = gate * awe
            inputs = torch.cat([embeddings, awe], dim=1)
            hidden, (h_t, c_t) = self.forward_step(inputs, (h_t, c_t))
            # (s, vocab_size)
            output = self.linear(hidden)
            scores = torch.nn.functional.log_softmax(output, dim=1)
            # Accumulate beam scores. (s, vocab_size)
            scores = top_k_scores.expand_as(scores) + scores
            if step == 1:
                # All k beams are identical on the first step; expand one row.
                top_k_scores, top_k_words = scores[0].topk(k, 0, True, True)
            else:
                # Unroll and find the global top-k over all live beams. (s)
                top_k_scores, top_k_words = scores.view(-1).topk(
                    k, 0, True, True)
            # Convert unrolled indices to (beam, word) indices.
            # BUG FIX: `/` is true division on PyTorch >= 1.5 and produced
            # float indices that break tensor indexing; use floor division.
            prev_word_inds = top_k_words // self.vocab_size  # (s)
            next_word_inds = top_k_words % self.vocab_size  # (s)
            # Append the chosen words to their beams. (s, step+1)
            seqs = torch.cat(
                [seqs[prev_word_inds],
                 next_word_inds.unsqueeze(1)], dim=1)
            # Which sequences are incomplete (didn't reach <end>)?
            incomplete_inds = [
                ind for ind, next_word in enumerate(next_word_inds)
                if next_word != end_token
            ]
            complete_inds = list(
                set(range(len(next_word_inds))) - set(incomplete_inds))
            # Set aside complete sequences and shrink the beam accordingly.
            if len(complete_inds) > 0:
                complete_seqs.extend(seqs[complete_inds].tolist())
                complete_seqs_scores.extend(top_k_scores[complete_inds])
                k -= len(complete_inds)
            # Proceed with incomplete sequences only.
            if k == 0:
                break
            seqs = seqs[incomplete_inds]
            h_t = h_t[prev_word_inds[incomplete_inds]]
            c_t = c_t[prev_word_inds[incomplete_inds]]
            features = features[prev_word_inds[incomplete_inds]]
            top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)
            k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)
            # Break if things have been going on too long.
            if step > self.max_seq_length:
                break
            step += 1
        # Prevent returning an empty sequence if nothing completed.
        if len(complete_seqs_scores) == 0:
            return torch.Tensor([[end_token]]).long().to(device)
        i = complete_seqs_scores.index(max(complete_seqs_scores))
        seq = torch.Tensor([complete_seqs[i]]).long().to(device)
        return seq
|
# -*- coding: utf-8 -*-
"""
Basics of NumPy arithmetic with multi-dimensional arrays.
"""
import numpy as np

# 1-D array A
A = np.array([1, 2, 3, 4])
print("== A ==")
print(np.ndim(A))  # number of dimensions of the array
print(A.shape)     # array shape: element count per axis, returned as a tuple

# 2-D array B
B = np.array([[1, 2], [3, 4], [5, 6]])
print("== B ==")
print(np.ndim(B))
print(B.shape)  # (3, 2): size of the first axis, then size of the second axis

# Matrix product
print("== 행렬의 곲 ==")
# Matrix multiplication: multiply the rows of the left matrix with the
# columns of the right matrix element-wise and sum the products.
A = np.array([[1, 2], [3, 4]])
print(A.shape)
B = np.array([[5, 6], [7, 8]])
print(B.shape)
C = np.array([1, 2])
print(np.dot(A, B))  # matrix product: numpy.dot(a, b)
print(np.dot(B, A))
print(np.dot(C, A))  # a 1-D vector times a matrix yields a vector
D = np.dot(A, B)
print(D.shape)
# Shapes matter in matrix multiplication: the size of dimension 1 of the
# left operand must equal the size of dimension 0 of the right operand.
"""
Cazador file/cloud service investigator objects module.
This portion of the module handles the simple object types expected as the
result of a Cazador operation.
Created: 08/24/2016
Creator: Nathan Palmer
"""
import hashlib
import re
class CazFile:
    """Simple file metadata object.

    Every field is stored as a string, or ``None`` when the value is absent.
    """

    def __init__(self, file_id, name, parent, sha1=None, md5=None, path=None):
        """CazFile initializer.

        Bug fix: the original tested ``if not None`` (always true), so
        missing values were converted to the literal string "None".
        Each value is now stringified only when it is actually present.
        """
        self.file_id = str(file_id) if file_id is not None else None
        self.name = str(name) if name is not None else None
        self.parent = str(parent) if parent is not None else None
        self.sha1 = str(sha1) if sha1 is not None else None
        self.md5 = str(md5) if md5 is not None else None
        self.path = str(path) if path is not None else None

    def __str__(self):
        """String print helper."""
        return """[{} ({})] Parent:{}
Path:{}
SHA1:{} MD5:{}""".format(self.name,
                         self.file_id,
                         self.parent,
                         self.path,
                         self.sha1,
                         self.md5)
class CazRegEx:
    """A named, pre-compiled regular expression."""

    def __init__(self, name, expression):
        """Compile *expression* once so it can be reused efficiently."""
        self.name = name
        self.regex = re.compile(expression)
class CazRegMatch:
    """Location metadata for a single regex hit.

    Only a SHA-1 digest of the matched text is retained, never the text
    itself (the value may be sensitive).
    """

    def __init__(self, match, file_path, line, regex_name):
        """Record where *match* (an ``re`` match object) occurred."""
        matched_bytes = match.group(0).encode('utf-8')
        self.hash = hashlib.sha1(matched_bytes).hexdigest()
        self.expression_name = regex_name
        self.location = (match.start(), match.end())
        self.line_number = line
        self.file_path = file_path

    def __str__(self):
        """String print helper."""
        return "{} detected a match for {} in {} at location {} line {}.".format(
            self.hash,
            self.expression_name,
            self.file_path,
            self.location,
            self.line_number)
|
from matplotlib import pyplot as plt
import os
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import numpy as np
import cv2
from ssd_vgg import SSD_VGG
from ssd_mobilenetv2 import SSD_MobileNetV2
from ssd_mobilenetv3 import SSD_MobileNetV3
import argparse
# Command-line interface: checkpoint path, test-image directory, backbone name.
parser = argparse.ArgumentParser(description='Test Params')
parser.add_argument('--weights')
parser.add_argument('--imagepath')
parser.add_argument('--network')
args = parser.parse_args()

# Per-channel value subtracted from the image before inference.
means = (127, 127, 127)

# Build the requested SSD variant and load its weights onto the CPU.
# NOTE(review): `net` stays undefined when --network is none of these three
# values; the script then crashes at the first net(image) call below.
if args.network == 'vgg':
    net = SSD_VGG(phase='test', num_classes=2)
    net.load_state_dict(torch.load(args.weights, map_location=lambda storage, loc: storage))
    net.eval()
elif args.network == 'mobilenetv2':
    net = SSD_MobileNetV2(phase='test', num_classes=2)
    net.load_state_dict(torch.load(args.weights, map_location=lambda storage, loc: storage))
    net.eval()
elif args.network == 'mobilenetv3':
    net = SSD_MobileNetV3(phase='test', num_classes=2)
    net.load_state_dict(torch.load(args.weights, map_location=lambda storage, loc: storage))
    net.eval()

test_images = os.listdir(args.imagepath)
font = cv2.FONT_HERSHEY_SIMPLEX
for image_name in test_images:
    # Keep an untouched copy of the image for drawing the results on.
    org_img = cv2.imread(args.imagepath + image_name)
    image = cv2.imread(args.imagepath + image_name)
    height, width, _ = image.shape
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Network input: 300x300, mean-subtracted, channel-reordered, CHW.
    image = cv2.resize(image, (300, 300))
    image = np.array(image, dtype=np.float32)
    means = np.array(means, dtype=np.float32)
    image -= means
    image = image[:, :, (2, 1, 0)]
    image = torch.from_numpy(image).float().permute(2, 0, 1)
    image = Variable(image.unsqueeze(0))
    detections = net(image).data
    # detections[0, class, :, :] rows look like [score, x1, y1, x2, y2];
    # class index 0 is background, so iterate from 1.
    for i in range(1, detections.size(1)):
        dets = detections[0, i, :]
        # Keep detections whose confidence exceeds 0.65.
        mask = dets[:, 0].gt(0.65).expand(5, dets.size(0)).t()
        dets = torch.masked_select(dets, mask).view(-1, 5)
        boundingboxes = dets[:, 1:]
        # Scale normalized box corners back to original pixel coordinates.
        boundingboxes[:, 0] *= width
        boundingboxes[:, 1] *= height
        boundingboxes[:, 2] *= width
        boundingboxes[:, 3] *= height
        scores = dets[:, 0].cpu().numpy()
        # NOTE(review): this inner loop reuses `i`, shadowing the class
        # index; harmless with the current flow but worth renaming.
        for i in range(scores.shape[0]):
            cv2.rectangle(org_img, (int(boundingboxes[i][0]),int(boundingboxes[i][1])),(int(boundingboxes[i][2]),int(boundingboxes[i][3])),(0,0,255),3)
            cv2.rectangle(org_img, (int(boundingboxes[i][0]), int(boundingboxes[i][3]) - 35), (int(boundingboxes[i][2]), int(boundingboxes[i][3])), (0, 0, 255), cv2.FILLED)
            cv2.putText(org_img, 'fish : ' + str("{0:.2f}".format(scores[i])), (int(boundingboxes[i][0]) + 6, int(boundingboxes[i][3]) - 6), font, 1.4, (255, 255, 255), 1)
    # Write the annotated image next to the others.
    cv2.imwrite('./results/' + image_name, org_img)
|
#!/usr/bin/env python
import sys, re
# Repeating XOR key used by decrypt().
key = "i hope in the next ten years there would be no other farewell letter brilliant than this one"

def decrypt(content):
    """Decrypt a comma-separated list of XOR-encoded byte values.

    Whitespace in *content* is ignored; each integer token is XORed with the
    corresponding character of the repeating *key*.

    Fixes: the regex is now a raw string (``"\\s"`` is an invalid escape
    warning on modern Python), and the index loop uses enumerate.
    """
    encrypted = re.sub(r"\s", "", content).split(",")
    return "".join(
        chr(int(token) ^ ord(key[i % len(key)]))
        for i, token in enumerate(encrypted)
    )
# Decrypt the file named on the command line and print the plaintext.
# (The trailing comma suppresses the extra newline; Python 2 print syntax.)
with open(sys.argv[1], "r") as f:
    print decrypt(f.read()),
|
from __future__ import absolute_import, unicode_literals
from django_ppf.celery import app
from assistant.utils import (
update_prices,
import_parameters_form_prom,
parse_mizol,
)
from assistant.utils import make_xml, make_xlsx_for_prom
@app.task(name='assistant.update_mizol_prices_task')
def update_mizol_prices_task(filename, vendor_name):
    # Celery wrapper around assistant.utils.update_prices.
    update_prices(filename, vendor_name)
@app.task(name='assistant.import_parameters_form_prom')
def import_parameters_form_prom_task(filename):
    # Celery wrapper; "form" (sic) matches the util's registered name.
    import_parameters_form_prom(filename)
@app.task(name='assistant.add_new_products_by_mizol')
def add_new_products_by_mizol():
    # Scrape the Mizol vendor feed and create any new products.
    parse_mizol()
@app.task
def update_horoz_task():
    # Imported lazily to keep worker start-up light.
    from assistant.utils import ParseHoroz
    # Pull the Horoz Yandex-YML feed and sync products (USD pricing).
    ph = ParseHoroz(link='https://horozua.com/index.php?route=feed/yandex_yml', my_currency_code='USD')
    ph.set_products()
    ph.add_or_update_products_in_db()
@app.task
def run_xml_spider():
    # Imported lazily to keep worker start-up light.
    from spider.utils.spider import Spider
    spider = Spider.init()
    spider.start_parse()
@app.task
def make_xml_for_rozetka():
    # Build the Rozetka marketplace XML export.
    make_xml()
@app.task
def make_xlsx_for_prom_task():
    # Build the Prom marketplace XLSX export.
    make_xlsx_for_prom()
|
from flask import Flask, render_template, redirect, url_for, request
app = Flask(__name__)
@app.route('/store_file', methods=['post'])
def store_file():
    """Persist a posted text file under /filestore.

    Fixes over the original:
    - rejects filenames containing path separators (the unsanitized name was
      joined straight into '/filestore/', allowing ../ path traversal);
    - returns an explicit 400 response instead of None (Flask raises a
      TypeError for a None return value);
    - tolerates a missing 'content' field (fp.write(None) raised).
    """
    file_name = request.form.get('filename')
    # Require an extension, as before.
    if not file_name or ("." not in file_name):
        return ('invalid filename', 400)
    # SECURITY: forbid path components so writes cannot escape /filestore.
    if "/" in file_name or "\\" in file_name or file_name.startswith("."):
        return ('invalid filename', 400)
    file_content = request.form.get('content')
    with open('/filestore/' + file_name, 'w') as fp:
        fp.write(file_content or '')
    return redirect(url_for('index'))
@app.route('/')
def index():
    """Render the upload form."""
    return render_template('index.html')

# Development server, listening on all interfaces.
app.run(host="0.0.0.0")
|
from heapq import heappush, heappop

INF = float("inf")

# Input: |V| |E| source, then E directed edges "s t w" (one per line).
v_num, e_num, r = list(map(int, input().split()))
edges_l = [[] for _ in range(v_num)]
dist_l = [INF for _ in range(v_num)]
for _ in range(e_num):
    s, t, dist = map(int, input().split())
    edges_l[s].append((t, dist))

# Dijkstra with a lazy-deletion binary heap.
que = []
heappush(que, (0, r))
dist_l[r] = 0
while que:
    dist, node = heappop(que)
    # Skip stale heap entries: a shorter path to `node` was already settled.
    # (Free performance fix; results are unchanged since relaxation below
    # would reject every edge from a stale entry anyway.)
    if dist > dist_l[node]:
        continue
    for to, cost in edges_l[node]:
        if dist_l[to] > cost + dist:
            dist_l[to] = cost + dist
            heappush(que, (cost + dist, to))

# Print one distance per vertex; unreachable vertices print "INF".
for dist in dist_l:
    if dist == INF:
        print("INF")
    else:
        print(dist)
|
# -*- coding: utf-8 -*-
"""Class for the ogs KINETRIC REACTION file."""
from ogs5py.fileclasses.base import BlockFile
class KRC(BlockFile):
"""
Class for the ogs KINETRIC REACTION file.
Parameters
----------
task_root : str, optional
Path to the destiny model folder.
Default: cwd+"ogs5model"
task_id : str, optional
Name for the ogs task.
Default: "model"
Notes
-----
Main-Keywords (#):
- MICROBE_PROPERTIES
- REACTION
- BLOB_PROPERTIES
- KINREACTIONDATA
Sub-Keywords ($) per Main-Keyword:
- MICROBE_PROPERTIES
- MICROBENAME
- _drmc__PARAMETERS
- MONOD_REACTION_NAME
- REACTION
- NAME
- TYPE
- BACTERIANAME
- EQUATION
- RATECONSTANT
- GROWTH
- MONODTERMS
- THRESHHOLDTERMS
- INHIBITIONTERMS
- PRODUCTIONTERMS
- PRODUCTIONSTOCH
- BACTERIAL_YIELD
- ISOTOPE_FRACTIONATION
- BACTERIA_SPECIFIC_CAPACITY
- TEMPERATURE_DEPENDENCE
- _drmc_
- STANDARD_GIBBS_ENERGY
- EXCHANGE_PARAMETERS
- SORPTION_TYPE
- NAPL_PROPERTIES
- REACTION_ORDER
- MINERALNAME
- CHEMAPPNAME
- EQUILIBRIUM_CONSTANT
- RATE_EXPONENTS
- REACTIVE_SURFACE_AREA
- PRECIPITATION_BY_BASETERM_ONLY
- PRECIPITATION_FACTOR
- PRECIPITATION_EXPONENT
- BASETERM
- MECHANISMTERM
- SWITCH_OFF_GEOMETRY
- BLOB_PROPERTIES
- NAME
- D50
- DM
- DS
- UI
- NAPL_CONTENT_INI
- NAPL_CONTENT_RES
- GRAIN_SPHERE_RATIO
- TORTUOSITY
- LENGTH
- CALC_SHERWOOD
- CALC_SHERWOOD_MODIFIED
- SHERWOOD_MODEL
- GEOMETRY
- GAS_DISSOLUTION
- INTERFACIAL_AREA
- KINREACTIONDATA
- SOLVER_TYPE
- RELATIVE_ERROR
- MIN_TIMESTEP
- INITIAL_TIMESTEP
- BACTERIACAPACITY
- MIN_BACTERIACONC
- MIN_CONCENTRATION_REPLACE
- SURFACES
- ALLOW_REACTIONS
- NO_REACTIONS
- COPY_CONCENTRATIONS
- LAGNEAU_BENCHMARK
- SCALE_DCDT
- SORT_NODES
- OMEGA_THRESHOLD
- REACTION_DEACTIVATION
- DEBUG_OUTPUT
- ACTIVITY_MODEL
Standard block:
None
Keyword documentation:
https://ogs5-keywords.netlify.com/ogs/wiki/public/doc-auto/by_ext/krc
Reading routines:
https://github.com/ufz/ogs5/blob/master/FEM/rf_kinreact.cpp
MICROBE_PROPERTIES :
https://github.com/ufz/ogs5/blob/master/FEM/rf_kinreact.cpp#L232
REACTION :
https://github.com/ufz/ogs5/blob/master/FEM/rf_kinreact.cpp#L1549
BLOB_PROPERTIES :
https://github.com/ufz/ogs5/blob/master/FEM/rf_kinreact.cpp#L2622
KINREACTIONDATA :
https://github.com/ufz/ogs5/blob/master/FEM/rf_kinreact.cpp#L3185
See Also
--------
add_block
"""
MKEYS = [
"MICROBE_PROPERTIES",
"REACTION",
"BLOB_PROPERTIES",
"KINREACTIONDATA",
]
# these are not sorted at the moment
SKEYS = [
[ # MICROBE_PROPERTIES
"MICROBENAME",
"_drmc__PARAMETERS",
"MONOD_REACTION_NAME",
],
[ # REACTION
"NAME",
"TYPE",
"BACTERIANAME",
"EQUATION",
"RATECONSTANT",
"GROWTH",
"MONODTERMS",
"THRESHHOLDTERMS",
"INHIBITIONTERMS",
"PRODUCTIONTERMS",
"PRODUCTIONSTOCH",
"BACTERIAL_YIELD",
"ISOTOPE_FRACTIONATION",
"BACTERIA_SPECIFIC_CAPACITY",
"TEMPERATURE_DEPENDENCE",
"_drmc_",
"STANDARD_GIBBS_ENERGY",
"EXCHANGE_PARAMETERS",
"SORPTION_TYPE",
"NAPL_PROPERTIES",
"REACTION_ORDER",
"MINERALNAME",
"CHEMAPPNAME",
"EQUILIBRIUM_CONSTANT",
"RATE_EXPONENTS",
"REACTIVE_SURFACE_AREA",
"PRECIPITATION_BY_BASETERM_ONLY",
"PRECIPITATION_FACTOR",
"PRECIPITATION_EXPONENT",
"BASETERM",
"MECHANISMTERM",
"SWITCH_OFF_GEOMETRY",
],
[ # BLOB_PROPERTIES
"NAME",
"D50",
# "CALC_SHERWOOD",
"DM",
"DS",
"UI",
"NAPL_CONTENT_INI",
"NAPL_CONTENT_RES",
"GRAIN_SPHERE_RATIO",
"TORTUOSITY",
"LENGTH",
"CALC_SHERWOOD",
"CALC_SHERWOOD_MODIFIED",
"SHERWOOD_MODEL",
"GEOMETRY",
"GAS_DISSOLUTION",
"INTERFACIAL_AREA",
],
[ # KINREACTIONDATA
"SOLVER_TYPE",
"RELATIVE_ERROR",
"MIN_TIMESTEP",
"INITIAL_TIMESTEP",
"BACTERIACAPACITY",
"MIN_BACTERIACONC",
"MIN_CONCENTRATION_REPLACE",
"SURFACES",
"ALLOW_REACTIONS",
"NO_REACTIONS",
"COPY_CONCENTRATIONS",
"LAGNEAU_BENCHMARK",
"SCALE_DCDT",
"SORT_NODES",
"OMEGA_THRESHOLD",
"REACTION_DEACTIVATION",
"DEBUG_OUTPUT",
"ACTIVITY_MODEL",
"REALATIVE_ERROR", # really?
"MAX_TIMESTEP", # really?
],
]
STD = {}
    def __init__(self, **OGS_Config):
        """Initialize the KRC (kinetic reaction) file; see class docstring."""
        super().__init__(**OGS_Config)
        # File extension used when the task writes this input file to disk.
        self.file_ext = ".krc"
|
# Code from Tutorial
# https://machinelearningmastery.com/machine-learning-in-python-step-by-step/

# Load libraries
import pandas
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC

# Load dataset (a local copy of the UCI iris data).
#url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
url = "iris-data.txt"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = pandas.read_csv(url, names=names)

# shape
print("\nDimensions of the dataset (rows, columns): " + str(dataset.shape) + "\n")
# head
print("\nPeek at the data itself: \n\n" + str(dataset.head(5)) + "\n")
# descriptions
print("\nStatistical summary of all attributes: \n\n" + str(dataset.describe()))
# class distribution
print("\nBreakdown of the data by the class variable: \n\n" + str(dataset.groupby('class').size()) + "\n")

# # box and whisker plots
# dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)
# plt.show()
# # histograms
# dataset.hist()
# plt.show()
# # scatter plot matrix
# scatter_matrix(dataset)
# plt.show()

# Split-out validation dataset (hold back 20% for a final check).
array = dataset.values
X = array[:,0:4]
Y = array[:,4]
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)

# Test options and evaluation metric (10-fold cross validation to estimate accuracy)
seed = 7
scoring = 'accuracy'

# Let’s evaluate 6 different algorithms:
# Logistic Regression (LR)
# Linear Discriminant Analysis (LDA)
# K-Nearest Neighbors (KNN).
# Classification and Regression Trees (CART).
# Gaussian Naive Bayes (NB).
# Support Vector Machines (SVM).

# Spot Check Algorithms
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))

# evaluate each model in turn
results = []
names = []
for name, model in models:
    # NOTE(review): scikit-learn >= 0.24 raises when random_state is set
    # while shuffle=False (the default); if upgrading, pass shuffle=True —
    # confirm against the pinned sklearn version.
    kfold = model_selection.KFold(n_splits=10, random_state=seed)
    cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)
print()

# We can see that it looks like KNN has the largest estimated accuracy score.
# We can also create a plot of the model evaluation results and compare the
# spread and the mean accuracy of each model. There is a population of accuracy measures
# for each algorithm because each algorithm was evaluated 10 times (10 fold cross validation).

# # Compare Algorithms
# fig = plt.figure()
# fig.suptitle('Algorithm Comparison')
# ax = fig.add_subplot(111)
# plt.boxplot(results)
# ax.set_xticklabels(names)
# plt.show()

# We can run the KNN model directly on the validation set and summarize the results
# as a final accuracy score, a confusion matrix and a classification report.

# Make predictions on validation dataset
knn = KNeighborsClassifier()
knn.fit(X_train, Y_train)
predictions = knn.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))

# We can see that the accuracy is 0.9 or 90%. The confusion matrix provides an indication of the three errors made.
# Finally, the classification report provides a breakdown of each class by precision, recall, f1-score and support
# showing excellent results (granted the validation dataset was small).
print()
import os
import subprocess
import re
from colour import Color
import json
sourceDir = "source_videos"
def generateThumbs():
    # Parse season/episode/chapter/title out of each source filename and
    # dump one thumbnail per second (starting at 78s) into thumbs/SxxEyy/.
    for vid in os.listdir(sourceDir):
        fileParts = re.search('Samurai\.Jack\.S(\d*)E(\d*)\.(\w*)\.(.*)\.avi', vid)
        season = fileParts.group(1)
        episode = fileParts.group(2)
        chapter = fileParts.group(3)
        title = fileParts.group(4).replace("."," ")
        folder = "S%sE%s"%(season, episode)
        print season, episode, chapter, title
        try:
            os.mkdir("thumbs/%s"%folder)
        except OSError:
            # Folder already exists; that's fine.
            pass
        # One PNG per second of video, skipping the first 78s (intro).
        subprocess.call(['ffmpeg','-i', '%s/%s'%(sourceDir,vid), '-ss','78', '-vf', 'fps=1','thumbs/%s/thumb-%%d.png'%folder])
def generateHist():
    # Start the JSON output with an opening brace (written via echo).
    with open('output.json', 'w') as fp:
        subprocess.call(['echo','{'], stdout=fp)
    for folder in os.listdir("thumbs"):
        print folder
        if(folder == '.DS_Store'):
            continue
        with open('data/%s'%folder, 'w') as hist:
            for file in os.listdir('thumbs/%s'%folder):
                # NOTE(review): this re-checks `folder`, not `file` — looks
                # like it was meant to skip .DS_Store files; confirm.
                if(folder == '.DS_Store'):
                    continue
                # ImageMagick colour histogram of the frame, one line per colour.
                subprocess.call(['convert', 'thumbs/%s/%s'%(folder, file), '-format', '%c', '-depth', '8', 'histogram:info:histogram_image.txt'])
                # Keep only the 20 most frequent colours for this frame.
                p1 = subprocess.Popen(['sort', '-n', 'histogram_image.txt'], stdout=subprocess.PIPE)
                subprocess.Popen(["tail", "-20"], stdin=p1.stdout, stdout=hist)
                p1.stdout.close()
def cleanHist():
    # For each per-episode histogram dump: convert the hex colours to HSL,
    # arrange them into a sorted grid, write the grid as an ImageMagick
    # text-pixel file, and render that file to a PNG.
    for file in os.listdir('data'):
        if(file == '.DS_Store'):
            continue
        with open('data/%s'%file) as f:
            lines = f.readlines()
            for i, s in enumerate(lines):
                # Each histogram line contains "#RRGGBB "; extract the hex code.
                hexGroups = re.search('(#.*) ', lines[i])
                hexColor = hexGroups.group(1)
                c = Color(hexColor)
                lines[i] = c.hsl
            colors = lines
            # Sort all colours by saturation, then chunk into rows of 100.
            colors.sort(key=lambda tup: tup[1])
            chunk = lambda lst, sz: [lst[i:i+sz] for i in range(0, len(lst), sz)]
            colors = chunk(colors, 100)
            for row in colors:
                # Within each row, order by lightness then hue.
                row.sort(key=lambda tup: (tup[2], tup[0]))
            for row in colors:
                # Convert each HSL tuple back to a hex colour string in place.
                row[0:] = [Color(hsl=c).hex for c in row[0:]]
            h = len(colors)
            w = len(colors[0])
            # Emit the grid in ImageMagick "pixel enumeration" text format.
            with open('text/%s'%file, 'w') as f:
                f.write("# ImageMagick pixel enumeration: %i,%i,255,srgb\n"%(h,w))
                for r in range(0, h):
                    for c in range(0, w):
                        try:
                            col = colors[r][c]
                            f.write("%i,%i: "%(r,c))
                            rgb = Color(col).rgb
                            new = (int(rgb[0]*255), int(rgb[1]*255), int(rgb[2]*255))
                            f.write("%s "%str(new))
                            f.write(col)
                            f.write(" srgb%s"%str(new))
                            f.write("\n")
                        except IndexError:
                            # The last row may be shorter than 100 entries.
                            break
            subprocess.call(["convert", "text/%s"%file, "img/%s"%file.replace(".txt",".png")])
# Pipeline stages; only the final stage is currently enabled.
# generateThumbs()
# generateHist()
cleanHist()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from mongoengine import *
# Create your models here.
from src.common.libraries.customdocument import CustomDocument
class Feedback(CustomDocument):
    """Contact-form feedback entry stored in the ``feedback`` collection."""

    Name = StringField()          # submitter's name
    PhoneNumber = StringField()   # free-form phone number
    Email = EmailField()          # validated e-mail address
    Subject = StringField()       # message subject line
    ip = StringField(null=True)   # client IP, if captured
    created_at = DateTimeField()
    updated_at = DateTimeField()

    meta = {
        'collection': 'feedback',
        'strict': False,             # tolerate extra fields in stored docs
        'index_background': True,
        'auto_create_index': False,  # indexes are managed externally
        'indexes': [
            'Email'
        ]
    }
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: redefine Message.pub_date as a plain DateTimeField
    # with verbose_name 'pub_date'. Do not edit applied migrations.

    dependencies = [
        ('messagebook', '0004_auto_20151005_1104'),
    ]

    operations = [
        migrations.AlterField(
            model_name='message',
            name='pub_date',
            field=models.DateTimeField(verbose_name=b'pub_date'),
        ),
    ]
|
import streamlit as st
import torch
import pickle
import os
from pathlib import Path
import yaml
import time
from seal import Ciphertext, \
Decryptor, \
Encryptor, \
EncryptionParameters, \
Evaluator, \
IntegerEncoder, \
FractionalEncoder, \
KeyGenerator, \
MemoryPoolHandle, \
Plaintext, \
SEALContext, \
EvaluationKeys, \
GaloisKeys, \
PolyCRTBuilder, \
ChooserEncoder, \
ChooserEvaluator, \
ChooserPoly
from tensor_ops import vec_noise_budget
from server.seal_functions import nn_svr, most_recent_model
from server.train_models.nn_train import main as train_main
from server.train_models.nn_train import fully_conn
from client.request_predictions import encryption_handler, encryption_runner
import tensor_ops as tops
import numpy as np
import pandas as pd
# Resolve paths relative to this file so the app works from any CWD.
DIRECTORY = Path(os.path.realpath(__file__)).parent
HE_for_Medical_Data = DIRECTORY.parent
# Load the project README so pages can display it.
with open(DIRECTORY.parent/"README.md", "r") as f:
    README = f.read()
def sigmoid(linear_pred):
    """Logistic function: map a linear prediction into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-linear_pred))
class Quantize(object):
    """Callable that rounds values via ``tops.Round``.

    The rounding behaviour (decimal places, zero point, scale) is fixed at
    construction time; instances can then be applied like a function.
    """

    def __init__(self, decimal=6, zero=0, scale=1):
        self.fix = tops.Round(decimal=decimal, zero=zero, scale=scale)

    def __call__(self, x):
        """Apply the configured rounding to *x* and return the result."""
        return self.fix(x)
def find_file_type(dir_to_files, filename):
    """Return the newest ``*.pkl`` path in *dir_to_files* named *filename*.

    Candidates are ordered by creation time (newest first); ``None`` is
    returned when no matching file exists.
    """
    root = Path(dir_to_files) if isinstance(dir_to_files, str) else dir_to_files
    newest_first = sorted(root.glob("*.pkl"),
                          key=lambda p: p.stat().st_ctime,
                          reverse=True)
    return next((p for p in newest_first if p.name == filename), None)
def showmodelweights(modeldir,quantizer= (lambda x: x)):
    """Streamlit widget: optionally display the latest checkpoint's weights.

    :param modeldir: Path to the directory holding model checkpoints.
    :param quantizer: Callable applied to each weight array before display
        (defaults to identity).
    """
    # Pick the newest checkpoint file in the directory.
    modeldict = most_recent_model(modeldir)
    modeldict = modeldir/modeldict
    weightdict = torch.load(modeldict)["model_state_dict"]
    if st.checkbox("Show model weights"):
        for k,v in weightdict.items():
            st.markdown(k)
            st.dataframe(quantizer(v.detach().numpy()))
def getencodedmodel(modeldir, encoder, context, keygen, quantizer):
    """Encode the most recent model in *modeldir* for encrypted inference.

    Thin wrapper around ``server.seal_functions.nn_svr`` that forwards the
    SEAL encoder/context/keygen plus the quantization callable.
    """
    #nn_model_path, encoder = None, context = None, keygen = None
    encodedmodel = nn_svr(modeldir,
                          encoder = encoder,
                          context = context,
                          keygen = keygen,
                          quantize = quantizer
                          )
    return encodedmodel
class Streamlithelp():
    """Shared streamlit page helpers: well-known directories and select boxes."""

    def __init__(self):
        server_dir = DIRECTORY/"server"
        self.models_dir = server_dir/"model_params"
        # The data sets live at the repository root, not under server/.
        self.datas_dir = HE_for_Medical_Data/"data"

    def selections(self, dir):
        """Return the non-hidden immediate subdirectories of *dir*."""
        subdirs = list(os.walk(dir))[0][1]
        return [name for name in subdirs if not name.startswith(".")]

    def sideselectboxdir(self, message, dir):
        """Sidebar select box listing the subdirectories of *dir*."""
        choices = self.selections(dir)
        choices.insert(0, "Select")
        return st.sidebar.selectbox(message, choices)

    def sideselectbox(self, message, options):
        """Sidebar select box with a leading "Select" placeholder.

        Note: prepends the placeholder to *options* in place.
        """
        options.insert(0, "Select")
        return st.sidebar.selectbox(message, options)

    def centerselectbox(self, message, options):
        """Main-pane select box with a leading "Select" placeholder."""
        options.insert(0, "Select")
        return st.selectbox(message, options)

    def modelselect(self):
        """Ask the user to pick a trained model directory."""
        return self.sideselectboxdir("Choose a model", self.models_dir)

    def dataselect(self):
        """Ask the user to pick a data set directory."""
        return self.sideselectboxdir("Select data set", self.datas_dir)
class EncryptedInference(Streamlithelp):
def __init__(self):
super().__init__()
st.header("Run Inference with Encoded Models on Encrypted Data")
if st.checkbox("Description"):
st.markdown("The main purpose of this tool to help find/validate encryption settings. Depending \
on the muliplicative depth of the model chosen, the bit length of the model parameters and data \
features, and the various encryption settings, the encoded model acting on encrypted data may be \
overwhelmed by noise. If so, the output will be useless. Here, you can observe runtimes of inferencing \
on encrypted data and check the output cooresponds to the output of the unencrypted settings.")
#empty placeholders for streamlit notifications and headers
self.datasetdescription = st.empty()
self.securityleveldescription = st.empty()
self.polynomialmodulusdescription = st.empty()
self.plaintextmodulusdescription = st.empty()
st.sidebar.header("Select a model and dataset")
self.printparameters = st.sidebar.empty()
self.doneencodingmodel = st.sidebar.empty()
#list models and data
self.getpolymodels()
self.modeldir = self.sideselectbox("Choose a model", self.polymodels)
self.absolutemodeldir = self.models_dir/self.modeldir
self.dataencryptednotice = st.sidebar.empty()
self.datadir = self.dataselect()
#open data selection
if self.datadir != "Select":
with open(self.datas_dir/self.datadir/"data_dict.pkl", "rb") as f:
datadict = pickle.load(f)
self.data_x = datadict["x_"]
self.data_y = datadict["y_"]
self.npdata_x = self.data_x.to_numpy()
self.npdata_y = self.data_y.to_numpy()
self.features = self.npdata_x.shape[1]
st.write("Data_x",self.data_x)
#initialize quantize class object. this can be a part of user settings in the future
self.quantize = Quantize(decimal = 5, zero = 0, scale = 1)
#once encryption params are set this will load the encryption handler initialized class
try:
_handlerdict = find_file_type(self.absolutemodeldir,"cache_handler.pkl")
if _handlerdict != None:
with open(_handlerdict, 'rb') as f:
encryptdict = pickle.load(f)
handler = encryptdict["handler"]
whole = encryptdict["whole"]
decimal = encryptdict["decimal"]
base = encryptdict["base"]
time = encryptdict["time"]
self.handler = encryption_runner(handler)
self.handler.set_encoder(whole_sign_digits = whole,
decimal_sign_digits = decimal,
base = base,
)
tops.print_parameters(self.handler.context, empty = self.printparameters)
else:
raise
except Exception as e:
self.handler = None
#once modeldir is selected this encodes the model
if self.modeldir!="Select" and not isinstance(self.handler, type(None)):
self.model = getencodedmodel(self.absolutemodeldir,
self.handler.encoder,
self.handler._cont,
None, # for now there are cache problems that my attempts haven't fixed self.handler.keygen,
self.quantize,
)
self.doneencodingmodel.success(f"Model {self.modeldir} is encoded.")
showmodelweights(self.absolutemodeldir,quantizer = self.quantize)
#once subset of data is selected for inference, this encrypts the data
try:
_encrypteddata = find_file_type(self.absolutemodeldir,"cache_data.pkl")
if _handlerdict != None:
with open(_encrypteddata, 'rb') as f:
d = pickle.load(f)
ciphers = d["ciphers"]
plain = d["plain"]
self.dataencryptednotice.success("Stored Encrypted Data Ready")
else:
raise
except Exception as e:
ciphers = None
plain = None
#the main streamlit selection/action processes
if st.sidebar.checkbox("More Dataset Information"):
self.datasetdescription.markdown("More Dataset Information: Add name of data set as a directory \
in server/data. Then, make a pkl file of a dictionary of pandas dataframes with keys 'x\_'\
and 'y\_' for the features and targets respectively. ")
choices = ["1. Set encryption parameters",
"2. Encrypt Data",
"3. Run Inference",
]
st.sidebar.markdown("<div style='background-color:rgba(150,120,150,0.4)'><b> Come here for what's next </b></div>", unsafe_allow_html=True)
action = self.sideselectbox("Select actions in order.",choices)
st.sidebar.markdown("----------------------------------")
if action == "1. Set encryption parameters":
rawhandlerdict = self.getencryptionparams()
if self.modeldir != "Select":
if st.sidebar.button("Set encryption parameters"):
if rawhandlerdict != None:
handlerdict = self.setencryptionparams(**rawhandlerdict)
with open(self.absolutemodeldir/"cache_handler.pkl", 'wb') as f:
pickle.dump(handlerdict, f)
st.sidebar.success("Encryption Parameters Set")
tops.print_parameters(handlerdict["handler"].context,self.printparameters)
if action == "2. Encrypt Data":
if self.handler == None:
st.error("You need to set encryption settings")
else:
cipherdict = self.encodeencryptdata()
if not isinstance(cipherdict["ciphers"], type(None)):
try:
with open(self.absolutemodeldir/"cache_data.pkl", "wb") as f:
pickle.dump(cipherdict,f)
st.sidebar.success("Data Encoded")
except Exception as e:
st.sidebar.error(e)
if action == "3. Run Inference":
if st.button("Run inference for both encrypted and unencrypted models"):
if not isinstance(ciphers,type(None)) and not isinstance(plain,type(None)):
self.runinference(ciphers,plain)
else:
st.error("You need to encrypt a few samples")
def getpolymodels(self):
    """Populate ``self.polymodels`` with the names of models whose
    ``configs.yaml`` declares a polynomial activation.

    Only poly-activation models can be evaluated homomorphically, so the
    model picker is restricted to them.  Models with a missing/bad
    ``activation`` entry are silently skipped.
    """
    allmodels = self.selections(self.models_dir)
    self.polymodels = []
    for model in allmodels:
        path = self.models_dir/model/"configs.yaml"
        with open(path, "r") as f:
            configs = yaml.safe_load(f)
        # Narrowed from a bare ``except``: tolerate only a missing key
        # (KeyError) or a configs file that isn't a mapping (TypeError,
        # e.g. yaml.safe_load returned None) instead of hiding every error.
        try:
            if configs["activation"] == "poly":
                self.polymodels.append(model)
        except (KeyError, TypeError):
            pass
def getencryptionparams(self):
    """Render the sidebar widgets for the BFV-style encryption settings and
    collect them into a raw dict.

    Returns:
        dict with keys security_level, poly_modulus_pwr2, coeff_modulus,
        plain_modulus, whole, decimal, base, time — or None while the
        security level selectbox still shows "Select".
    """
    security_level = self.sideselectbox("Select security level: ", [128,192])
    if st.sidebar.checkbox("Security Level Description"):
        self.securityleveldescription.markdown("Fill in")
    # n of the x^(2^n)+1 polynomial modulus; options 10..15, default index 3 -> 13.
    poly_modulus_pwr2 = st.sidebar.selectbox("Polynomial modulus: ", [i+10 for i in range(6)], index = 3)
    if st.sidebar.checkbox("Polynomial Modulus Information"):
        self.polynomialmodulusdescription.markdown("Polynomial Modulus Information: This is the main feature for determining the size of the encrypted messages. \
Messages are encrypted as polynomials in the ring of polynomials modulo x<sup>(2^n)</sup>+1. \
Here you determine n. Larger n means longer inference times, but it will help with \
evaluating circuits with larger multiplicative depth. For your model try {} first.", unsafe_allow_html=True)
    # Power-of-two exponent for the plaintext modulus; options 8..22, default 10.
    plain_modulus = st.sidebar.selectbox("Plaintext modulus", [i+8 for i in range(15)], index = 2)
    # NOTE(review): the selectbox above only yields ints, so this guard never
    # actually sees "Select" — the conversion below always runs.
    if plain_modulus != "Select":
        plain_modulus = 2**(plain_modulus)
    if st.sidebar.checkbox("Plaintext Modulus Information"):
        self.plaintextmodulusdescription.markdown("Plaintext Modulus Information: Plaintexts are polynomials (numbers will be encoded as polynomials). Like \
polynomial modulus, this selection is for the power of 2 chosen to be plaintext size. \
A reasonable setting to start with for your model is {}.")
    if st.sidebar.checkbox("Advanced settings"):
        coderselect = False  # NOTE(review): set here and below but never read afterwards
        st.sidebar.markdown("Change default encoder settings: Here you can set the significant digits of your numerical calculations. These \
must adhere to the max number of significant digits you think will be needed in \
calculations. You can also change the base of your numerical representation, default is base 3.\
The purpose of using a lower base is related to accommodating proper decoding \
depending on the depth of circuit calculations. 3 is the default.")
        whole = st.sidebar.text_input("Number of whole significant digits",64)
        decimal = st.sidebar.text_input("Number of decimal significant digits",32)
        base = st.sidebar.text_input("Base",3)
        # NOTE(review): int() parsing only happens when a value was CHANGED —
        # leaving the defaults returns whole/decimal/base as the strings
        # "64"/"32"/"3" rather than ints; confirm downstream tolerates that.
        if whole != "64" or decimal !="32" or base != "3":
            try:
                whole = int(whole)
                decimal = int(decimal)
                base = int(base)
                coderselect = True
            except:
                st.sidebar.warning("Make sure you enter integers only.")
        st.sidebar.markdown("For now, these settings aren't of great use. Future features will be added. \
The coeff modulus setting will override the security level settings. It's not suggested \
to use this setting. If used, you should enter the product of primes each of which is \
congruent to 1 modulo 2\*(polynomial modulus). Also, you can set the plain modulus to a \
setting that is is also congruent to 1 modulo 2\*(polynomial modulus). This will be useful\
when future features are added that allow batching of many plaintexts in to on cipher text\
for more efficient inference time.")
        # Optional expert overrides; empty input keeps the defaults above.
        coeff_modulus = st.sidebar.text_input("Enter a coefficient modulus")
        if coeff_modulus == "":
            coeff_modulus = None
        else:
            coeff_modulus = int(coeff_modulus)
        plain = st.sidebar.text_input("Enter a plaintext modulus")
        if plain != "":
            plain_modulus = int(plain)
    else:
        # Defaults used when the advanced panel is collapsed.
        whole = 64
        decimal = 32
        base = 3
        coeff_modulus = None
    if security_level == "Select":
        return None
    else:
        return {"security_level": security_level,
                "poly_modulus_pwr2": poly_modulus_pwr2,
                "coeff_modulus": coeff_modulus,
                "plain_modulus": plain_modulus,
                "whole": whole,
                "decimal": decimal,
                "base": base,
                "time":time.time(),
                }
def setencryptionparams(self, **kwargs):
    """Build the encryption handler from the raw settings dict produced by
    ``getencryptionparams`` and return it bundled with the encoder settings.

    Returns:
        dict with keys handler, whole, decimal, base, time.

    Raises:
        Re-raises whatever ``encryption_handler`` raised, after surfacing it
        in the sidebar.  (Previously a failure fell through to the return
        statement and crashed with ``UnboundLocalError`` on ``handler``.)
    """
    security_level = kwargs["security_level"]
    poly_modulus_pwr2 = kwargs["poly_modulus_pwr2"]
    coeff_modulus = kwargs["coeff_modulus"]
    plain_modulus = kwargs["plain_modulus"]
    whole = kwargs["whole"]
    decimal = kwargs["decimal"]
    base = kwargs["base"]
    # Renamed local (was ``time``) so it no longer shadows the time module.
    timestamp = kwargs["time"]
    st.write(kwargs)
    try:
        handler = encryption_handler(security_level=security_level,
                                     poly_modulus_pwr2=poly_modulus_pwr2,
                                     coeff_modulus=coeff_modulus,
                                     plain_modulus=plain_modulus,
                                     )
        st.sidebar.markdown(f"Context object address: {handler.context}")
    except Exception as e:
        st.sidebar.error(f"There was a problem with your encryption settings: {e}")
        raise
    return {"handler": handler, "whole": whole, "decimal": decimal, "base": base, "time": timestamp}
def encodeencryptdata(self):
    """Let the user pick a sample range, then quantize, encode and encrypt it.

    Returns:
        dict with keys ``ciphers`` (encrypted samples, or None until the
        button is pressed) and ``plain`` (the quantized plaintext samples,
        or None likewise).
    """
    ciphers = unencrypted = None
    numdatapoints = self.npdata_x.shape[0]
    index = self.data_x.index  # NOTE(review): assigned but never used below
    st.subheader(f"Choose a range of in the {numdatapoints} samples for inference")
    lower = st.text_input("Input lower end of range")
    upper = st.text_input("Input upper end of range")
    if (lower != "") and (upper != ""):
        try:
            lower = int(lower)
            upper = int(upper)
            # NOTE(review): an out-of-bounds range only shows an error; the
            # encrypt button below still slices with it — confirm intended.
            if (lower>= upper) or lower<0 or upper>numdatapoints:
                st.error(f"You need to make sure you choose 0<= lower < upper < {numdatapoints}")
        except:
            st.error("Make sure to enter numerical index values from the dataframe.")
    #this encodes and encrypts the data as well as QUANTIZE
    if st.button("Encode and encrypt the data in the range selected"):
        try:
            start = time.time()
            with st.spinner("Encoding and Encrypting Data..."):
                # Quantize first (fixed-point), then encode+encrypt each value.
                # If lower/upper are still strings here, slicing raises and is
                # reported via the except branch below.
                unencrypted = self.quantize(self.npdata_x[lower:upper,:])
                ciphers = self.handler.encode_encrypt(unencrypted)
            stop = time.time()
            st.success(f"Finished encrypting {upper-lower} samples in {round(stop-start,4)} seconds!")
        except Exception as e:
            # NOTE(review): "encryting" typo in the user-facing message.
            st.error(f"There was a problem encryting the data: {e}")
    return {"ciphers": ciphers, "plain": unencrypted}
def runinference(self, ciphers, plain):
    """Run the encoded model on encrypted samples, decrypt the result, run
    the plain PyTorch model on the same samples, and display both outputs
    side by side with the remaining noise budget.

    Args:
        ciphers: encrypted sample matrix (rows = samples, cols = features).
        plain: the matching quantized plaintext samples.
    """
    index, features = ciphers.shape[0], ciphers.shape[1]
    start = time.time()
    with st.spinner("Running encoded model on encrypted data..."):
        encoutput = self.model.eval(ciphers)
    stop = time.time()
    st.success(f"Finished running encoded model with average of \
{round((stop-start)/index,4)} seconds/sample!")
    noise = self.handler.vec_noise_budget(encoutput)
    # NOTE(review): if ``noise.min`` is a method rather than an attribute this
    # comparison is always False — confirm against encryption_runner.
    if noise.min == 0:
        st.warning("The computations ran out of noise budget.\
Other internal features will be added in the future to help. For now, adjust the \
available encryption settings.")
    with st.spinner("Now decrypting the data and finishing with sigmoid..."):
        # The final sigmoid cannot be evaluated homomorphically, so it is
        # applied after decryption.
        unencoutput = self.handler.decrypt_decode(encoutput)
        st.write(unencoutput)
        unencoutput = sigmoid(unencoutput)
    testmodel = TestNoStreamlit(features, self.absolutemodeldir)
    start = time.time()
    with st.spinner("Running pytorch model..."):
        regoutput = testmodel.eval(plain)
    stop = time.time()
    # NOTE(review): message says "encoded model" but this timing is for the
    # plain pytorch model.
    st.success(f"Finished running encoded model with average of {round((stop-start)/index,4)} seconds/sample!")
    outstacked = np.concatenate([noise.budget,unencoutput, regoutput], axis=1)
    st.write(pd.DataFrame(outstacked, columns=["Noise budget left", "Encrypted Model", "Unencrypted Model"]))
class TestNoStreamlit():
    """Plain (non-streamlit) evaluator for a trained fully-connected model.

    Loads the model geometry from ``configs.yaml`` in *modeldir* and restores
    the weights from the most recently created ``model*`` checkpoint file.
    """

    def __init__(self, input_size, modeldir):
        with open(modeldir/"configs.yaml", 'r') as f:
            configs = yaml.safe_load(f)
        self.testmodel = fully_conn(input_size,
                                    configs["layers"],
                                    configs["activation"],
                                    degrees=configs["degrees"])
        # Newest first by creation time; take the first checkpoint whose
        # filename starts with "model".
        candidates = sorted(modeldir.glob("*"), key=lambda p: p.stat().st_ctime)
        candidates.reverse()
        for candidate in candidates:
            if candidate.name.startswith("model"):
                latest_path = candidate
                break
        checkpoint = torch.load(latest_path)
        self.testmodel.load_state_dict(checkpoint["model_state_dict"])

    def eval(self, x):
        """Run the model on array-like ``x`` and return a numpy prediction."""
        return self.testmodel.predict(torch.Tensor(x)).detach().numpy()
class Train(Streamlithelp):
    """Streamlit page for creating a new perceptron model or continuing
    training of an existing one."""
    def __init__(self):
        super().__init__()
        st.sidebar.header("Train")
        # Only offer the New/Old choice once a dataset has been selected.
        if self.getdatapath():
            newold = st.sidebar.selectbox("Start making a new model and train, or continue training an old model",
                                          ["Select","New", "Old"],
                                          )
            # NOTE(review): ``modelloaded`` is assigned but never read.
            modelloaded = False
            if newold == "New":
                modelloaded = self.new()
            if newold == "Old":
                modelloaded = self.old()
    def getdatapath(self):
        """Prompt for a dataset; return True once a real choice was made
        (implicitly returns None while "Select" is still shown)."""
        self.datadir = self.dataselect()
        if st.sidebar.checkbox("More info"):
            st.sidebar.markdown("For now, the data format must be a pickled dictionary \
object with keys train_x, train_y, test_x, test_y. If you wish to train \
on new data, put it in server/data/DATADIRNAME as train_dict.pkl")
        if self.datadir != "Select":
            return True
    def new(self):
        """Collect settings for a brand-new model, write its configs.yaml,
        and start training when the user confirms."""
        allowed = False
        st.sidebar.subheader("Choose new perceptron model parameters.")
        modelname = st.sidebar.text_input("Enter a name for a model. \
Make sure there are no spaces, and it is a valid directory name format.")
        modelname = modelname.replace(" ","")
        # Reject names that already exist under the models directory.
        if modelname in self.selections(self.models_dir) and modelname != "":
            st.sidebar.warning("Enter a different model name, that one is already taken.")
            modelnameok = False
        else:
            modelnameok = True
        modelname = self.models_dir/modelname
        modelgeometry = st.sidebar.text_input("Input a list of the number of perceptrons for each layer seperated by commas. \
For example, with logistic regression with two outputs, enter 2.")
        modelgeo = list(modelgeometry.split(","))
        if modelgeometry != '':
            try:
                # Keep only strictly positive integer layer sizes.
                modellayers = [int(i) for i in modelgeo if int(i)>0]
                if len(modellayers) == len(modelgeo):
                    allowed = True
                else:
                    allowed = False
                    # NOTE(review): bare ``raise`` with no active exception
                    # raises RuntimeError, which the except below then catches
                    # to show the warning — works, but only by accident.
                    raise
            except:
                st.sidebar.warning("You need to enter positive integers seperated by commas for perceptron layers.")
                allowed = False
        if allowed:
            for layer in range(len(modelgeo)):
                st.sidebar.markdown(f"Layer {layer} will have {modelgeo[layer]} neurons")
        activation = st.sidebar.selectbox("Type of activation function",
                                          ["Select","Polynomial","ReLU"])
        if activation == "Polynomial":
            activation = "poly"
            degrees = [1,2]
        if activation == "ReLU":
            activation = "relu"
            degrees = None
        advanced = st.sidebar.checkbox("Advanced training features")
        if advanced:
            if activation == "poly":
                degrees = st.sidebar.multiselect("Input the degrees to include in polynomial \
activations. E.g, for a polynomial Ax + Bx^2 + Cx^4,\
enter 1,2,4.",
                                                 options = ["1","2","3","4","5","6"],
                                                 default = ["1","2"],
                                                 )
                degrees = [int(x) for x in degrees]
            lr = st.sidebar.text_input("Enter the learning rate for ADAM optimizer", value = 0.001)
            b = st.sidebar.text_input("Enter the batch size", value = 30)
            n = st.sidebar.text_input("Enter the number of epochs for training", value = 10)
        else:
            # Default hyper-parameters when the advanced panel is collapsed.
            lr, b, n = 0.001, 30, 10
        if allowed == True and activation != "Select":
            st.sidebar.markdown("Great that's everything I need")
        if allowed and modelnameok and activation != "Select":
            if st.sidebar.button("Click: Save model settings & train"):
                configsdict = {"modeltype": "nn",
                               "learning_rate": float(lr),
                               "batch_size": int(b),
                               "num_epochs": int(n),
                               "activation": activation,
                               "layers": modellayers,
                               "degrees": degrees,
                               }
                os.mkdir(modelname)
                configs = modelname/"configs.yaml"
                with open(configs,'w') as f:
                    yaml.dump(configsdict, f)
                st.sidebar.markdown(f"Saving model settings to path: {configs}")
                self.modelname = modelname
                self.configsdict = configsdict
                self.training()
    def old(self):
        """Pick an existing model, optionally update its training
        hyper-parameters in configs.yaml, and resume training on demand."""
        modelname = self.modelselect()
        if modelname != "Select":
            modelname = self.models_dir/modelname
            configs = modelname/"configs.yaml"
            with open(configs,"r") as f:
                configsdict = yaml.safe_load(f)
            placeholder = st.sidebar.empty()
            # NOTE(review): "trainging" typo in the user-facing label.
            if st.sidebar.checkbox("Change trainging settings."):
                lr = st.sidebar.text_input("Enter the learning rate for ADAM optimizer", value = 0.001)
                b = st.sidebar.text_input("Enter the batch size", value = 30)
                n = st.sidebar.text_input("Enter the number of epochs for training", value = 10)
                if st.sidebar.button("Click: Update settings"):
                    configsdict["learning_rate"] = float(lr)
                    configsdict["batch_size"] = int(b)
                    configsdict["num_epochs"] = int(n)
                    placeholder.json(configsdict)
                    # Persist the updated settings back to configs.yaml.
                    with open(configs,'w') as f:
                        yaml.dump(configsdict, f)
            placeholder.json(configsdict)
            if st.sidebar.button("Click: Begin training"):
                self.modelname = modelname
                self.configsdict = configsdict
                self.training()
    def training(self):
        """Kick off training, resuming from a checkpoint when one exists."""
        checkpoints = most_recent_model(self.modelname)
        if checkpoints == None:
            continuetrain = False
        else:
            continuetrain = True
        st.header("Training...")
        train_main(modeldir = self.modelname, datadir = self.datadir, continuetrain = continuetrain)
# --- Streamlit entry point: top-level page dispatch ---
st.sidebar.header("Choose an Action")
choices = ["README","Train Model", "Run model on Encrypted Data"]
action = st.sidebar.selectbox("Train, Test encrypted", choices)
st.sidebar.markdown("------------------")
# Each page renders itself on construction; README is plain markdown.
if action == "README":
    st.write(README)
if action == "Train Model":
    Train()
if action == "Run model on Encrypted Data":
    EncryptedInference()
|
from sqlalchemy import and_
from changes.config import db
from changes.constants import Result, Status
from changes.models.build import Build
from changes.models.job import Job
from changes.models.jobplan import JobPlan
from changes.models.plan import Plan
from changes.models.project import Project
from changes.utils.locking import lock
@lock
def update_project_stats(project_id):
    """Refresh ``Project.avg_build_time`` from the five most recent
    passed, finished builds of the project (None when there are none)."""
    recent_builds = Build.query.filter_by(
        result=Result.passed,
        status=Status.finished,
        project_id=project_id,
    ).order_by(Build.date_finished.desc())[:5]

    # Average over the window size, counting only builds with a duration.
    durations = [b.duration for b in recent_builds if b.duration]
    avg_build_time = sum(durations) / len(recent_builds) if recent_builds else None

    db.session.query(Project).filter(
        Project.id == project_id
    ).update({
        Project.avg_build_time: avg_build_time,
    }, synchronize_session=False)
@lock
def update_project_plan_stats(project_id, plan_id):
    """Refresh ``Plan.avg_build_time`` for one project/plan pair from the
    five most recent passed, finished jobs (None when there are none)."""
    job_plan = JobPlan.query.filter(
        JobPlan.project_id == project_id,
        JobPlan.plan_id == plan_id,
    ).first()
    if not job_plan:
        # No job has ever run under this plan — nothing to update.
        return

    recent_jobs = Job.query.filter(
        Job.result == Result.passed,
        Job.status == Status.finished,
        Job.project_id == project_id,
    ).join(
        JobPlan,
        and_(
            JobPlan.id == job_plan.id,
            JobPlan.job_id == Job.id,
        )
    ).order_by(Job.date_finished.desc())[:5]

    # Average over the window size, counting only jobs with a duration.
    durations = [j.duration for j in recent_jobs if j.duration]
    avg_build_time = sum(durations) / len(recent_jobs) if recent_jobs else None

    db.session.query(Plan).filter(
        Plan.id == job_plan.plan_id,
    ).update({
        Plan.avg_build_time: avg_build_time,
    }, synchronize_session=False)
|
"""
Simple wrapper class for pyvirtualdisplay to standardize init options
"""
import logging
from xvfbwrapper import Xvfb
log = logging.getLogger("datafeeds")
class VirtualDisplay:
    """Context-manager wrapper around xvfbwrapper's Xvfb with a fixed,
    standardized screen size."""

    def __init__(self):
        # 1900x1200 virtual screen; standardizes init options across callers.
        self._display = Xvfb(width=1900, height=1200)

    def start(self):
        """Bring up the virtual display."""
        log.info("Starting virtual display")
        self._display.start()

    def stop(self):
        """Tear down the virtual display."""
        log.info("Stopping virtual display")
        self._display.stop()

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *args):
        self.stop()
|
class Character:
    """GURPS-style character sheet model.

    Derives HP/FP/Will from the base attributes plus their adjustments and
    precomputes the value bands (ranges) used by the tracker drawing code.
    """

    def __init__(self, name, player, st, hp_adjust, ht, fp_adjust, iq, will_adjust, er):
        # Identity and raw attributes.
        self.name = name
        self.player = player
        self.st = st
        self.ht = ht
        self.iq = iq
        self.er = er
        self.hp_adjust = hp_adjust
        self.fp_adjust = fp_adjust
        self.will_adjust = will_adjust
        # Derived pools: base attribute plus its adjustment.
        self.hp = st + hp_adjust
        self.fp = ht + fp_adjust
        self.will = iq + will_adjust
        # Wound thresholds (fractions of HP) and knockback threshold.
        self.limb = self.hp / 2
        self.extremity = self.hp / 3
        self.major = self.hp / 2
        self.knockback = st - 2
        self.init_fp_ranges(self.fp)
        self.init_hp_ranges(self.hp)
        self.init_will_ranges(self.will)

    def init_will_ranges(self, will):
        """Precompute the (upper, lower) Will bands for the will tracker."""
        self.will_ok = (will, (2 * will) / 3)
        self.will_mild = ((2 * will) / 3, will / 3)
        self.will_major = (will / 3, 0)
        self.will_constant = (0, -will)
        self.will_ranges = [
            self.will_ok,
            self.will_mild,
            self.will_major,
            self.will_constant,
        ]

    def init_fp_ranges(self, fp):
        """Precompute the (upper, lower) FP bands for the fatigue tracker."""
        self.fp_ok = (fp, fp / 3)
        self.fp_reeling = (fp / 3, 0)
        self.fp_ko = (0, (fp * -1) - 1)
        self.fp_ranges = [
            self.fp_ok,
            self.fp_reeling,
            self.fp_ko,
        ]

    def init_hp_ranges(self, hp):
        """Precompute the (upper, lower) HP bands: OK, reeling, then five
        full-HP-wide negative bands, capped by the -5xHP..-10xHP band."""
        negative_bands = [(-i * hp, -(i + 1) * hp) for i in range(5)]
        self.hp_ranges = [(hp, 3), (3, 0)] + negative_bands + [(-5 * hp, -10 * hp)]
# Canvas color model: RGB with 0-255 components (NodeBox/PlotDevice-style
# drawing environment — host app provides colormode/color/rect/etc.).
colormode(RGB, 255)
class Colors:
    """Named RGB color constants used throughout the sheet drawing code."""
    WHITE = color(255, 255, 255)
    BLACK = color(0, 0, 0)
    RED = color(255, 0, 0)
    D_GREEN = color(0, 100, 0)
    L_PURPLE = color(147, 112, 219)
    PURPLE = color(102, 51, 153)
    INDIGO = color(75, 0, 130)
    GREEN = color(0, 128, 0)
    AMBER = color(184, 134, 11)
    GOLD = color(255, 215,0)
    YELLOW = color(255, 255, 0)
    ORANGE = color(255, 165, 0)
    CRIMSON = color(220, 20, 60)
    VD_BLUE = color(25, 25, 112)
    D_BLUE = color(00, 00, 128)
    # NOTE(review): (230, 230, 139) is a yellowish tone despite the "blue"
    # name — confirm intended value (a medium blue would be e.g. 0, 0, 205).
    M_BLUE = color(230, 230, 139)
    L_BLUE = color(00, 00, 205)
    VL_BLUE = color(65, 105, 225)
    VVL_BLUE = color(135, 206, 250)
# Shared row-message template for the HP track: c = HT penalty, v = threshold value.
MSG=lambda c, v: "- Roll vs HT-%s or KO each turn\n- At %s, Roll vs HT or Die" % (c, v)
# Per-row (color, message-builder[, text-color]) tuples for each tracker.
HP_EFFECTS = [
    (Colors.GREEN, lambda c, v: "- OK"),
    (Colors.AMBER, lambda c, v: "- Halve BS, Move, and Dodge"),
    (Colors.GOLD, lambda c, v: "- Roll vs HT or KO each turn"),
    (Colors.YELLOW, MSG),
    (Colors.ORANGE, MSG),
    (Colors.CRIMSON, MSG),
    (Colors.RED, MSG),
    (Colors.BLACK, lambda c, v: "- At %s, Instant Death\n- At %s, Body destroyed" % (v, 2*v), Colors.WHITE)
]
FP_EFFECTS = [
    (Colors.L_PURPLE, lambda c, v: "+FP to 1/3: OK", Colors.WHITE),
    (Colors.PURPLE, lambda c, v: "1/3 to 1: Very Tired -\nHalve MS, BS, ST", Colors.WHITE),
    (Colors.INDIGO, lambda c, v: "0 to -FP: Verge of Collapse -\nRoll vs HT or KO", Colors.WHITE)
]
WILL_EFFECTS = [
    (Colors.VL_BLUE, lambda c, v: "+Will to 2/3:\n-0 / No Change / None", Colors.WHITE),
    (Colors.L_BLUE, lambda c, v: "2/3 to 1/3:\n-2 / One Level Harder / Mild", Colors.WHITE),
    (Colors.D_BLUE, lambda c, v: "1/3 to 1:\n-5 / Autofail / Major", Colors.WHITE),
    (Colors.VD_BLUE, lambda c, v: "0 to -Will:\n-7 / Compulsion / Constant", Colors.WHITE)
]
# Layout metrics (pixels): page borders, cell sizes, fonts.
BORDER_H=10
BORDER_W=10
GUTTER=10
STAT_W=200
STAT_H=70
STAT_FONT_SIZE=20
TRACK_HEADER_FONT_SIZE=16
TEXT_FONT_SIZE=12
BUBBLE_FONT_SIZE=8
FONT="Arial"
FONT_BOLD="Arial Bold"
# Derived positions for the stat boxes and the three trackers.
TRACKER_Y = BORDER_H + STAT_H + GUTTER
HEADER_H = 45
BUBBLE_SIZE = 18
BUBBLE_OFFSET = BUBBLE_SIZE+2
TRACKER_H = 9 * HEADER_H
HP_TRACKER_W = 2.5*STAT_W
P_WIDTH = 2*GUTTER + 2*BORDER_W + 2*STAT_W + HP_TRACKER_W
NAME_W = P_WIDTH - (STAT_W*2 + GUTTER*2 + BORDER_W*2)
P_HEIGHT = GUTTER + 2*BORDER_H + TRACKER_H + STAT_H
HP_X = BORDER_W
HP_Y = TRACKER_Y
FP_X = HP_TRACKER_W + GUTTER + BORDER_W
FP_Y = TRACKER_Y
INFO_X = P_WIDTH-STAT_W-BORDER_W
WILL_X = FP_X
WILL_Y = TRACKER_Y + HEADER_H * 4
# NOTE(review): WIDTH is not defined in this file and size() is only called
# below — presumably WIDTH is a canvas global supplied by the host app; confirm
# it is available before this line runs.
FP_WIDTH = WIDTH - HP_TRACKER_W - (3 * GUTTER)
size(P_WIDTH, P_HEIGHT)
def draw_hp(char):
    """Draw the ST/HP stat box in the top-left corner of the sheet."""
    fontsize(STAT_FONT_SIZE)
    stroke(Colors.D_GREEN)
    fill(Colors.D_GREEN)
    rect_x = BORDER_W
    rect_y = BORDER_H
    rect(rect_x, rect_y, STAT_W, STAT_H)
    # NOTE(review): stroke switched to indigo right after the green box —
    # only affects subsequent strokes; confirm intended.
    stroke(Colors.INDIGO)
    txt = "ST:\t%s\nHP:\t%s" % (char.st, char.hp)
    txt_w, txt_h = textmetrics(txt)
    fill(Colors.WHITE)
    font(FONT_BOLD)
    text(txt, rect_x+GUTTER, rect_y+txt_h/2)
def draw_energy_reserve(char):
    """Draw the Energy Reserve row (only when char.er > 0): a header cell
    plus one numbered check-off bubble per ER point, centered in the cell."""
    if (char.er > 0):
        x = FP_X
        y = TRACKER_Y + 8 * HEADER_H
        w = STAT_W
        h = HEADER_H
        fill(Colors.M_BLUE)
        rect(x, y, w, h)
        txt = "Energy Reserve"
        txt_w, txt_h = textmetrics(txt)
        fontsize(TRACK_HEADER_FONT_SIZE)
        align(CENTER)
        fill(Colors.BLACK)
        text(txt, x, y+txt_h+4, w)
        # Bubbles count down from er to 1, laid out left to right and
        # centered horizontally within the cell.
        b_y = y + HEADER_H / 2
        b_x = x + STAT_W/2 - (BUBBLE_OFFSET * char.er)/2
        for i in range(char.er, 0, -1):
            draw_bubble(i, b_x, b_y)
            b_x = b_x + BUBBLE_OFFSET
def draw_fp(char):
    """Draw the HT/FP stat box at the top of the fatigue column."""
    fontsize(STAT_FONT_SIZE)
    stroke(Colors.INDIGO)
    fill(Colors.INDIGO)
    rect_x = FP_X
    rect_y = BORDER_H
    rect(rect_x, rect_y, STAT_W, STAT_H)
    txt = "HT:\t%s\nFP:\t%s" % (char.ht, char.fp)
    txt_w, txt_h = textmetrics(txt)
    fill(Colors.WHITE)
    font(FONT_BOLD)
    text(txt, rect_x+GUTTER, rect_y+txt_h/2)
def draw_name(char):
    """Draw the character/player name box between the two stat boxes."""
    fontsize(STAT_FONT_SIZE)
    stroke(Colors.BLACK)
    fill(Colors.WHITE)
    rect_x = STAT_W+BORDER_W+GUTTER
    rect_y = BORDER_H
    w = NAME_W
    h = STAT_H
    rect(rect_x, rect_y, w, h)
    fill(Colors.BLACK)
    txt = "Character:\t%s\nPlayer:\t\t%s" % (char.name, char.player)
    txt_w, txt_h = textmetrics(txt)
    font(FONT_BOLD)
    text(txt, rect_x+GUTTER, rect_y+txt_h/2)
def fp_col(index, x_0, y_0, txt, hue, txt_hue, range, offset_range):
    """Draw one vertical FP column: header cell, body cell, caption, and a
    stack of bubbles — one per item in ``range``, shifted down by one slot
    per item in ``offset_range``.

    NOTE(review): the ``range`` parameter shadows the builtin; callers pass
    the (upper, lower) band tuples here, so each column only gets as many
    bubbles as the tuple has elements — confirm this vertical variant is
    still in use (draw_layout currently uses the horizontal tracks).
    """
    c_x_delta = (STAT_W / 3)
    c_w = c_x_delta + 1
    c_x = x_0 + c_x_delta * index
    # header
    fill(hue)
    stroke(Colors.BLACK)
    rect(c_x, y_0, c_w, HEADER_H)
    # column
    fill(hue)
    stroke(Colors.BLACK)
    rect(c_x, y_0+HEADER_H, c_w, TRACKER_H - 2 * HEADER_H)
    # header text
    font(FONT_BOLD)
    fontsize(TEXT_FONT_SIZE)
    txt_w, txt_h = textmetrics(txt)
    txt_y = y_0 + 0.5 * HEADER_H - txt_h/5
    fill(txt_hue)
    align(CENTER)
    text(txt, c_x, txt_y, c_w)
    # bubbles
    stroke(Colors.BLACK)
    fill(Colors.WHITE)
    b_x = c_x + c_x_delta/2 - BUBBLE_SIZE/2
    b_y_0 = y_0 + HEADER_H + BUBBLE_SIZE/2
    b_y = b_y_0
    # Skip one bubble slot per entry already drawn in the previous column.
    for i in offset_range:
        b_y = b_y + (BUBBLE_OFFSET)
    for i in range:
        draw_bubble(i, b_x, b_y)
        b_y = b_y + (BUBBLE_SIZE+2)
def draw_bubble(i, b_x, b_y, hue = Colors.WHITE, txt_hue = Colors.BLACK):
    """Draw one numbered check-off bubble at (b_x, b_y) with label ``i``."""
    fontsize(BUBBLE_FONT_SIZE)
    b_txt_w, b_txt_h = textmetrics(i)
    stroke(Colors.BLACK)
    fill(hue)
    oval(b_x, b_y, BUBBLE_SIZE, BUBBLE_SIZE)
    fill(txt_hue)
    align(CENTER)
    font(FONT)
    # The text block is twice the bubble width and centered on the bubble.
    text(i, b_x-BUBBLE_SIZE/2, b_y+3*BUBBLE_SIZE/4, 2*BUBBLE_SIZE)
def draw_fp_track_vertical(char):
    """Draw the (legacy) vertical fatigue tracker: a header plus three
    color-coded columns (OK / reeling / KO) and the energy reserve row."""
    rect_x = FP_X
    rect_y = TRACKER_Y
    w = STAT_W
    h = TRACKER_H
    rect(rect_x, rect_y, w, h)
    # header
    fill(Colors.INDIGO)
    rect(rect_x, rect_y, w, HEADER_H)
    fontsize(TRACK_HEADER_FONT_SIZE)
    draw_track_header("Fatigue (FP) Tracking", rect_x, rect_y, w, Colors.INDIGO)
    col_y = rect_y + HEADER_H
    # columns
    fp_col(0, rect_x, col_y, "+FP to\n1/3", Colors.L_PURPLE, Colors.WHITE, char.fp_ok, range(0))
    fp_col(1, rect_x, col_y, "1/3 to 1", Colors.PURPLE, Colors.WHITE, char.fp_reeling, char.fp_ok)
    fp_col(2, rect_x, col_y, "0 to -FP", Colors.INDIGO, Colors.WHITE, char.fp_ko, range(0))
    draw_energy_reserve(char)
def draw_fp_track_horizontal(char):
    """Draw the horizontal fatigue tracker via the generic draw_track."""
    draw_track(FP_X, FP_Y, FP_WIDTH , 0.5, FP_EFFECTS, char.fp_ranges, "Fatigue (FP) Tracking", "Effects", False, Colors.INDIGO, "unconscious.png")
def draw_will_track_horizontal(char):
    """Draw the horizontal will tracker via the generic draw_track."""
    draw_track(FP_X, WILL_Y, FP_WIDTH, 0.5, WILL_EFFECTS, char.will_ranges, "Will Tracking", "Will / SC / Interference", False, Colors.VD_BLUE, "supernatural.png")
def draw_hp_track(char):
    """Draw the hit-point tracker via the generic draw_track (with the
    second row's bubbles offset to continue after the first row's)."""
    draw_track(HP_X, HP_Y, HP_TRACKER_W, 0.6, HP_EFFECTS, char.hp_ranges, "Hit Point (HP) Tracking", "Effects", True, Colors.D_GREEN, "skull.jpg")
def draw_track(start_x, start_y, width, col_1_ratio, effects_list, stat_ranges, header, effects, offset_second_row, header_color, img):
    """Draw a generic horizontal tracker: a two-column table where column 1
    holds the check-off bubbles for each stat band and column 2 the effect
    text, with an image cell on the final row instead of bubbles.

    Args:
        start_x, start_y: top-left of the track.
        width: total track width; col_1_ratio: fraction given to column 1.
        effects_list: (color, message-builder[, text-color]) per row.
        stat_ranges: matching (upper, lower) value band per row.
        header, effects: column header captions.
        offset_second_row: shift row 1's bubbles to continue after row 0's.
        header_color: header fill; img: image file for the last row.
    """
    x = start_x
    y = start_y
    w = width
    h = TRACKER_H
    col_1_w = w * col_1_ratio
    col_2_w = width - col_1_w
    col_1_x = x
    col_2_x = col_1_x + col_1_w
    rect(x, y, w, h)
    # headers
    draw_track_header(header, col_1_x, y, col_1_w, header_color)
    draw_track_header(effects, col_2_x, y, col_2_w, header_color)
    # rows
    fontsize(TEXT_FONT_SIZE)
    for i, row in enumerate(effects_list):
        y_c = y + HEADER_H * (i + 1)
        stat_range = stat_ranges[i]
        # NOTE(review): ranges are stored (upper, lower) so these two names
        # look swapped; ``top`` is never used.
        bottom = stat_range[0]
        top = stat_range[1]
        fill(row[0])
        # col 1
        rect(col_1_x, y_c, col_1_w, HEADER_H)
        # col 2
        rect(col_2_x, y_c, col_2_w, HEADER_H)
        # Optional third tuple element overrides the text color.
        try: txt_hue = effects_list[i][2]
        except: txt_hue = Colors.BLACK
        txt_func=row[1]
        txt = txt_func(i-2, bottom)
        txt_w, txt_h = textmetrics(txt)
        fill(txt_hue)
        fontsize(TEXT_FONT_SIZE)
        font(FONT)
        text(txt, col_2_x+GUTTER, y_c + HEADER_H/2)
        b_y = y_c + HEADER_H / 2 - BUBBLE_SIZE/2
        # bubbles
        # NOTE(review): this reuses ``b_x`` left over from the PREVIOUS row's
        # bubble loop — it only works because row 0 always draws bubbles
        # before row 1 is reached; fragile if row 0's range were empty.
        if (i == 1 and offset_second_row):
            offset = b_x + BUBBLE_OFFSET
        else:
            offset = x
        if (i < len(effects_list) - 1):
            # One bubble per point in the band, counting down.
            # NOTE(review): FP/Will bands contain floats (e.g. fp/3), and
            # range() with float arguments fails on Python 3 — this script
            # appears to target a Python-2-era drawing app; confirm.
            for b, value in enumerate(range(stat_range[0], stat_range[1], -1)):
                b_x = offset + BUBBLE_OFFSET * b
                if (i > 2 and b == 0):
                    draw_bubble(value, b_x, b_y, Colors.BLACK, Colors.WHITE)
                else:
                    draw_bubble(value, b_x, b_y, Colors.WHITE, Colors.BLACK)
        else:
            # Last row shows the track's terminal image instead of bubbles.
            image(img, offset + BUBBLE_SIZE/2, y_c+BUBBLE_SIZE/3, HEADER_H * 0.75, HEADER_H * 0.75)
def draw_track_header(txt, x0, y0, w, hue):
    """Draw a filled header cell of width w with centered bold white text."""
    fill(hue)
    rect(x0, y0, w, HEADER_H)
    fontsize(TRACK_HEADER_FONT_SIZE)
    align(CENTER)
    head_w, head_h = textmetrics(txt)
    font(FONT_BOLD)
    fill(Colors.WHITE)
    text(txt, x0, y0+HEADER_H-head_h, w)
def draw_info_msg(i, x0, y0, col_1_txt, col_2_txt, hue=Colors.WHITE, txt_hue=Colors.BLACK):
    """Draw row ``i`` of the info panel: a narrow label cell (col 1) and a
    wider description cell (col 2), both vertically centered."""
    font(FONT)
    fontsize(TEXT_FONT_SIZE)
    y = y0 + HEADER_H * i
    fill(hue)
    rect(x0, y, STAT_W, HEADER_H)
    # col 1
    fill(txt_hue)
    col_1_w=STAT_W/3
    col_1_txt_x = x0 + GUTTER
    col_1_txt_y = valign_middle(y, HEADER_H, col_1_txt)
    font(FONT)
    text(col_1_txt, col_1_txt_x, col_1_txt_y)
    # col 2
    col_2_x = x0 + col_1_w
    fill(hue)
    rect(col_2_x, y, STAT_W-col_1_w, HEADER_H)
    fill(txt_hue)
    col_2_txt_y = valign_middle(y, HEADER_H, col_2_txt)
    font(FONT)
    text(col_2_txt, col_2_x+GUTTER, col_2_txt_y)
def valign_middle(y0, h, txt):
    """Return the y coordinate to vertically center ``txt`` in a cell of
    height ``h`` (the text-height correction is currently disabled)."""
    txt_w, txt_h = textmetrics(txt)
    return y0 + h/2# + txt_h/4
def draw_info(char):
    """Draw the right-hand reference panel: fatigue effects plus the injury
    and knockback thresholds.

    NOTE(review): currently disabled in draw_layout, and rows 5-8 reuse the
    fatigue "Verge of Collapse" text verbatim — looks like placeholder
    copy-paste for the wound descriptions; confirm before re-enabling.
    """
    fill(Colors.WHITE)
    stroke(Colors.BLACK)
    x = INFO_X
    y = TRACKER_Y
    w = STAT_W
    h = TRACKER_H
    rect(x, y, w, h)
    draw_track_header("Fatigue Effects", x, y, w, Colors.INDIGO)
    draw_info_msg(1, x, y, "+FP to\n1/3", "OK", Colors.L_PURPLE, Colors.WHITE)
    draw_info_msg(2, x, y, "1/3 to 1", "Very Tired:\nHalve MS, BS, ST", Colors.PURPLE, Colors.WHITE)
    draw_info_msg(3, x, y, "0 to -FP", "Verge of Collapse:\nRoll vs HT or KO", Colors.INDIGO, Colors.WHITE)
    draw_track_header("Injury & Wounds", x, y+HEADER_H*4, w, Colors.D_GREEN)
    draw_info_msg(5, x, y, "%s pts" % char.limb, "Verge of Collapse:\nRoll vs HT or KO")
    draw_info_msg(6, x, y, "%s pts" % char.extremity, "Verge of Collapse:\nRoll vs HT or KO")
    draw_info_msg(7, x, y, "%s pts" % char.major, "Verge of Collapse:\nRoll vs HT or KO")
    draw_info_msg(8, x, y, "%s pts" % char.knockback, "Verge of Collapse:\nRoll vs HT or KO")
def draw_layout(char):
    """Draw the whole character sheet: stat boxes plus the three trackers."""
    draw_hp(char)
    draw_name(char)
    draw_fp(char)
    draw_hp_track(char)
    draw_fp_track_horizontal(char)
    draw_will_track_horizontal(char)
    # draw_info(char)
# Pick the character to render; args are
# (name, player, st, hp_adjust, ht, fp_adjust, iq, will_adjust, er).
# character = Character("RC Cleveland", "Marc Faletti", 10, 1, 11, 1, 14, 1, 0)
# character = Character("Natalia Satsuki", "Amanda Marcotte", 10, 0, 10, 0, 13, -1, 0)
# character = Character("Nora Blackburn", "Tinsley Webster", 10, 1, 12, -1, 14, 0, 0)
character = Character("Everett O'Connel", "Stewart McMacken", 12, 0, 12, 0, 15, -5, 0)
draw_layout(character)
|
# Dictionary basics: creation, mutation, membership, lookup, and copying.
empty_dict = {}
print(empty_dict)

bierce = {
    "day": "A period",
    "positive": "Mistaken",
    "misfortune": "The Kin",
}
print(bierce)

# dict() accepts an iterable of two-item sequences.
lol = [['a', 'b'], ['c', 'd'], ['e', 'f']]
print(dict(lol))

pythons = {
    'Chapman': 'Graham',
    'Cleese': 'John',
    'Idle': 'Eric',
    'Jones': 'Terry',
    'Palin': 'Michael',
}
print(pythons)

# Adding a key, then overwriting the same key.
pythons['Gilliam'] = 'Gerry'
print(pythons)
pythons['Gilliam'] = 'Terry'
print(pythons)

# update() merges another mapping in place.
others = {'Marx': 'Groucho', 'Howard': 'Moe'}
pythons.update(others)
print(pythons)

# del removes a key (KeyError if absent).
del pythons['Marx']
print(pythons)

#pythons.clear()
#print(pythons)

# Membership tests keys, not values — 'Chapman1' is absent, so False.
print('Chapman1' in pythons)
#print(pythons['Marx'])

# Guarded lookup avoids a KeyError for the deleted key.
if 'Marx' in pythons:
    pythons['Marx']
else:
    print('no key')

# get() returns a default instead of raising.
print(pythons.get('Marx', 'Not a python'))

# Views over keys, values, and (key, value) pairs.
print(list(pythons.keys()))
print(list(pythons.values()))
print(list(pythons.items()))

# copy() makes a shallow, independent copy.
newPythons = pythons.copy()
print(newPythons)
# Each product has a name, base price, and tax rate. There should also be a method to calculate and return the product's
# total price based on the base price and tax rate.
class Product:
    """A retail product with a name, a base price, and a percent tax rate."""

    def __init__(self, name, price, tax=0):
        self.name = name
        # Coerce up front so arithmetic below always works on floats.
        self.price = float(price)
        self.tax = float(tax)

    def total_price(self):
        """Return the base price plus tax (tax is a percentage, e.g. 8.5)."""
        return self.price * (1 + self.tax / 100)

    def __str__(self):
        # Display only the product name.
        # return "The product is {}, it costs ${:.2f}, and has a tax rate of {}.".format(self.name,self.price,self.tax)
        return "{}".format(self.name)
|
# Generated by Django 3.1.7 on 2021-05-05 03:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Reshape the menu/order relations: move the MenuItem link onto
    Category, re-link Order directly to MenuItem, add a transaction id to
    Order, and introduce the OrderItem through-model.

    Auto-generated by Django — do not edit operations by hand.
    """

    dependencies = [
        ('restaurantApp', '0013_auto_20210503_1159'),
    ]

    operations = [
        # Drop the old direct links that are being replaced below.
        migrations.RemoveField(
            model_name='menuitem',
            name='category',
        ),
        migrations.RemoveField(
            model_name='order',
            name='menu_item',
        ),
        # Category now points at a MenuItem (nullable, cascade delete).
        migrations.AddField(
            model_name='category',
            name='category_item',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='restaurantApp.menuitem'),
        ),
        migrations.AddField(
            model_name='order',
            name='menuitem',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='restaurantApp.menuitem'),
        ),
        migrations.AddField(
            model_name='order',
            name='transaction_id',
            field=models.CharField(max_length=100, null=True),
        ),
        # Line items: quantity of one MenuItem on one Order.
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField(blank=True, default=0, null=True)),
                ('date_added', models.DateTimeField(auto_now_add=True)),
                ('item', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='restaurantApp.menuitem')),
                ('order', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='restaurantApp.order')),
            ],
        ),
    ]
|
# Generated by Django 2.2.6 on 2020-03-09 20:17
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Replace the profile's waiver flag with an e-signature date and the
    full waiver text, and refresh several help_text strings.

    Auto-generated by Django — do not edit operations by hand.
    """

    dependencies = [
        ('slobg_app', '0006_auto_20200309_1949'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='waiver_to_sign',
        ),
        # Default needed once for existing rows; preserve_default=False drops
        # it from the final field definition.
        migrations.AddField(
            model_name='profile',
            name='esignature_date',
            field=models.DateTimeField(blank=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='profile',
            name='volunteer_waiver_and_release',
            field=models.TextField(blank=True, help_text='San Luis Obispo Botanical Garden (SLOBG) is not responsible for \n\t\tan injury or accident that may occur during my participation as a volunteer in \n\t\tany activity or event. I understand that by signing below I assume full responsibility\n\t\tfor any injury or accident that may occur during my participation as a volunteer, and \n\t\tI hereby release and hold harmless and covenant not to file suit against SLOBG, \n\t\temployees and any affiliated individuals (“releasees”) associated with my \n\t\tparticipation from any loss, liability or claims I may have arising out of my \n\t\tparticipation, including personal injury or damage suffered by me or others, whether \n\t\tcaused by falls, contact with participants, conditions of the facility, negligence of \n\t\tthe releasees or otherwise. If I do not agree to these terms, I understand that I am \n\t\tnot allowed to participate in the volunteer program.'),
        ),
        # Help-text only changes below (no schema change).
        migrations.AlterField(
            model_name='profile',
            name='areas_of_interest',
            field=models.TextField(blank=True, help_text='Choose from the following: Education (hours vary), Events (hours vary),\n\t\tGarden Crew (Tuesday 9:00am – 11am), Garden Crew (Thursday 10:00am – 12pm),\n\t\tLibrary, Tuesday (9:00am - 12pm), Maintenance (Hours vary), Marketing/Publicity (Hours vary)\n\t\tMembership (Hours vary), Office Assistant (Hours vary), Plant Records (Tuesday 9:00am - 11am)\n\t\tPropagation Crew (Tuesday 9:00am – 11am), Volunteer Program (Hours vary)', max_length=512),
        ),
        migrations.AlterField(
            model_name='profile',
            name='medical_conditions',
            field=models.TextField(blank=True, help_text='Please enter any medical conditions you may have. Write N/A if none.', max_length=512),
        ),
        migrations.AlterField(
            model_name='profile',
            name='phone',
            field=models.CharField(blank=True, help_text='Please enter your phone number in the following format: (XXX) XXX-XXXX', max_length=20),
        ),
    ]
|
# Project-Euler-#14-style search for the number < 1E6 with the longest
# Collatz chain.  NOTE: Python 2 dialect (`xrange`, print statements,
# integer `/` division) — will not run unmodified on Python 3.
from collections import deque
que = deque()
counter = 0
class Collatz:
    # Node in the *reverse* Collatz tree: value plus chain length (level).
    def __init__(self,value,parent):
        self.value = value
        if parent != None:
            self.level = 1 + parent.level
        else:
            self.level = 1
        self.parent = parent
# lengths[i] caches the chain length of the start value i+1.
lengths = [0 for _ in xrange(int(1E6))]
root = Collatz(1,None)
que.append(root)
longest = root
# Breadth-first expansion of the reverse Collatz tree rooted at 1.
while counter < 1E6 and que:
    node = que.popleft()
    if node.value < 1E6:
        lengths[node.value - 1] = node.level
        if node.level > longest.level:
            longest = node
    # A right child (n-1)/3 exists when n % 6 == 4; `/` is integer
    # division under Python 2 (would produce a float on Python 3).
    if (node.value - 1) % 6 == 3 and node.value > 4:
        node.rightchild = Collatz((node.value - 1)/3,node)
        if node.rightchild.value < 1E6: counter += 1
        que.append(node.rightchild)
    node.leftchild = Collatz(node.value * 2,node)
    if node.leftchild.value < 1E6: counter += 1
    # NOTE(review): only doubling-children below 1E3 are enqueued —
    # presumably a typo for 1E6; this truncates the tree walk, leaving
    # most entries to the fallback loop below.  Confirm intent.
    if node.leftchild.value < 1E3: que.append(node.leftchild)
maximus = longest.level
longValue = longest.value
# Fallback: compute chain lengths for any start value the tree walk missed,
# reusing cached lengths as soon as the trajectory drops below 1E6.
for i in xrange(int(1E6)):
    length = lengths[i]
    if length == 0:
        j = i+1
        while True:
            length += 1
            if j % 2 == 0:
                j /= 2
            else:
                j = 3*j + 1
            if j < 1E6 and lengths[j-1] > 0:
                length += lengths[j-1]
                break
        lengths[i] = length
        # NOTE(review): `lengths > maximus` compares the whole *list* to an
        # int (always True on Python 2), so longValue is clobbered on every
        # pass and maximus is never updated — almost certainly meant
        # `length > maximus`.  Harmless only because the final print
        # recomputes the answer from `lengths` directly.
        if lengths > maximus:
            longValue = i+1
print max(xrange(len(lengths)),key=lengths.__getitem__) + 1
|
#!/usr/bin/env python
#
# pKa - various programs and scripts for pKa value analysis, calculation and redesign
# Copyright (C) 2010 Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
import Numeric
def length(vector):
    """Return the Euclidean (L2) norm of *vector*.

    Parameters
    ----------
    vector : iterable of numbers

    Returns
    -------
    float
        sqrt of the sum of squared components; 0.0 for an empty vector.
    """
    import math
    # sum() over a generator replaces the manual accumulator loop;
    # plain multiplication replaces the math.pow(value, 2) call.
    return math.sqrt(sum(value * value for value in vector))
class MolDyn:
    """Toy molecular-dynamics / minimisation engine.

    One "atom" per group, scattered randomly around (200, 200, 200) and
    coupled pairwise by `spring` terms (installed via set_eqdists).
    Depends on the legacy `Numeric` package (pre-NumPy).
    """
    def __init__(self,grps):
        # grps: dict mapping group key -> group name; one atom per group.
        self.atoms={}
        count=1
        import random
        for grp in grps.keys():
            x=random.randint(-10,10)
            y=random.randint(-10,10)
            z=random.randint(-10,10)
            grp_name=grps[grp]
            pos=Numeric.array([200+x,200+y,200+z])
            vel=Numeric.array([0,0,0])
            acc=Numeric.array([0,0,0])
            self.atoms[grp_name]={'pos':pos,'vel':vel,'acc':acc}
            count=count+1
            #pass
        #
        # For each pair of atoms set the energy func
        #
        self.enes={}
        #for grp1 in grps.keys():
        #    self.enes[grp1]={}
        #    for grp2 in grps.keys():
        #        self.enes[grp1][grp2]=None
        #print self.enes
        return
    def set_eqdists(self,matrix):
        """Rebuild all pairwise spring terms; matrix[g1][g2] scaled by 50
        gives each spring's equilibrium distance."""
        #
        # Set new equilibrium distances
        #
        for grp1 in matrix.keys():
            self.enes[grp1]={}
            for grp2 in matrix[grp1].keys():
                self.enes[grp1][grp2]=spring(50.0*abs(matrix[grp1][grp2]))
        return
    def EM(self,steps,timestep=0.01):
        """Run `steps` iterations of noisy steepest descent.

        Each step jitters every atom, recomputes pairwise displacement
        vectors, sums spring forces into accelerations (all masses are 1)
        and moves atoms by acc * timestep**2.  Returns the accumulated
        |power| of the FINAL step only (diff is reset every iteration),
        as a crude convergence measure.
        """
        #
        # Do step steps of EM
        #
        import math
        for step in range(steps):
            diff=0.0
            #
            # Noise
            #
            dists={}
            for grp1 in self.atoms.keys():
                #
                # Apply random noise
                #
                import random
                nlvl=5
                x=random.randint(-nlvl,nlvl)
                y=random.randint(-nlvl,nlvl)
                z=random.randint(-nlvl,nlvl)
                noise=Numeric.array([x,y,z])
                self.atoms[grp1]['pos']=self.atoms[grp1]['pos']+noise/10.0
            #
            # Calculate distances
            #
            for grp1 in self.atoms.keys():
                dists[grp1]={}
                for grp2 in self.atoms.keys():
                    if grp1!=grp2:
                        dists[grp1][grp2]=self.atoms[grp2]['pos']-self.atoms[grp1]['pos']
            #
            # Calculate forces
            #
            for grp1 in self.atoms.keys():
                force=0.0
                for grp2 in self.atoms.keys():
                    if grp1!=grp2:
                        f_contrib,power=self.enes[grp1][grp2].get_force(dists[grp1][grp2])
                        force=force+f_contrib
                        diff=diff+power
                #
                # As set all masses to 1
                #
                self.atoms[grp1]['acc']=force/1.0
            #
            # Update positions
            #
            for grp in self.atoms.keys():
                movement=self.atoms[grp]['acc']
                #print movement
                self.atoms[grp]['pos']=self.atoms[grp]['pos']+movement*math.pow(timestep,2)
        return diff
#
# ------
#
class Dist_geom_EM(MolDyn):
    """MolDyn variant seeded by distance geometry instead of random coords.

    NOTE(review): this class looks unfinished — several latent defects are
    flagged inline; none are fixed here to keep the code byte-identical.
    """
    def __init__(self,grps):
        #
        # Just store the groups
        #
        # NOTE(review): unlike MolDyn.__init__, self.enes is never created
        # here, so set_eqdists below will raise AttributeError.
        # Also `null` is a single shared array: every atom's 'vel' and
        # 'acc' alias the SAME object.
        self.atoms={}
        for grp in grps.keys():
            name=grps[grp]
            null = Numeric.array([0,0,0])
            pos=Numeric.array([0,0,0]) # just init to zero for now
            self.atoms[name]={'pos':pos,'vel':null,'acc':null}
        return
    #
    # ---------
    #
    def set_eqdists(self,matrix):
        """Install spring terms from `matrix`, seed coordinates by distance
        geometry, then run two EM refinement passes."""
        #
        # Get the distances that we should try to match
        #
        # NOTE(review): .keys() followed by .sort() is Python 2 only.
        grps_ordered=matrix.keys()
        grps_ordered.sort()
        #
        self.dists=[]
        for grp1 in grps_ordered:
            dist_row=[]
            self.enes[grp1]={}
            for grp2 in grps_ordered:
                self.enes[grp1][grp2]=spring(50.0*abs(matrix[grp1][grp2]))
                dist_row.append(matrix[grp1][grp2])
            self.dists.append(dist_row)
        #
        # That's all ok, now do the distance geometry
        #
        # NOTE(review): distance_geometry returns None (see below), so
        # `positions[count]` will fail; additionally `count` is never
        # incremented, so every group would get positions[0] anyway.
        self.dists=Numeric.array(self.dists)
        positions=self.distance_geometry(self.dists)
        count=0
        for grp1 in grps_ordered:
            pos=positions[count]
            self.atoms[grp1]['pos']=pos
        #
        # Now we have a starting state. Do 10000 steps of EM
        #
        # NOTE(review): MolDyn.EM takes no `random` keyword — these calls
        # raise TypeError as written.
        diff_coarse=self.EM(steps=1000,random=None,timestep=0.01)
        #
        # Fine-tune
        #
        diff_fine=self.EM(steps=1000,random=None,timestep=0.001)
        return diff_fine
    #
    # -------
    #
    def distance_geometry(self,dists,error=0.5):
        #
        """From a set of distances produce coordinates that are consistent with them"""
        """The distances can vary +/- error A"""
        #
        #
        # Do triangle smoothing
        #
        # NOTE(review): self.dists is a Numeric array at this point and has
        # no .keys(); and `not_converged` starts as None, so on Python 2
        # `None > 0` is False and the smoothing loop never executes.
        # Also the docstring promises coordinates but the method returns
        # None (no embedding step is implemented).
        atoms=self.dists.keys()
        atoms.sort()
        not_converged=None
        round=0
        while not_converged>0:
            #
            # Maybe we will converge this time?
            #
            not_converged=0
            round=round+1
            #
            # Loop over all triangles
            #
            for atom1 in atoms:
                for atom2 in atoms:
                    for atom3 in atoms:
                        #
                        # Is dist atom1-atom2 <= atom1-atom3 + atom3-atom2?
                        #
                        if dists[atom1][atom2]>(dists[atom1][atom3]+dists[atom3][atom2])+3*error:
                            #
                            # Decrease the atom1,atom2 distance by error
                            #
                            dists[atom1][atom2]=dists[atom1][atom2]-error
                            dists[atom2][atom1]=dists[atom1][atom2]
                            not_converged=not_converged+1
                        #
                        # Is dist atom1-atom2 < dist atom1-atom3 - atom3-atom2 ?
                        #
                        if dists[atom1][atom2] < dists[atom1][atom3]-dists[atom3][atom2] -3*error:
                            #
                            # Increase the atom1,atom2 distance by error
                            #
                            dists[atom1][atom2]=dists[atom1][atom2]+error
                            dists[atom2][atom1]=dists[atom1][atom2]
                            not_converged=not_converged+1
            #
            # Converged?
            #
            # NOTE(review): Python 2 print statement.
            print 'Round: %4d, Incorrect triangles: %4d ' %(round,not_converged)
        return
#
# -------
#
class spring:
    """Harmonic (Hookean) pair potential with a fixed equilibrium distance."""

    def __init__(self, eq_dist):
        # Rest length of the spring; forces vanish at this separation.
        self.eq_dist = eq_dist

    def get_force(self, vector):
        """Return (force vector, |extension|) for the separation `vector`.

        The force is the gradient of E = extension**2, i.e. 2*extension
        along the separation vector.
        """
        extension = length(vector) - self.eq_dist
        return 2 * extension * vector, abs(extension)
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
from openerp.osv import osv
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import serialize_exception,content_disposition
def Ordenar_Lista(UnaLista, UnCampo, UnOrden):
    """Sort UnaLista (a list of dict-like rows) in place by field UnCampo.

    UnOrden == 'A' sorts ascending; any other value sorts descending.
    Returns None — the list is mutated, matching the original contract.
    """
    # The hand-rolled O(n^2) exchange sort is replaced by the built-in
    # Timsort (O(n log n)); ordering by UnCampo is unchanged.
    UnaLista.sort(key=lambda fila: fila[UnCampo], reverse=(UnOrden != 'A'))
def prep_csv(UnTexto):
    """Return UnTexto wrapped in double quotes when needed for CSV output.

    Wrapping happens when the text contains a comma, or when it opens with
    a double quote that is not matched by a closing double quote.
    """
    necesita_comillas = "," in UnTexto
    if not necesita_comillas and UnTexto:
        necesita_comillas = UnTexto[0] == '"' and UnTexto[-1] != '"'
    if necesita_comillas:
        return '"' + UnTexto + '"'
    return UnTexto
def prep_barras(UnTexto):
    """Wrap UnTexto in the [[...]] delimiters used for barcode fields."""
    return "".join(("[[", UnTexto, "]]"))
# http://www.emiprotechnologies.com/technical_notes/odoo-technical-notes-59/post/how-to-download-any-file-on-button-click-244
class Export_event_registration_gafetes(http.Controller):
    """Odoo HTTP controller that builds a downloadable badge ("gafete")
    file for the registrations selected in an event.export_registration
    wizard.  Uses the legacy Odoo (openerp) registry/cr/uid API.
    """
    @http.route('/web/binary/download_event_registration_gafetes', type='http', auth="public")
    @serialize_exception
    def download_document(self,model,field,id,filename=None, **kw):
        # `id` is the wizard record id; `model`/`field` come from the button
        # context.  NOTE(review): auth="public" exposes registration data to
        # unauthenticated callers — confirm this is intentional.
        print('----------------- download_document ------------------')
        registration_id = id
        wizard_obj = request.registry['event.export_registration']
        wizards = wizard_obj.read(request.cr, request.uid, [int(registration_id)], ['registration_ids'], context=None)
        wizard = wizards[0]
        registration_ids = wizard['registration_ids']
        print('----')
        print(registration_ids)
        Model = request.registry[model]
        # vamos a jalar los registros
        registration_obj = request.registry['event.registration']
        registrations = registration_obj.read(request.cr, request.uid, registration_ids,
            ['id', 'name', 'display_name', 'partner_id', 'partner_function', 'credential_printed', 'state'], context=None)
        # iniciemos un objeto partner para busquedas
        partner_obj = request.registry['res.partner']
        # cabeceras
        # fc accumulates one [[..]]-delimited line per printable registration
        # (see prep_barras); despite the module's CSV helper, this output is
        # NOT CSV.
        fc = ''
        for registration in registrations:
            # Skip badges already printed and cancelled registrations.
            if (not (registration['credential_printed'])) and (registration['state'] != 'cancel'):
                # si el partner_id corresponde a un ejecutivo, podemos extraer sus nombres y apellidos
                partner = partner_obj.read(request.cr, request.uid, registration['partner_id'][0],
                    ['is_company', 'name', 'names', 'last_name', 'mother_name', 'gender_suffix', 'title', 'parent_id'], context=None)
                if partner['is_company']:
                    # no tenemos un ejecutivo en la BD, solamente lo escrito en el registro
                    names = registration['name'] if registration['name'] else ""
                    apellido_p = ''
                    apellido_m = ''
                    cargo = registration['partner_function'] if registration['partner_function'] else ""
                    company = partner['name']
                else:
                    # es un ejecutivo de la BD, tenemos sus datos desagregados
                    names = partner['names'] if partner['names'] else ""
                    apellido_p = partner['last_name'] if partner['last_name'] else ""
                    apellido_m = partner['mother_name'] if partner['mother_name'] else ""
                    cargo = registration['partner_function'] if registration['partner_function'] else ""
                    company = partner['parent_id'][1]
                fc = fc + prep_barras(names) + prep_barras(apellido_p) + prep_barras(apellido_m) + prep_barras(cargo) + prep_barras(company) + '\n'
        if not fc:
            print('not fc')
            return request.not_found()
        else:
            print(' si fc')
            print(filename)
            if not filename:
                print('not filename')
                # Fallback filename, e.g. "event_registration_7" (no extension).
                filename = '%s_%s' % (model.replace('.', '_'), id)
            return request.make_response(fc,
                #[('Content-Type', 'application/octet-stream'),('Content-Disposition', content_disposition(filename))])
                [('Content-Type', 'application/octet-stream;charset=utf-8'),('Content-Disposition', content_disposition(filename))])
|
import numpy as np
import xarray as xr

# Gera um NetCDF com a média mensal da Tmax formatado para abrir no
# programa "ParaView", disponível em https://www.paraview.org/
# Baseado em: https://www.youtube.com/watch?v=xdrcMi_FB8Q

# Abre todos os arquivos Tmax*.nc como um único dataset e extrai a variável.
tmax = xr.open_mfdataset('/home/alexandre/Dropbox/grade_2020/data/netcdf_files/Tmax*.nc')['Tmax']

# Média climatológica mensal para todo o período (12 fatias).
media_mensal = tmax.groupby('time.month').mean('time')

# Eixos x/y/z no formato esperado pelo ParaView (z indexa os 12 meses).
eixos = {
    'z': np.arange(12),
    'y': tmax.latitude.values,
    'x': tmax.longitude.values,
}

# Monta o DataArray final e grava em disco.
saida = xr.DataArray(media_mensal.values, dims=('z', 'y', 'x'), coords=eixos)
saida.to_netcdf('teste_paraview.nc')
|
@staticmethod
def trim(raw_text):
return raw_text.strip().replace(",", "").replace("\n","").replace("\t","") |
# Compute and print powerball numbers.
# NOTE: Python 2 print statements; the trailing commas keep each draw on
# one output line.
###################################################
# Powerball function
# Student should enter function on the next lines.
import random
def powerball():
    # Five "white ball" draws in [0, 59] — duplicates are possible, unlike
    # a real lottery draw — followed by a Powerball draw in [0, 35].
    print "Today's numbers are "+str(random.randrange(0,60))+",",
    print str(random.randrange(0,60))+",",str(random.randrange(0,60))+",",
    print str(random.randrange(0,60))+ " and "+str(random.randrange(0,60))+".",
    print "The Powerball number is "+str(random.randrange(0,36))
###################################################
# Tests
# Student should not change this code.
powerball()
powerball()
powerball()
|
# Sweep the number of simulation points (120..240 in steps of 20) and, for
# each resolution, optimise an SOA drive signal with PSO in its own process.
# Results are collected in a Manager list and pickled at the end.
from soa import devices, signalprocessing, analyse, distort_tf
from soa.optimisation import PSO, run_test
import numpy as np
import multiprocessing
import pickle
from scipy import signal
import os
import matplotlib.pyplot as plt
# set dir to save data
directory = '../../data/'
# init basic params
num_points_list = np.arange(120, 260, 20)
time_start = 0
time_stop = 20e-9
# set PSO params
n = 3
iter_max = 150
rep_max = 1
max_v_f = 0.05
init_v_f = max_v_f
cost_f = 'mSE'
w_init = 0.9
w_final = 0.5
on_suppress_f = 2.0
# initial transfer function numerator and denominator coefficients
num = [2.01199757841099e85]
den = [
    1.64898505756825e0,
    4.56217233166632e10,
    3.04864287973918e21,
    4.76302109455371e31,
    1.70110870487715e42,
    1.36694076792557e52,
    2.81558045148153e62,
    9.16930673102975e71,
    1.68628748250276e81,
    2.40236028415562e90,
]
tf = signal.TransferFunction(num, den)
# run PSO tests in parallel with multiprocessing
pso_objs = multiprocessing.Manager().list()
jobs = []
for num_points in num_points_list:
    # make directory for this test
    direc = directory + '/num_points_{}'.format(num_points)
    if os.path.exists(direc) == False:
        os.mkdir(direc)
    # basic params
    t = np.linspace(time_start, time_stop, num_points)
    # define initial drive signal
    init_OP = np.zeros(num_points) # initial drive signal (e.g. a step)
    init_OP[:int(0.25*num_points)],init_OP[int(0.25*num_points):] = -1, 0.5
    # get initial output of initial signal and use to generate a target set point
    init_PV = distort_tf.getTransferFunctionOutput(tf,init_OP,t)
    sp = analyse.ResponseMeasurements(init_PV, t).sp.sp
    # NOTE(review): run_test is driven entirely by positional args — the
    # order below must match soa.optimisation.run_test exactly; confirm
    # against that signature before reordering anything.
    p = multiprocessing.Process(target=run_test,
                                args=(direc,
                                    tf,
                                    t,
                                    init_OP,
                                    n,
                                    iter_max,
                                    rep_max,
                                    init_v_f,
                                    max_v_f,
                                    w_init,
                                    w_final,
                                    True,
                                    'pisic_shape',
                                    on_suppress_f,
                                    True,
                                    None,
                                    cost_f,
                                    None,
                                    True,
                                    True,
                                    sp,
                                    pso_objs,))
    jobs.append(p)
    p.start()
for job in jobs:
    job.join()
# pickle PSO objects so can re-load later if needed
PIK = directory + '/pickle.dat'
data = pso_objs
with open(PIK, 'wb') as f:
    pickle.dump(data, f)
|
# Ad-hoc exerciser for the split.cash sandbox REST API: one small function
# per endpoint (contacts, payments, agreements, bank connections, PayID).
# Most functions only print the HTTP response and return None.
import requests
import json
# NOTE(security): a live bearer token is hardcoded here — even for a
# sandbox this should be loaded from the environment, not committed.
headers = {
    'content-type': "application/json",
    'accept': "application/json",
    'authorization': "Bearer Nj0ZHCvwlweSSml3Iyydbj3kSD_eK0WiSTixdOh7ng4"
}
# getting all contacts
def get_contact():
    response = requests.get(
        "https://api.sandbox.split.cash/contacts", headers=headers)
    print(response.status_code)
    print(response.text)
# getting a single customer details from id
def get_single_contact():
    print("\nIndividual Customer\n")
    url = "https://api.sandbox.split.cash/contacts/2e115cc1-1566-41d4-ad66-56814e1c55b6"
    response = requests.get(url, headers=headers)
    print(response.text)
# NOTE(review): the triple-quoted block below is a no-op string literal
# used to park disabled code, not a docstring.
"""
#getting your bank account id
print("\nMy Bank ID\n")
response = requests.get("https://api.sandbox.split.cash/bank_accounts", headers = headers)
print(response.text)
"""
# list all payments
def list_payments():
    url = "https://api.sandbox.split.cash/payments"
    response = requests.get(url, headers=headers)
    print(response.status_code)
    #resp_dict = json.loads(response.text)
    # for i in resp_dict:
    # print("\n",i,"\n",resp_dict[i])
# get a single payment
def get_a_particular_payment():
    url = "https://api.sandbox.split.cash/payments/PB.3uub"
    response = requests.get(url, headers=headers)
    print(response.text)
# making a payment
def making_a_payment():
    print("\nMaking a payment to a contact\n")
    payload = {
        'description': 'making_api_payment',
        'matures_at': '2021-02-18T00:00:00Z',
        'payouts': [
            {
                'amount': 300,
                'description': 'making_api_payment',
                'recipient_contact_id': '2e115cc1-1566-41d4-ad66-56814e1c55b6'
            }]
    }
    url = "https://api.sandbox.split.cash/payments"
    response = requests.post(url, json.dumps(payload), headers=headers)
    print(response.status_code)
# creating a contact
def create_contact():
    payload = {
        'name': 'creating a contactwithapi',
        'email': 'createcontactviaapi@gmail.com',
        'branch_code': '011101',
        'account_number': '003333333'
    }
    url = "https://api.sandbox.split.cash/contacts/anyone"
    response = requests.post(url, json.dumps(payload), headers=headers)
    print(response.status_code)
# propose unassigned agreement
def unassigned_agreements():
    # NOTE(review): 'null' here is the literal string "null", not JSON null
    # — confirm the API accepts that.
    payload = {
        'expiry_in_seconds': '900',
        'terms':
        {'per_payout':
            {
                'min_amount': 'null',
                'max_amount': '1000',
            },
            'per_frequency':
            {
                'days': '7',
                'max_amount': '10000'
            }
        }
    }
    url = "https://api.sandbox.split.cash/unassigned_agreements"
    response = requests.post(url, json.dumps(payload), headers=headers)
    print(response.text)
# list all unassigned agreements
def list_all_unassigned_agreements():
    url = "https://api.sandbox.split.cash/unassigned_agreements"
    response = requests.get(url, headers=headers)
    resp_dict = json.loads(response.text)
    for i in resp_dict:
        print("\n", i, ":", "\n", resp_dict[i])
# return one unassigned agreeement
def single_unassigned_agreements():
    url = "https://api.sandbox.split.cash/unassigned_agreements/A.je3"
    response = requests.get(url, headers=headers)
    print(response.text)
# get a payment request
def get_a_payment_request():
    url = "https://api.sandbox.split.cash/payment_request/PR.w20"
    response = requests.get(url, headers=headers)
    print(response.status_code)
# direct debiting/ payment request
def direct_debit():
    payload = {
        'description': '21.01 api_payment_request',
        'matures_at': '2021-01-21T02:10:56.000Z',
        'amount': '300',
        'authoriser_contact_id': 'c4e6d5b7-a6a0-4b68-b481-0caaac0d3875', # one making payment
        'your_bank_account_id': '8cd332a0-8cf3-479b-9e80-c17a22bf63ad'
    }
    url = "https://api.sandbox.split.cash/payment_requests"
    response = requests.post(url, json.dumps(payload), headers=headers)
    print("If successful, the response code should be = 200", response.status_code)
# list paymentrequest
def list_payment_request():
    url = "https://api.sandbox.split.cash/payment_requests/outgoing"
    response = requests.get(url, headers=headers)
    print(response.text)
# get payment request history
def get_paymentrequest_history():
    url = "https://api.sandbox.split.cash/payment_requests/PR.w20/history"
    # NOTE(review): local variable is misspelled ("resposne") here and in
    # the next function — harmless but worth cleaning up.
    resposne = requests.get(url, headers=headers)
    print(resposne.text)
# list incoming payments requests
def list_incoming_paymentrequest():
    url = "https://api.sandbox.split.cash/payment_requests/incoming"
    resposne = requests.get(url, headers=headers)
    print(resposne.text)
# approve a payment request
def approve_paymentrequest():
    # NOTE(review): "<paymentrequest_id>" is a literal placeholder — this
    # call will 404 until a real id is substituted.
    url = "https://api.sandbox.split.cash/payment_requests/<paymentrequest_id>/approve"
    response = requests.post(url, headers=headers)
    print(response.status_code)
""" #Decline a payment request
def decline_paymentrequest():
    url = "https://api.sandbox.split.cash/payment_requests/<paymentrequest_id>/decline"
    response = requests.post(url, headers = headers)
    print(response.status_code)
"""
# list all bank connections
def all_bank_connections():
    url = "https://api.sandbox.split.cash/bank_connections"
    response = requests.get(url, headers=headers)
    resp_load = json.loads(response.text)
    for i in resp_load:
        print("\n", i, ":", resp_load[i], "\n")
    # print(response.text)
# particular bank connection
def one_bank_connection():
    url = "https://api.sandbox.split.cash/bank_connections/f7e79b02-317c-492b-9d5f-f3b5024f8e76"
    response = requests.get(url, headers=headers)
    print(response.text)
def delete_bank_connection():
    url = "https://api.sandbox.split.cash/bank_connections/a43aa03b-6b1e-476f-bea7-8a5909118af5"
    response = requests.delete(url, headers=headers)
    print(response.status_code)
# list all open agreement
def list_agreement():
    url = "https://api.sandbox.split.cash/open_agreements"
    response = requests.get(url, headers=headers)
    print(response.text)
# create an open agreement
def open_agreements():
    payload = {
        'title': 'api_open_agreements',
        'terms': {
            'per_payout': {
                'min_amount': '10',
                'max_amount': '1000'
            },
            'per_frequency': {
                'days': '7',
                'max_amount': '1000'
            }
        }
    }
    url = "https://api.sandbox.split.cash/open_agreements"
    response = requests.post(url, json.dumps(payload), headers=headers)
    print(response.text)
# activate a closed-open agreeemnt
def activate_closed_open_agreement():
    url = "https://api.sandbox.split.cash/open_agreements/OA.10v/activate"
    response = requests.post(url, headers=headers)
    print(response.status_code)
# close an active open agreements
def close_active_agreements():
    url = "https://api.sandbox.split.cash/open_agreements/OA.10v/close"
    response = requests.post(url, headers=headers)
    print(response.status_code)
# add a receivable contact(PayID)
def receivable_contact():
    payload = {
        'name': 'rpayid',
        'email': 'r@vishakhatesting.com',
        'payid_email': 'random@vishakhatesting.com'
    }
    url = 'https://api.sandbox.split.cash/contacts/receivable'
    response = requests.post(url, json.dumps(payload), headers=headers)
    print(response.text)
#
def agreement_withKYC():
    # NOTE(review): 'terms' is nested INSIDE 'authoriser' here, unlike the
    # other payloads — verify against the API schema; it may belong at the
    # top level of the payload.
    payload = {
        'authoriser': {
            'name': 'KYC contact',
            'email': 'kyc@gmail.com',
            'bank_account': {
                'branch_code': '123576',
                'account_number': '45678901'
            },
            'terms': {
                'per_payout': {
                    'min_amount': 'null',
                    'max_amount': 'null'
                },
                'per_frequency': {
                    'days': 'null',
                    'max_amount': 'null'
                }
            }
        }
    }
    url = 'https://api.sandbox.split.cash/agreements/kyc'
    response = requests.post(url, json.dumps(payload), headers=headers)
    print(response.text)
# NOTE(review): module-level side effect — importing this file fires a live
# API request.  Consider moving this call under a __main__ guard.
agreement_withKYC()
#simulate_incoming_payid
def simulate_incoming_payid():
    payload={
        'payid_email' : 'random@vishakhatesting.com',
        'amount': '40'
    }
    url = 'https://api.sandbox.split.cash/simulate/incoming_payid_payment'
    response = requests.post(url, json.dumps(payload), headers = headers)
    print(response.status_code)
'''
This file will test and evaluate all of our current selection techniques,
and display them (ideally) on a single graph
'''
import os
from os.path import isdir, isfile, join
from os import listdir
import argparse
import matplotlib.pyplot as plt
import numpy as np
import train_al
def run_benchmark(iterations=10,batch_size=10):
    """Benchmark each active-learning query strategy and plot accuracy vs
    number of labelled samples.

    For every strategy, train_al.train_classifier is run `iters` (20) times
    and the per-iteration accuracies are averaged before plotting.
    """
    # NOTE(review): clfs is never appended to — the trained classifiers
    # returned by train_classifier are currently discarded.
    clfs = []
    xaxis = [(i + 1) * batch_size for i in range(iterations)]
    queries = ["random", "uncertainty", "entropy", "ceal"]
    for query in queries:
        #TODO: Have train_classifier develop an image result which displays
        # the confusions?
        iters = 20
        avg = 0
        avg_acc = np.zeros(iterations)
        for i in range(iters):
            (acc,clf) = train_al.train_classifier(join(os.getcwd(), "data"), False, iterations, batch_size, query, False, False)
            avg_acc += np.array(acc)
            avg += acc[-1]
        avg_acc /= iters
        # Final-iteration accuracy averaged over all repeats.
        print("[BENCHMARK]", query, avg / iters)
        plt.plot(xaxis, avg_acc)
    plt.legend(queries)
    plt.xlabel("Labeled Samples")
    plt.ylabel("% Accuracy")
    plt.title("Active Learning Selection Accuracies")
    plt.show()
if __name__=='__main__':
    # NOTE(review): the parser is built but never used (no add_argument /
    # parse_args) — run_benchmark always runs with its defaults.
    parser = argparse.ArgumentParser(description='Testing benchmarks for various active learning seleciton techniques')
    run_benchmark()
|
import getopt
import sys
import time
def usage():
    # Python 2 print statement — this whole script targets Python 2.
    print '''Usage:
    -h: Show help infomation
    -l: Show all table in hbase
    -t {table} show table descriptors
    -t {table} -k {key} : show cell
    -t {table} -k {key} -c {column} : show the coulmn
    -t {table} -k {key} -c {column} -v {version} :show more version
    '''
class get_list_hbase:
    """Placeholder HBase reader — every method is still a stub."""
    def __init__(self):
        pass
    def get_list_table(self):
        pass
    def get_column_description(self):
        pass
    def get_column_value(self):
        pass
    def get_value_by_key(self):
        pass
    def get_column_version(self):
        pass
def main(argv):
    """Parse the command line and dispatch to the (stub) HBase helpers."""
    table_name = ''
    # NOTE(review): 'kye' is a typo and is never read; the code below uses
    # `key`, which is only bound inside the '-k' branch — if -k is absent
    # the `key` checks raise NameError.
    kye = ''
    cloumn = ''
    version = ''
    try:
        # NOTE(review): optstring bugs — 'k:' is missing entirely (so the
        # '-k' branch below is unreachable) and 'v' lacks a ':' even though
        # usage() documents -v as taking a version argument.
        opts, args = getopt.getopt(argv, "lht:c:v", ['help','list'])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt , arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit(2)
        elif opt in ("-l", "--list"):
            pass
        elif opt == '-t':
            table_name = arg
        elif opt == '-k':
            key = arg
        elif opt == '-c':
            cloumn = arg
        elif opt == '-v':
            version = int(arg)
    # Dispatch from most to least specific option combination; all branches
    # are still unimplemented.
    if (table_name and key and cloumn and version ):
        pass
        sys.exit(0)
    if (table_name and key ):
        pass
    if (table_name):
        pass
    usage()
    sys.exit(1)
if __name__ == '__main__':
    main(sys.argv[1:])
class Person:
    """A person with a name, a surname and a qualification level (starts at 1)."""

    def __init__(self, n, s):
        # Keep the given and family names; everyone begins at qualification 1.
        self.name = n
        self.surname = s
        self.qualification = 1

    def show_person(self):
        # Print a one-line human-readable summary of this person.
        print("{} {}. Qualification is: {}".format(
            self.name, self.surname, self.qualification))
# Demo: create a person and display it; the trailing input() keeps the
# console window open until the user presses Enter.
p1 = Person("Ted", "Karlson")
p1.show_person()
input()
|
# Read three integers and report the smallest and the largest of them.
num1 = int(input('Digite o número 1: '))
num2 = int(input('Digite o número 2: '))
num3 = int(input('Digite o número 3: '))

# The built-ins replace the hand-rolled comparison chains, which also
# avoids their subtle reliance on strict inequalities for tie-breaking.
menor = min(num1, num2, num3)
maior = max(num1, num2, num3)

print(f'Menor valor: {menor}')
print(f'Maior Valor: {maior}')
def flag(arr):
    """Sort a list containing only 0s, 1s and 2s in place.

    Dutch-national-flag partition: one pass, O(n) time, O(1) space.
    `low` and `high` are the frontiers of the settled 0-region and
    2-region; `i` scans the unknown middle.  Returns None.
    """
    low, high = 0, len(arr) - 1
    i = 0
    while i <= high:
        value = arr[i]
        if value == 0:
            # Send the 0 to the left frontier; the swapped-in element was
            # already examined, so advance both pointers.
            arr[i], arr[low] = arr[low], arr[i]
            low += 1
            i += 1
        elif value == 1:
            i += 1
        elif value == 2:
            # Send the 2 to the right frontier; do NOT advance i — the
            # swapped-in element is still unexamined.
            arr[i], arr[high] = arr[high], arr[i]
            high -= 1
def main():
    """Partition a sample array with flag() and display the result."""
    sample = [1, 0, 2, 1, 0]
    flag(sample)
    print(sample)

main()
"""
Base classes for biomolecules
"""
import numpy as np
from ..templates.aminoacids import templates_aa, one_to_three_aa
from ..templates.glycans import templates_gl, one_to_three_gl
from ..ff import compute_neighbors, LJ
from ..pdbIO import _dump_pdb
from ..visualization.view3d import _view3d
class Biomolecule():
    """Base class for biomolecules"""
    def __init__(self):
        # NOTE(review): these are bare attribute *reads*, not assignments —
        # instantiating Biomolecule directly raises AttributeError.  They
        # read as a sketch of the attributes subclasses must provide;
        # confirm and consider abstract properties instead.
        self.sequence
        self.coords
        self._names
        self._elements
        self._offsets
        self._exclusions
    def __len__(self):
        # Length is the number of residues in the sequence.
        return len(self.sequence)
    def get_torsionals(self):
        # Subclasses must implement their own torsional-angle extraction.
        raise NotImplementedError()
    def dump_pdb(self, filename, b_factors=None, to_file=True):
        # Delegates to pdbIO._dump_pdb; returns its result (PDB text when
        # to_file is False).
        return _dump_pdb(self, filename, b_factors, to_file)
    def view3d(self):
        # Delegates to visualization.view3d._view3d.
        return _view3d(self)
    def energy(self, cut_off=6., neighbors=None):
        """
        Compute the internal energy of a molecule using a pair-wise
        Lennard-Jones potential.
        Parameters
        ----------
        cut_off : float
            Only pair of atoms closer than cut_off will be used to compute the
            energy. Default 6. Only valid when neighbors is None.
        neighbors: set of tuples
            Pairs of atoms used to compute the energy. If None (default) the
            list of neighbors will be computed using a KD-tree (from scipy),
            see ff.compute_neighbors for details.
        Returns
        ----------
        energy : float:
            molecular energy in Kcal/mol
        """
        coords = self.coords
        if neighbors is None:
            neighbors = compute_neighbors(coords, self._exclusions, cut_off)
        energy = LJ(neighbors, coords, self._elements)
        return energy
    def rgyr(self):
        """
        Calculates radius of gyration for a molecule
        ToDo mass-weighted version ¿?
        """
        # Unweighted RMS distance of all atoms from the geometric centre.
        coords = self.coords
        center = np.mean(coords, 0)
        return np.mean(np.sum((coords - center)**2, 1)) ** 0.5
class TestTube():
    """
    Singleton "container" for the simulation environment.

    Every call to TestTube() yields the same shared instance; the keyword
    arguments only take effect on the very first instantiation and are
    ignored afterwards.
    """
    _instance = None

    def __new__(cls, solvent=None, temperature=298, force_field='simple_lj',
                *args, **kwargs):
        # Lazily create the one shared instance and record the environment
        # settings on the class itself.
        if not cls._instance:
            cls._instance = super(TestTube, cls).__new__(
                cls, *args, **kwargs)
            cls.solvent = solvent
            cls.temperature = temperature
            cls.force_field = force_field
            cls.molecules = []
        return cls._instance

    def energy(self):
        """
        Compute the energy of the system.
        ToDo: It should be possible to compute partial energy,
        like without solvent or excluding molecules.
        At the moment this method lives in Protein class
        """
        pass

    def add(self, name):
        """Register a molecule by name, refusing duplicates with a warning."""
        if name in self.molecules:
            print(
                'We already have a copy of {:s} in the test tube!'.format(name))
        else:
            self.molecules.append(name)

    def remove(self, name):
        """Drop a molecule by name; unknown names are silently ignored."""
        if name in self.molecules:
            self.molecules.remove(name)
|
# Capture training data for a self-driving model: for every video frame from
# the GStreamer stream, request the current steering command over a TCP
# socket, then save the paired frames/commands to disk.
import socket
import pandas as pd
import numpy as np
from watch_gst_stream import watch_stream
frames = []
steer_cmds = []
# Control server on the vehicle; the socket is opened at import time.
host = '192.168.1.120'
port = 9001
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
def append_frame(image_arr):
    # Per-frame callback: ask the server for the control state that matches
    # this frame, then store both.  `steer` is a tuple of raw bytes —
    # presumably (drive, steer); confirm against the server protocol.
    s.sendall(b'next')
    steer = tuple(s.recv(1024))
    print('Current controls', steer)
    steer_cmds.append(steer)
    frames.append(image_arr)
try:
    # Collect up to 3001 frames at 10 fps; Ctrl-C stops early and keeps
    # whatever was captured so far.
    watch_stream(append_frame, fps=10, n_frames=3001)
except KeyboardInterrupt:
    pass
print('Done training; saving data')
steer_df = pd.DataFrame(steer_cmds).rename(columns={0: 'drive', 1: 'steer'})
steer_df.to_csv('training_data/steer_{}.csv'.format(len(steer_df)), index=False)
np.save('training_data/frames_{}.npy'.format(len(frames)), np.array(frames))
s.close()
from doorman import models
class ConfigManager(object):
    """Placeholder configuration object: attribute writes are discarded and
    reads of any missing attribute yield None (both hooks are deliberate
    no-ops for now)."""
    def __init__(self):
        pass
    def __setattr__(self, key, value):
        # Swallow every assignment — nothing is ever stored on the instance.
        pass
    def __getattr__(self, item):
        # Called only for missing attributes; implicitly returns None.
        pass
# Shared module-level instance used by the rest of the app.
config = ConfigManager()
|
from django.db import models
import uuid
class Category(models.Model):
    # Simple lookup table: a Product belongs to at most one Category.
    name = models.CharField(max_length=200)
class Product(models.Model):
    """Catalogue product (price up to 99999.99), optionally categorised."""
    uuid = models.UUIDField(default=uuid.uuid4,primary_key=True)
    name = models.CharField(max_length=200)
    description = models.TextField()
    price = models.DecimalField(max_digits=7, decimal_places=2)
    # BUG FIX: auto_now/auto_now_add were swapped — created_at must be
    # stamped once on insert (auto_now_add) while last_modification must be
    # refreshed on every save (auto_now).  Requires a new migration.
    created_at = models.DateTimeField(auto_now_add=True)
    last_modification = models.DateTimeField(auto_now=True)
    cover = models.ImageField(null=True)
    # PROTECT: a category that still has products cannot be deleted.
    category = models.ForeignKey(Category, models.PROTECT, null=True)
    def __str__(self):
        return self.name + " " + "(" + str(self.price) + ")"
|
import ee
from time import sleep
ee.Initialize()
SHIFT_BEFORE = 60
def main ():
    """Sample GLAD forest-loss alert locations (2019+2020), attach
    pre-disturbance timing, jitter the points, and export them as an
    Earth Engine asset."""
    # Load in the pre-processed GLAD alerts
    glad_alerts = ee.Image('users/JohnBKilbride/SERVIR/real_time_monitoring/glad_alerts_2019_to_2020')
    # Get the projection that is needed for the study area
    projection = ee.Projection('EPSG:32648')
    # Define the username
    username = "JohnBKilbride"
    # Define the output location
    output_dir = "SERVIR/real_time_monitoring"
    # Kernel size (# of pixels)
    kernel_size = 64
    # Compute the kernel radius
    kernel_radius = ee.Number(kernel_size).divide(2)
    # Get the study area
    study_area = ee.Geometry.Polygon([[[104.0311, 14.3134],[104.0311, 12.5128],[106.0416, 12.5128],[106.0416, 14.3134]]], None, False)
    # Seperate the 2019 and 2020 glad data, renaming to a common band schema
    # (binary / alert_day / alert_year) so the two samples can be merged.
    glad_2019 = glad_alerts.select(['alertBinary19', 'alertDate19']) \
        .addBands(ee.Image.constant(2019).rename('year')) \
        .select(["alertBinary19","alertDate19", "year"],["binary","alert_day", "alert_year"]) \
        .toInt16()
    glad_2020 = glad_alerts.select(['alertBinary20', 'alertDate20']) \
        .addBands(ee.Image.constant(2020).rename('year')) \
        .select(["alertBinary20","alertDate20", "year"],["binary","alert_day", "alert_year"]) \
        .toInt16()
    # Take a stratified random sample of the 2019 layer
    sample_2019 = get_sample_of_disturbances(glad_2019, projection, study_area)
    sample_2020 = get_sample_of_disturbances(glad_2020, projection, study_area)
    # Merge the two different samples
    combined_samples = sample_2019.merge(sample_2020)
    # Add the "start date" to each of the images
    # This represents the first pre-disturbance observation that was actually valid (uses Landsat QA bands)
    output = ee.FeatureCollection(add_start_date(combined_samples)) \
        .select(['alert_day','alert_year','start_day','start_year'])
    # Apply a random displacement to each of the point locations
    output = apply_displacement(output, projection, kernel_radius)
    # Export the sample locations with the julian date of the disturbance to google drive
    # NOTE(review): the comment above says Google Drive, but the export goes
    # to an EE *asset* — confirm which is intended.
    task = ee.batch.Export.table.toAsset(
        collection = output,
        description = "Sample-Points-GLAD",
        assetId = "users/"+username+"/"+output_dir+"/sample_locations_2019_2020_50k"
    )
    task.start()
    return None
def add_start_date (sample_points):
    '''Get the timing for the "before" image'''
    # For each sampled alert, derive (start_day, start_year) by advancing
    # the alert date by SHIFT_BEFORE days; all arithmetic is server-side
    # Earth Engine, nothing is computed locally.
    # Mapped function to apply over the sample_points ee.FeatureCollection
    def inner_map (sample_point):
        # Cast the input
        sample_point = ee.Feature(sample_point)
        # Get the GLAD alert day and year
        alert_day = ee.Number(sample_point.get("alert_day"))
        alert_year = ee.Number(sample_point.get("alert_year"))
        # Construct the alert date as an ee.Date object
        glad_alert_date = ee.Date.fromYMD(ee.Number(alert_year), 1, 1) \
            .advance(ee.Number(alert_day), 'day')
        # Get the start date
        # NOTE(review): SHIFT_BEFORE is positive (60), so this shifts
        # *forward* in time despite the "before" naming — confirm the sign.
        start_day = ee.Number(glad_alert_date.advance(SHIFT_BEFORE, 'day').getRelative('day', 'year'))
        start_year = ee.Number(glad_alert_date.advance(SHIFT_BEFORE, 'day').get('year'))
        # Append the sampled values to the original feature
        output = sample_point.set({
            "start_day": start_day,
            "start_year": start_year
        })
        return output
    return sample_points.map(inner_map)
def apply_displacement(features, projection, kernel_radius):
    """Randomly displace each point within +/- 5*kernel_radius projection
    units on each axis, so sampled pixels are not always chip-centred."""
    # Get the original band names for later
    orig_prop_names = features.first().propertyNames()
    # Add the two random column
    features_random = features.randomColumn('random_x').randomColumn('random_y')
    # Apply an inner function which
    def inner_map (point):
        # Cast the point
        point = ee.Feature(point)
        # Get the geometry from the point
        point_geo = point.geometry()
        # Get the displacement amounts
        # (random - 0.5) * 10 * radius — presumably 10 is the pixel size in
        # metres for EPSG:32648, giving a shift in map units; verify.
        x_translate = ee.Number(point.get('random_x')).subtract(0.5).multiply(10).multiply(ee.Number(kernel_radius))
        y_translate = ee.Number(point.get('random_y')).subtract(0.5).multiply(10).multiply(ee.Number(kernel_radius))
        # Apply the displacement to the projection
        prj_trans = projection.translate(x_translate, y_translate)
        new_point_geo = ee.Geometry.Point(point_geo.transform(prj_trans).coordinates(), projection)
        return point.setGeometry(new_point_geo)
    # Drop the temporary random_x/random_y columns on the way out.
    return features_random.map(inner_map).select(orig_prop_names)
def get_sample_of_disturbances (image, projection, study_area):
    '''Draw a random stratified sample of disturbance locations.

    Returns an ee.FeatureCollection where each sampled point retains its
    geometry. Only the disturbance class (binary == 1) is sampled.
    '''
    sample_params = {
        'numPoints': 25000,
        'classBand': 'binary',
        'region': study_area,
        'scale': 10,
        'projection': projection,
        'seed': 57992,
        'classValues': [0, 1],
        'classPoints': [0, 25000],   # no samples from class 0, 25k from class 1
        'dropNulls': True,
        'tileScale': 1,
        'geometries': True
    }
    return image.stratifiedSample(**sample_params)
if __name__ == "__main__":
    # Script entry point: run the sampling/export pipeline defined in main().
    print("Beginning script...")
    main()
    print("\nProgram completed.")
|
"""Secure client implementation
This is a skeleton file for you to build your secure file store client.
Fill in the methods for the class Client per the project specification.
You may add additional functions and classes as desired, as long as your
Client class conforms to the specification. Be sure to test against the
included functionality tests.
Hints from part 1
## CreateFakeKey ## FIXED
It is important to bind the name of a file to its contents. Otherwise, if
a user creates files A and B, a malicious server could swap the names and have downloads
for A return the content of B and vice versa.
## CheckEncryptionMACDifferentKey ## FIXED
MAC keys and encryption keys should be different,
and this test case checks just that.
## CheckPaddingOracle ## pretty sure it's FIXED
A padding oracle attack is a chosen-ciphertext attack: if the attacker
can convince the client to decrypt (tampered) ciphertexts chosen by the attacker,
then a padding oracle attack allows the attacker to decrypt an encrypted message. In this
attack, the attacker modifies the ciphertext in some cleverly chosen fashion, asks the client
to decrypt it, and then observes the decryption process caused an invalid-padding error. If
the attacker can observe whether such an error occurred, then this leaks partial information;
after repeating this many times, an attacker can piece together all of these clues to deduce
what the original message must have been. These attacks are very powerful and are subtle
if you don’t know about them.
Failure of this test case typically indicates that you used encryption without authentication,
or that you decrypted some ciphertext without checking the MAC or before verifying the
validity of the MAC. Defend against padding oracle attacks by always verifying the MAC
before decrypting message contents.
## NameIsNotFromHash ## FIXED (Using HMAC, hope it's ok)
Verifies that IDs are not generated by hashing the filename.
That has the same problems outlined [below].
IDs should not be generated by encrypting the filename with
ECB mode or CBC/CTR mode with constant IV, as this leaks partial information. In
particular, if two filenames start with the same 16 bytes, then the adversary can detect this
(by noticing that the ciphertexts start with the same 16 bytes). Also, these modes allow
dictionary attacks: an adversary who can convince you to upload files under a name of the
attacker’s choice will be able to exploit the properties of ECB/CBC/CTR to figure out the
filename of your secret file, if the attacker can identify a list of candidate names for the secret
file. Finally, CTR mode with a constant IV is especially bad: it is vulnerable to the attacks
on pad reuse with the one-time pad.
"""
from base_client import BaseClient, IntegrityError
import json
from crypto import CryptoError
from util import *
def path_join(*strings):
    """Join the given string components with "/" separators.

    :param strings: the string components to join
    :returns: a single "/"-separated string
    """
    separator = '/'
    return separator.join(strings)
class Client(BaseClient):
    """Client for the secure file store.

    File contents are split into fixed-size chunks, each symmetrically
    encrypted and MACed (sym_pack), and a Merkle hash tree over the
    plaintext chunks is mirrored on the server so that uploads/downloads
    only transfer the chunks that changed. Sharing works through
    "[POINTER]" records that hold per-user encrypted key lists; resolve()
    follows pointer chains down to the "[DATA]" record.
    """

    def __init__(self, storage_server, public_key_server, crypto_object,
                 username):
        """Initialise the client and load (or create) its filename keys
        (sk_n, sk_n2) from a deterministic, signed server location."""
        super().__init__(storage_server, public_key_server, crypto_object,
                         username)
        self.client_storage = {}  # Make a place for the client to keep state for efficiency.
        # deterministic location for filename signature key; keyed with a
        # prefix of the private RSA exponent so only this user can derive it
        location = self.username + "/key_file"
        location = self.crypto.message_authentication_code(location, str(self.rsa_priv_key.key.d)[:64], "SHA256")
        # set key to what was previously stored
        encrypted_key = self.storage_server.get(location)
        if not encrypted_key:  # check if key exists
            # no key exists: generate fresh filename keys and persist them
            self.sk_n = self.crypto.get_random_bytes(32)  # key for hashing filenames into UIDs
            self.sk_n2 = self.crypto.get_random_bytes(32)  # key for hashing usernames in key lists
            keys = [self.sk_n, self.sk_n2]
            keys = json.dumps(keys)
            packed_key = self.asym_pack(keys, "key_file")
            self.storage_server.put(location, packed_key)  # add new secret key to the server, with signature attached.
        else:  # a previous key exists
            # verify the key is valid (asym_unpack checks the signature)
            keys = self.asym_unpack(encrypted_key, "key_file")
            keys = json.loads(keys)
            self.sk_n, self.sk_n2 = keys

    ######################################################################################
    #                               Helper Functions                                    ##
    ######################################################################################
    def hash(self, msg):
        """Return the SHA-256 cryptographic hash of *msg*."""
        return self.crypto.cryptographic_hash(msg, "SHA256")

    def hash2(self, msg, key):
        """Return the SHA-256 HMAC of *msg* under *key*.

        :param str msg: the message to authenticate
        :param str key: key to HMAC with
        :return: HMAC of msg
        :rtype: str
        """
        return self.crypto.message_authentication_code(msg, key, "SHA256")

    def asym_pack(self, pt, filename, to=None):
        """Securely pack a plaintext for storage on the insecure server.

        :param str pt: the plaintext to pack (at most 256 characters)
        :param str filename: the correct filename, bound into the signature
            to avoid swap attacks
        :param str to: user whose public key encrypts the msg (defaults to
            this client's own username)
        :returns: signature (512 chars, see asym_unpack) + ciphertext
        :rtype: str
        :raises CryptoError: if *pt* is too long or *to* is unknown
        """
        if not to:
            to = self.username
        if len(pt) > 256:
            raise CryptoError("plaintext too long")
        try:
            encrypted_key = self.crypto.asymmetric_encrypt(pt, self.pks.get_encryption_key(to))  # encrypt key
        except:
            raise CryptoError("bad 'to' user")
        # Sign ciphertext + hash(signer + filename) + "ct" so the server
        # cannot swap records between files or users.
        signed = encrypted_key + self.hash(self.username + filename) + "ct"
        signed = self.crypto.asymmetric_sign(signed, self.rsa_priv_key)  # sign key
        return signed + encrypted_key

    def asym_unpack(self, ct, filename, sender=None):
        """Unpack a value that was stored with asym_pack.

        :param str ct: the packed value (signature + ciphertext)
        :param str filename: the correct filename to avoid swap attacks
        :param str sender: user whose public key is used to verify
            (defaults to this client's own username)
        :returns: the plaintext, or None if *ct* is empty
        :rtype: str
        :raises IntegrityError: if the signature is invalid or decryption fails
        """
        if not ct:
            return None
        if not sender:
            sender = self.username
        # verify the signature before touching the ciphertext
        # (the first 512 characters are the signature; see asym_pack)
        signed, check = ct[:512], ct[512:] + self.hash(sender + filename) + "ct"
        if self.crypto.asymmetric_verify(check, signed, self.pks.get_signature_key(sender)):
            ct = ct[512:]  # if valid, extract the cipher text
            try:
                pt = self.crypto.asymmetric_decrypt(ct, self.elg_priv_key)  # decrypt
            except:
                raise IntegrityError("decryption failure")
        else:
            raise IntegrityError("Bad Signature")
        return pt

    def sym_pack(self, pt, sk_m, sk_s, filename):
        """Symmetrically encrypt and MAC a plaintext string (encrypt-then-MAC).

        :param str pt: the plaintext to be packed
        :param str sk_m: symmetric key for encrypting
        :param str sk_s: symmetric key for signing (must differ from sk_m)
        :param str filename: the correct filename to avoid swap attacks
        :return: ciphertext with IV and MAC concatenated
        :rtype: str
        """
        iv = self.crypto.get_random_bytes(16)  # get 16-byte IV (size of digest_block)
        value = self.crypto.symmetric_encrypt(pt, sk_m, 'AES', 'CBC', iv)  # encrypt w/AES-CBC
        value += iv  # append IV to msg CT
        # MAC covers ciphertext + IV + hash(filename) to bind name to content
        value += self.crypto.message_authentication_code(value + self.hash(filename), sk_s, "SHA256")
        return value

    def sym_unpack(self, ct, sk_m, sk_s, filename):
        """Verify and symmetrically decrypt a sym_pack ciphertext.

        The MAC is verified BEFORE decryption (padding-oracle defence).

        :param ct: ciphertext to be unpacked (or falsy for "missing")
        :param sk_m: symmetric key for encrypting
        :param sk_s: symmetric key for signing
        :param str filename: the correct filename to avoid swap attacks
        :return: the plaintext, or None if *ct* is empty
        :rtype: str
        :raises IntegrityError: on a bad MAC or corrupted ciphertext
        """
        if not ct:
            return None
        sign = ct[-64:]  # get MAC from end of msg CT (64 hex chars)
        resp = ct[:-64]  # remove MAC from msg CT
        if self.crypto.message_authentication_code(resp + self.hash(filename), sk_s, "SHA256") != sign:  # verify
            raise IntegrityError("Bad Signature")
        try:
            iv = resp[-32:]  # the IV is stored as the last 32 hex chars of the CT
            resp = resp[:(-32)]  # remove IV from message CT
            resp = self.crypto.symmetric_decrypt(resp, sk_m, "AES", "CBC", iv)  # decrypt msg w/AES-CBC
        except:  # any exception should be due to bit flipping by malicious server.
            raise IntegrityError("Data Corrupt")
        return resp

    def get_chunks(self, value, size=256):
        """Return *value* broken into chunks of at most *size* characters.

        :param str value: the value to break down
        :param int size: how large a given chunk is
        :return: chunks which together make up the value (empty list for "")
        :rtype: list[str]
        """
        chunks = []
        while value:
            if len(value) > size:
                chunks.append(value[:size])
                value = value[size:]
            else:
                chunks.append(value)
                value = ""
        return chunks

    def build_hash_tree(self, chunks, sk_t):
        """Build a client-side Merkle tree over *chunks*.

        Leaves are [HMAC(chunk, sk_t), None, None, index]; internal nodes are
        [hash(left+right), left_subtree, right_subtree].

        :param chunks: the list of chunks which make up the file
        :param sk_t: key used for the keyed leaf hashes
        :return: the root node of the tree
        """
        # make hash_list of leaf nodes
        hashes = []
        for i in range(len(chunks)):
            hashes.append([self.hash2(chunks[i], sk_t), None, None, i])  # integrity hash
        # organize into merkle tree, one level per pass
        while len(hashes) > 1:
            temp = []
            for i in range(0, len(hashes), 2):  # pair up the hashes
                try:
                    parent = self.hash(hashes[i][0]+hashes[i+1][0])  # add the hash of two hashes to temp
                    temp.append([parent, hashes[i], hashes[i + 1]])
                except IndexError:
                    parent = self.hash(hashes[i][0]+hashes[i][0])  # if the num hashes is odd, hash last with itself
                    temp.append([parent, hashes[i], None])
            hashes = temp
        # NOTE(review): raises IndexError for an empty chunk list — callers
        # appear to always pass at least one chunk; confirm.
        return hashes[0]

    def put_tree(self, node_loc, tree):
        """Recursively place tree node hashes on the server (assumes a
        secure/unguessable base location).

        :param node_loc: location to start placing nodes at; children go to
            node_loc + '1' and node_loc + '2'
        :param tree: the tree (or subtree, or None) to place onto the server
        """
        if tree:
            # Server copy is untrusted; discrepancies are caught client-side
            # by the compare_* helpers against the keyed leaf HMACs.
            self.storage_server.put(node_loc, tree[0])
            self.put_tree(node_loc + '1', tree[1])
            self.put_tree(node_loc + '2', tree[2])

    def compare_server_tree(self, location, tree):
        """Compare a client-side tree against the server copy rooted at
        *location* and return the chunk indices that differ.

        :param location: server location of the tree root
        :param tree: client-side tree to compare against
        :return: indexes of chunks with discrepancies
        """
        bad = []
        server_root = self.storage_server.get(location)
        if tree and tree[0] != server_root:
            if len(tree) == 4:
                # leaf node: report its chunk index
                return [tree[3]]
            elif not server_root:
                # server has no node here: every leaf below differs
                return self.get_leaf_indices(tree)
            else:
                bad += self.compare_server_tree(location + '1', tree[1])
                bad += self.compare_server_tree(location + '2', tree[2])
        return bad

    def update_server_tree(self, location, tree, old_tree=None):
        """Like compare_server_tree, but also pushes the new client-side
        hashes to the server as it finds discrepancies.

        :param location: server location of the tree root
        :param tree: new client-side tree
        :param old_tree: optional previous client-side tree; when given,
            comparison is done client-vs-client instead of client-vs-server
        :return: indexes of chunks with discrepancies
        """
        bad = []
        server_root = self.storage_server.get(location)
        if old_tree:
            if tree and old_tree:
                if tree[0] != old_tree[0]:
                    self.storage_server.put(location, tree[0])
                    try:
                        return [tree[3]]
                    except IndexError:
                        bad += self.update_server_tree(location + '1', tree[1], old_tree[1])
                        bad += self.update_server_tree(location + '2', tree[2], old_tree[2])
        elif tree and tree[0] != server_root:
            self.storage_server.put(location, tree[0])
            if len(tree) == 4:
                return [tree[3]]
            else:
                bad += self.update_server_tree(location + '1', tree[1])
                bad += self.update_server_tree(location + '2', tree[2])
        return bad

    def compare_hash_tree(self, old_tree, new_tree):
        """Compare two client-side hash trees and return the chunk indices
        where they differ.

        :param old_tree: previous tree (may be None/empty)
        :param new_tree: new tree (may be None/empty)
        :return: indexes of chunks with discrepancies
        """
        bad = []
        if new_tree and old_tree:
            if new_tree[0] != old_tree[0]:
                try:
                    # leaf nodes carry their chunk index at position 3
                    return [new_tree[3]]
                except IndexError:
                    bad += self.compare_hash_tree(old_tree[1], new_tree[1])
                    bad += self.compare_hash_tree(old_tree[2], new_tree[2])
        elif new_tree:
            # nothing to compare against: all leaves of the new tree differ
            bad = self.get_leaf_indices(new_tree)
        return bad

    def get_leaf_indices(self, tree):
        """Return the chunk indices of every leaf node under *tree*."""
        indices = []
        if tree:
            try:
                return [tree[3]]
            except IndexError:
                indices += self.get_leaf_indices(tree[1])
                indices += self.get_leaf_indices(tree[2])
        return indices

    def resolve(self, uid):
        """Follow a chain of "[POINTER]" records from *uid* down to a
        "[DATA]" record.

        :param uid: starting location
        :return uid: the location of the [DATA] file ("invalid" if revoked)
        :return sk_m: the decryption (message) symmetric key
        :return sk_s: the signature symmetric key
        :return sk_t: the tree-hash symmetric key
        :rtype: str, str, str, str
        """
        sk_n2 = self.sk_n2
        sk_m = None
        sk_s = None
        sk_t = None
        owner = self.username
        requester = self.username
        while True:
            res = self.storage_server.get(uid)
            if res is None or res.startswith("[DATA]"):
                return uid, sk_m, sk_s, sk_t
            elif res.startswith("[POINTER]"):
                pointer = res[10:]  # remove "[POINTER] " tag
                signature = pointer[-512:]
                key_list = pointer[:-512]
                # key lists are always signed by the owner of the pointer
                if not self.crypto.asymmetric_verify(key_list + owner, signature, self.pks.get_signature_key(owner)):
                    raise IntegrityError("bad key_list verification")
                key_list = json.loads(key_list)  # Load the dictionary with names
                try:
                    pointer = key_list[self.hash2(requester, sk_n2)]  # go to your place in dictionary
                    if sk_m and sk_s:  # if you have symmetric keys, you are following the chain
                        # only the first 6 entries are pointer fields; shared
                        # entries carry two extra owner-access keys at the end
                        for i in range(6):
                            pointer[i] = self.sym_unpack(pointer[i], sk_m, sk_s, uid)
                    else:  # if you do not have symmetric keys, you should own this pointer.
                        pointer = [self.asym_unpack(p, uid) for p in pointer]
                except KeyError:
                    # our entry was removed (revoked): park on a sentinel record
                    self.storage_server.put("invalid", "restricted")
                    return "invalid", None, None, None
                requester = owner  # you are now requesting as the owner of the last pointer
                uid, sk_m, sk_s, owner, sk_n2, sk_t = pointer[:6]  # update the location, keys and who owns the pointer
            else:
                raise IntegrityError()

    ######################################################################################
    #                               Member Functions                                    ##
    ######################################################################################
    def upload(self, name, value, revoke=False, sk_m=None, sk_s=None, sk_t=None):
        """Store *value* under *name*, creating keys/pointer for a new file
        or pushing only the changed chunks for an existing one.

        When *revoke* is True, *name* is treated as the raw server location
        and the supplied keys are used directly (internal use by revoke()).
        """
        # Set UID as a hash of the username and filename, use salt/signed hash
        if not revoke:
            uid = self.hash(path_join(self.username, name))  # join username and file name
            uid = self.crypto.message_authentication_code(uid, self.sk_n, "SHA256")  # use HMAC to hash/salt
            uid, sk_m, sk_s, sk_t = self.resolve(uid)  # use hashed uid to resolve
        else:
            uid = name
        # If there are no keys, then this is a new file
        if not sk_m or not sk_s:
            sk_m = self.crypto.get_random_bytes(32)  # generate a symmetric key for the message
            sk_s = self.crypto.get_random_bytes(32)  # key for signing
            sk_t = self.crypto.get_random_bytes(32)  # tree key
            location = self.crypto.get_random_bytes(64)
            new = True
        else:
            location = uid
            new = False
        # Encrypt message chunk by chunk, binding each chunk to its slot
        pt_chunks = self.get_chunks(value)
        ct_chunks = []
        for i in range(len(pt_chunks)):
            ct_chunks.append(self.sym_pack(pt_chunks[i], sk_m, sk_s, path_join(location,str(i))))  # you always need the chunks encrypted
        hash_tree = self.build_hash_tree(pt_chunks, sk_t)  # stored under location/tree12212...
        # no-state upload: push everything
        if new:
            for i in range(len(ct_chunks)):
                self.storage_server.put(path_join(location, str(i)), ct_chunks[i])  # location/###
            self.put_tree(path_join(location, "tree"), hash_tree)
        else:  # stateful upload: push only what changed
            if name in self.client_storage:
                state_htree = self.client_storage[name][1]
                if state_htree[0] == self.storage_server.get(path_join(location, "tree")):  # the root hashes are equal
                    # server matches our last-seen state, so diff the new tree
                    # against our cached tree and push only those chunks
                    change = self.update_server_tree(path_join(location, "tree"), hash_tree, state_htree)
                else:
                    change = self.update_server_tree(path_join(location, "tree"), hash_tree)
            else:
                change = self.update_server_tree(path_join(location, "tree"), hash_tree)
            for i in change:
                self.storage_server.put(path_join(location, str(i)), ct_chunks[i])
        # pack/store data
        self.client_storage[name] = [value, hash_tree, pt_chunks]  # update client state
        value = "[DATA]"
        self.storage_server.put(location, value)
        # setup pointer to file location, if it does not yet exist
        if new:
            key_list = {}
            sk_n2 = self.crypto.get_random_bytes(32)  # key for HMAC of names in key_list
            pointer = [location, sk_m, sk_s, self.username, sk_n2, sk_t]  # make list of keys and location
            pointer = [self.asym_pack(x, uid) for x in pointer]  # encrypt each item
            key_list[self.hash2(self.username, self.sk_n2)] = pointer
            key_list = json.dumps(key_list)
            signature = self.crypto.asymmetric_sign(key_list + self.username, self.rsa_priv_key)  # sign key_list as owner
            key_list += signature
            key_list = "[POINTER] " + key_list
            self.storage_server.put(uid, key_list)

    def download(self, name, revoke=False, uid=None, sk_m=None, sk_s=None, sk_t=None):
        """Fetch and decrypt the file stored under *name*.

        Uses the cached client state to transfer only changed chunks when
        possible. Returns None if the file is missing or access was revoked.

        When *revoke* is True the caller supplies *uid* and keys directly
        (internal use by revoke()).
        """
        # Set UID as a hash of the username and filename, use salt/signed hash
        if not revoke:
            uid = self.hash(path_join(self.username, name))  # join username and file name
            uid = self.crypto.message_authentication_code(uid, self.sk_n, "SHA256")  # use HMAC to hash/salt
            uid, sk_m, sk_s, sk_t = self.resolve(uid)  # use hashed uid to resolve
        # Decrypt message
        resp = self.storage_server.get(uid)  # get CT from uid
        if resp is None or resp == "restricted":  # check if empty or revoked
            return None
        if name in self.client_storage:
            state_htree = self.client_storage[name][1]
            if state_htree[0] == self.storage_server.get(path_join(uid, "tree")):
                # the top hashes match, return the stored value
                pass
            else:
                # find the changes in the file and update the client version
                changes = self.compare_server_tree(path_join(uid, "tree"), state_htree)
                for i in changes:
                    ct_chunk = self.storage_server.get(path_join(uid, str(i)))
                    self.client_storage[name][2][i] = self.sym_unpack(ct_chunk, sk_m, sk_s, path_join(uid, str(i)))
                self.client_storage[name][0] = "".join(self.client_storage[name][2])  # update client state value
        else:  # no state exists, download all chunks
            self.client_storage[name] = ["", [], []]
            i = 0
            ct_chunk = self.storage_server.get(path_join(uid, str(i)))
            while ct_chunk:
                self.client_storage[name][2].append(self.sym_unpack(ct_chunk, sk_m, sk_s, path_join(uid, str(i))))
                i += 1
                ct_chunk = self.storage_server.get(path_join(uid, str(i)))
            self.client_storage[name][0] = "".join(self.client_storage[name][2])  # update client state value
            self.client_storage[name][1] = self.build_hash_tree(self.client_storage[name][2], sk_t)
        return self.client_storage[name][0]

    def share(self, user, name):
        """
        Create a new entry on your key_list file and add the user you intend
        to share with, then create a signed message which will let them find
        your pointer, and access their entry on the key_list file with a
        symmetric key.

        :param user: who you are sharing with
        :param name: the file you plan to share
        :return: message, which will be sent to 'user'
        :rtype: str
        """
        uid = self.hash(path_join(self.username, name))  # join username and file name
        uid = self.crypto.message_authentication_code(uid, self.sk_n, "SHA256")  # use HMAC to hash/salt
        # open the pointer
        pointer = self.storage_server.get(uid)
        pointer = pointer[10:]  # remove "[POINTER] " tag
        signature = pointer[-512:]  # split off signature
        key_list = pointer[:-512]  # split off key_list
        if not self.crypto.asymmetric_verify(key_list + self.username, signature, self.pks.get_signature_key(self.username)):
            raise IntegrityError("bad key_list signature")
        key_list = json.loads(key_list)  # Load the dictionary with names
        # Get the info for the Data/Pointer that this pointer is aimed at
        try:
            pointer = key_list[self.hash2(self.username, self.sk_n2)]
            pointer = [self.asym_unpack(p, uid) for p in pointer]
        except KeyError:
            raise IntegrityError("own key entry missing")
        # make the new pointer entry, encrypted under fresh symmetric keys
        sk_m = self.crypto.get_random_bytes(32)  # generate a symmetric key for the message
        sk_s = self.crypto.get_random_bytes(32)  # key for signing
        sk_n2 = pointer[4]  # key for HMAC of name (shared for the whole pointer)
        pointer = [self.sym_pack(p, sk_m, sk_s, uid) for p in pointer]
        owner_access = [self.asym_pack(sk_m, uid), self.asym_pack(sk_s, uid)]  # let owner remember keys for revocation
        pointer += owner_access  # append the owner's keys to the back of the pointer
        key_list[self.hash2(user, sk_n2)] = pointer
        key_list = json.dumps(key_list)
        signature = self.crypto.asymmetric_sign(key_list + self.username, self.rsa_priv_key)  # sign key_list as owner
        key_list += signature
        key_list = "[POINTER] " + key_list
        self.storage_server.put(uid, key_list)
        # build the share message: where to look and the keys to read it with
        msg = [uid, sk_m, sk_s, sk_n2]
        msg = [self.asym_pack(ms, "msg", user) for ms in msg]
        msg = json.dumps(msg)
        return msg

    def receive_share(self, from_username, new_name, message):
        """
        Take the share info and turn it into a new pointer located at this
        user's UID for *new_name*.

        :param from_username: the sharer
        :param new_name: the new filename for this user's storage
        :param message: encrypted message from sharer
        :return: None
        :raises IntegrityError: if the pointer fails to round-trip the server
        """
        uid = self.hash(path_join(self.username, new_name))  # join username and file name
        uid = self.crypto.message_authentication_code(uid, self.sk_n, "SHA256")  # use HMAC to hash/salt
        # verify and decrypt the share message
        message = json.loads(message)
        message = [self.asym_unpack(msg, "msg", from_username) for msg in message]
        location, sk_m, sk_s, sk_n2 = message
        key_list = {}
        # sk_t slot is empty: tree keys travel via the sharer's pointer chain
        pointer = [location, sk_m, sk_s, from_username, sk_n2, ""]  # make list of keys and location
        pointer = [self.asym_pack(p, uid) for p in pointer]  # encrypt each part of the pointer
        key_list[self.hash2(self.username, self.sk_n2)] = pointer
        key_list = json.dumps(key_list)
        key_list += self.crypto.asymmetric_sign(key_list + self.username, self.rsa_priv_key)  # sign key_list as owner
        key_list = "[POINTER] " + key_list
        self.storage_server.put(uid, key_list)
        if self.storage_server.get(uid) != key_list:
            raise IntegrityError("Bad Upload")

    def revoke(self, user, name):
        """
        Remove *user* from the list of people with access rights, re-key and
        relocate the file, and update the remaining users with the new keys.

        :param user: name of the user to be revoked
        :param name: file to revoke the user from
        :return: True if success, False otherwise (e.g. caller is not owner)
        :rtype: bool
        """
        uid = self.hash(path_join(self.username, name))  # join username and file name
        uid = self.crypto.message_authentication_code(uid, self.sk_n, "SHA256")  # use HMAC to hash/salt
        # load the pointer list
        pointer = self.storage_server.get(uid)
        pointer = pointer[10:]  # remove "[POINTER] " tag
        signature = pointer[-512:]
        key_list = pointer[:-512]
        if not self.crypto.asymmetric_verify(key_list + self.username, signature, self.pks.get_signature_key(self.username)):
            raise IntegrityError("bad key_list signature")
        key_list = json.loads(key_list)  # Load the dictionary with names
        # generate a new location and set of symmetric keys for the file
        sk_m = self.crypto.get_random_bytes(32)  # generate a symmetric key for the message
        sk_s = self.crypto.get_random_bytes(32)  # key for signing
        sk_n2 = self.crypto.get_random_bytes(32)  # key for HMAC
        sk_t = self.crypto.get_random_bytes(32)  # key for trees
        location = self.crypto.get_random_bytes(64)
        new_pointer = [location, sk_m, sk_s, self.username, sk_n2, sk_t]
        # get the old keys
        old_pointer = key_list[self.hash2(self.username, self.sk_n2)]
        old_pointer = [self.asym_unpack(p, uid) for p in old_pointer]
        # update the file: only the owner (whose pointer targets [DATA]) may revoke
        if self.storage_server.get(old_pointer[0]).startswith("[POINTER]"):
            print("you can't revoke if you don't own")
            return False
        # re-download with the old keys, re-upload to the new location/keys
        file = self.download(name, True, old_pointer[0], old_pointer[1], old_pointer[2], old_pointer[5])
        self.upload(new_pointer[0], file, True, new_pointer[1], new_pointer[2], new_pointer[5])
        # clean up the old data record
        self.storage_server.delete(old_pointer[0])
        # update your keys
        pointer = [self.asym_pack(np, uid) for np in new_pointer]
        key_list[self.hash2(self.username, self.sk_n2)] = pointer
        del key_list[self.hash2(user, old_pointer[4])]  # remove the offending user
        # update everyone else's keys
        keys = list(key_list.keys())
        keys.remove(self.hash2(self.username, self.sk_n2))  # don't update the owner pointer
        for key in keys:
            pointer = key_list[key]
            # the last two entries are the owner-access copies of each
            # sharee's symmetric keys (see share())
            share_sk_m = self.asym_unpack(pointer[-2], uid)
            share_sk_s = self.asym_unpack(pointer[-1], uid)
            foo = [self.sym_pack(np, share_sk_m, share_sk_s, uid) for np in new_pointer]
            key_list[key] = foo + pointer[-2:]
        key_list = json.dumps(key_list)
        key_list += self.crypto.asymmetric_sign(key_list + self.username, self.rsa_priv_key)  # sign key_list as owner
        key_list = "[POINTER] " + key_list
        self.storage_server.put(uid, key_list)
        if self.storage_server.get(uid) != key_list:
            raise IntegrityError("Bad Upload")
        return True
if __name__ == "__main__":
    # A basic unit test suite for the Client to demonstrate its functions.
    from servers import PublicKeyServer, StorageServer
    from crypto import Crypto

    print("Initializing servers and clients...")
    pks = PublicKeyServer()
    server = StorageServer()
    crypto = Crypto()
    alice = Client(server, pks, crypto, "alice")
    bob = Client(server, pks, crypto, "bob")
    carol = Client(server, pks, crypto, "carol")
    dave = Client(server, pks, crypto, "dave")

    print("testing private functions")
    a = "f" * 256
    asmCT = alice.asym_pack(a, "test")
    if a == alice.asym_unpack(asmCT, "test"):
        print("simple asym enc/dec work")
    asmCT = alice.asym_pack(a, "test", 'bob')
    if a == bob.asym_unpack(asmCT, "test", 'alice'):
        print("shared asym enc/dec work")

    a = "The quick brown fox jumped over the lazy dog!1" * 1000
    b = "The quick brown fox jumped over the lazy dog!1" * 999 + "The quick brown fox jumped over the lazy dog!2"
    c = "The quick brown fox jumped over the lazy dog!1" * 499 + "The quick brown fox jumped over the lazy dog!2" + "The quick brown fox jumped over the lazy dog!1" * 500

    print("test that the chunk/tree system works")
    # BUG FIX: build_hash_tree requires a tree key (sk_t); the original calls
    # omitted it and raised TypeError.
    tree_key = alice.crypto.get_random_bytes(32)
    chunks_a = alice.get_chunks(a)
    print("chunks work: ", "".join(chunks_a) == a)
    tree_a = alice.build_hash_tree(chunks_a, tree_key)
    alice.put_tree("test_tree a", tree_a)
    chunks_b = alice.get_chunks(b)
    tree_b = alice.build_hash_tree(chunks_b, tree_key)
    alice.put_tree("test_tree b", tree_b)
    chunks_c = alice.get_chunks(c)
    tree_c = alice.build_hash_tree(chunks_c, tree_key)
    alice.put_tree("test_tree c", tree_c)

    # BUG FIX: use distinct loop variables so the test strings a/b are not
    # clobbered by the comparison loops (the originals shadowed a and b,
    # making the length/sym_pack checks below operate on a single chunk).
    for chunk_a, chunk_b in zip(chunks_a, chunks_b):
        e = compute_edits(chunk_a, chunk_b)
        if e:
            print(e)
    for chunk_b, chunk_c in zip(chunks_b, chunks_c):
        e = compute_edits(chunk_b, chunk_c)
        if e:
            print(e)

    test_key = alice.crypto.get_random_bytes(32)
    tlen = json.dumps(tree_a)
    print(len(a))
    print(len(tlen))
    print(len(alice.sym_pack(a, test_key, test_key, test_key)))
    print(alice.compare_hash_tree(tree_a, tree_b))
    print(alice.compare_server_tree("test_tree a", tree_a))
    print(alice.compare_server_tree("test_tree b", tree_b))
    print(alice.compare_server_tree("test_tree b", tree_a))
    print(len(chunks_a[0]))
    print(len(alice.sym_pack(chunks_a[0], test_key, test_key, test_key)))

    print("Testing client put and share...")
    a = "The quick brown fox jumped over the lazy dog!1"
    alice.upload("a", a)
    b = alice.download("a")
    if b == a:
        print("success!")
    else:
        print("a: ", a)
        print("b: ", b)
        print("Failed :(")
    alice.upload("a", "b")
    m = alice.share("bob", "a")
    bob.receive_share("alice", "q", m)
    m = bob.share("carol", "q")
    carol.receive_share("bob", "w", m)
    m = alice.share("dave", "a")
    dave.receive_share("alice", "e", m)

    print("Testing Bob, Carol, and Dave getting their new shares...")
    print(bob.download('q'))
    assert bob.download("q") == "b"
    assert carol.download("w") == "b"
    assert dave.download("e") == "b"

    print("Revoking Bob...")
    alice.revoke("bob", "a")
    dave.upload("e", "c")

    print("Testing Bob, Carol, and Dave getting their shares...")
    assert alice.download("a") == "c"
    assert bob.download("q") != "c"
    assert carol.download("w") != "c"
    assert dave.download("e") == "c"

    print("Testing restarting PKS and clients...")
    pks2 = PublicKeyServer()
    alice2 = Client(server, pks2, crypto, "alice")
    bob2 = Client(server, pks2, crypto, "bob")
    assert alice2.rsa_priv_key.publickey() == bob2.pks.get_signature_key("alice")
    assert alice2.elg_priv_key.publickey() == bob2.pks.get_encryption_key("alice")

    # remove the on-disk key files generated during the test run
    crypto._remove_keyfile("alice")
    crypto._remove_keyfile("bob")
    crypto._remove_keyfile("carol")
    crypto._remove_keyfile("dave")
    print("Basic tests passed.")
|
# Compute the falling factorial a = 54*53*...*28 (= 54!/27!) and b = 27!,
# then print d / a**i for i = 1..999, where d = 54!.
a, b = 1, 1
n, m = 54, 27
for i in range(m):
    a *= n - i
    b *= i + 1

# d = n!
d = 1
for i in range(1, n + 1):
    d *= i

# Successive powers of a in the denominator.
aa = a
for i in range(1, 1000):
    f = d / aa
    print(f"{f:.2f} {i}")
    aa *= a
|
# encoding: utf-8
from functools import wraps
from validator import validate as validator_validate
from flask import jsonify, wrappers, request
from .response import *
from .pagination import PaginatedDataView
def handle_exception(decorated):
    """Decorator that converts any exception raised by *decorated* into a
    JSON error response, and serialises Response results to JSON.

    :param decorated: the view function to wrap
    :return: the wrapped view function
    """
    @wraps(decorated)
    def inner(*args, **kwargs):
        try:
            result = decorated(*args, **kwargs)
        except Exception as e:
            # BUG FIX: BaseException has no .message attribute in Python 3;
            # str(e) is the portable way to get the error text.
            return jsonify(Response(ResponseCodeEnum.ERROR, str(e)))
        if isinstance(result, Response):
            return jsonify(result)
        return result
    return inner
def _get_view_data_by_collection(view_class, collection):
res = []
for r in collection:
res.append(view_class(r).data())
if hasattr(view_class, 'name'):
result_data = {
'{}s'.format(getattr(view_class, 'name')): res
}
else:
result_data = res
return result_data
def simple_to_view(view_class):
    """
    Decorator factory: render the wrapped view's return value through
    *view_class* and wrap it in a JSON response.

    :param view_class: view class used to serialise the result
    :return: flask json response
    """
    def decorator(decorated):
        @wraps(decorated)
        def inner(*args, **kwargs):
            outcome = handle_exception(decorated)(*args, **kwargs)
            # Already-serialised results pass straight through.
            if isinstance(outcome, Response):
                return jsonify(outcome)
            if isinstance(outcome, wrappers.Response):
                return outcome
            if isinstance(outcome, list):
                payload = _get_view_data_by_collection(view_class, outcome)
            elif isinstance(outcome, PaginatedDataView):
                payload = _get_view_data_by_collection(view_class, outcome.collection)
                payload['pagination'] = {
                    "page": outcome.page,
                    "total_record": outcome.total_count,
                    "page_size": outcome.page_size,
                    "total_page": outcome.total_page
                }
            else:
                payload = view_class(outcome).data()
            return jsonify(Response(result=payload))
        return inner
    return decorator
def validate(rules):
    """
    Decorator factory: validate ``request.json`` against *rules* before
    invoking the wrapped view; on failure, respond with a JSON error
    carrying the validation errors.
    """
    def decorator(decorated):
        @wraps(decorated)
        def inner(*args, **kwargs):
            outcome = validator_validate(rules, request.json)
            if outcome.valid:
                return decorated(*args, **kwargs)
            return jsonify(Response(ResponseCodeEnum.ERROR, str(outcome.errors)))
        return inner
    return decorator
|
import random
import numpy as np
import matplotlib.pyplot as plt
import csv
def rand_seed(m, b, num=2):
    """Generate *num* random points around the line y = m*x + b and write
    them to ``data_Q3.csv`` as ``x, y, label`` rows.

    Half the points are offset below the line and half above; the label is
    +1 or -1 depending on the side and the sign of the slope.

    :param m: slope of the separating line
    :param b: intercept of the separating line
    :param num: total number of points to generate
    """
    # create empty lists for coordinates and labels
    x_coor = []
    y_coor = []
    label = []
    # split the requested count between the two classes
    pos_num = int(num / 2)
    neg_num = num - pos_num
    # points offset below the line
    for i in range(pos_num):
        x = random.randint(0, 600)
        r = random.randint(3, 600)
        y = m * x + b - r
        # save the coordinate of x and y
        x_coor.append(x)
        y_coor.append(y)
        # label depends on the sign of the slope
        label.append(1 if m >= 0 else -1)
    # points offset above the line
    for i in range(neg_num):
        x = random.randint(0, 600)
        r = random.randint(3, 600)
        y = m * x + b + r
        x_coor.append(x)
        y_coor.append(y)
        label.append(-1 if m >= 0 else 1)
    with open('data_Q3.csv', 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        # BUG FIX: iterate over the points actually generated instead of a
        # hard-coded range(2000), which raised IndexError for any num != 2000
        # (including the default num=2).
        for i in range(len(x_coor)):
            writer.writerow([x_coor[i], y_coor[i], label[i]])
if __name__ == '__main__':
    # Pick a random slope and intercept for the separating line.
    m = random.randint(-4, 4)
    b = random.randint(0, 5)
    # Evaluate the line over the x range (kept for reference/plotting).
    x = np.arange(500)
    y = m * x + b
    # Generate 2000 labelled points around the line and write them to CSV.
    rand_seed(m, b, num=2000)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.