index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
14,500 | d71e5a37b65b0ad0ebcc3378ba9347b1ddb72563 | from django.apps import AppConfig
class CssframeworksConfig(AppConfig):
    """Django AppConfig registering the CSSFrameworks application."""
    # Label/dotted path Django uses to refer to this app.
    name = 'CSSFrameworks'
|
14,501 | 48f7381a79dfcd3b1b1438dba8f7a3f51bc85962 | import requests
from lxml import etree
def handle_douyin_web_share():
    """Fetch a Douyin user share page and print the nickname text nodes found on it."""
    profile_url = 'https://www.douyin.com/share/user/88445518961'
    # A desktop browser user-agent; without it the share page may refuse to render.
    request_headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
    }
    response = requests.get(url=profile_url, headers=request_headers)
    document = etree.HTML(response.text)
    # xpath() returns a (possibly empty) list of text nodes.
    nickname = document.xpath('//p[@class="nickname"]/text()')
    print(nickname)
handle_douyin_web_share() |
14,502 | c58abacf58a65b37f1844af3a5f27b80d12f8460 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\Documents\CEID\Βασεις Δεδομενων\ProjectDB\Recruit.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_RecruitWindow(object):
    """Auto-generated (PyQt5 uic 5.11.3) UI definition for the recruiter main window.

    The window holds three tabs (My Account, Jobs, Firm) above a button row
    (Log out / Save / Reset).  Do not hand-edit layout logic: regenerating
    from Recruit.ui will discard any changes made here.
    """

    def setupUi(self, RecruitWindow):
        """Build all widgets and layouts and install them on *RecruitWindow*."""
        RecruitWindow.setObjectName("RecruitWindow")
        RecruitWindow.resize(650, 350)
        RecruitWindow.setMinimumSize(QtCore.QSize(650, 350))
        font = QtGui.QFont()
        font.setFamily("Bahnschrift Light")
        RecruitWindow.setFont(font)
        self.centralwidget = QtWidgets.QWidget(RecruitWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        # --- bottom button row (grid row 2): Exit | spacer | Save | Reset ---
        self.Reset = QtWidgets.QPushButton(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.Reset.sizePolicy().hasHeightForWidth())
        self.Reset.setSizePolicy(sizePolicy)
        self.Reset.setObjectName("Reset")
        self.gridLayout.addWidget(self.Reset, 2, 3, 1, 1)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem, 2, 1, 1, 1)
        self.Save = QtWidgets.QPushButton(self.centralwidget)
        self.Save.setObjectName("Save")
        self.gridLayout.addWidget(self.Save, 2, 2, 1, 1)
        self.Exit = QtWidgets.QPushButton(self.centralwidget)
        self.Exit.setObjectName("Exit")
        self.gridLayout.addWidget(self.Exit, 2, 0, 1, 1)
        # --- tab container ---
        self.Tabs = QtWidgets.QTabWidget(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.Tabs.sizePolicy().hasHeightForWidth())
        self.Tabs.setSizePolicy(sizePolicy)
        self.Tabs.setMaximumSize(QtCore.QSize(16777215, 16777215))
        font = QtGui.QFont()
        font.setFamily("Bahnschrift Light")
        font.setBold(False)
        font.setWeight(50)
        self.Tabs.setFont(font)
        self.Tabs.setTabPosition(QtWidgets.QTabWidget.North)
        self.Tabs.setTabShape(QtWidgets.QTabWidget.Rounded)
        self.Tabs.setIconSize(QtCore.QSize(16, 16))
        self.Tabs.setElideMode(QtCore.Qt.ElideNone)
        self.Tabs.setUsesScrollButtons(False)
        self.Tabs.setDocumentMode(False)
        self.Tabs.setTabsClosable(False)
        self.Tabs.setTabBarAutoHide(False)
        self.Tabs.setObjectName("Tabs")
        # --- "My Account" tab: account credentials and personal details ---
        self.ACCOUNT = QtWidgets.QWidget()
        self.ACCOUNT.setObjectName("ACCOUNT")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.ACCOUNT)
        self.gridLayout_2.setContentsMargins(10, 10, 10, 10)
        self.gridLayout_2.setSpacing(5)
        self.gridLayout_2.setObjectName("gridLayout_2")
        spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout_2.addItem(spacerItem1, 3, 0, 1, 4)
        self._email = QtWidgets.QLabel(self.ACCOUNT)
        self._email.setObjectName("_email")
        self.gridLayout_2.addWidget(self._email, 2, 2, 1, 1)
        self._regdate = QtWidgets.QLabel(self.ACCOUNT)
        self._regdate.setObjectName("_regdate")
        self.gridLayout_2.addWidget(self._regdate, 2, 0, 1, 1)
        self.Email = QtWidgets.QLineEdit(self.ACCOUNT)
        self.Email.setObjectName("Email")
        self.gridLayout_2.addWidget(self.Email, 2, 3, 1, 1)
        self.Regdate = QtWidgets.QDateTimeEdit(self.ACCOUNT)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.Regdate.sizePolicy().hasHeightForWidth())
        self.Regdate.setSizePolicy(sizePolicy)
        self.Regdate.setFrame(True)
        self.Regdate.setReadOnly(False)
        self.Regdate.setButtonSymbols(QtWidgets.QAbstractSpinBox.PlusMinus)
        self.Regdate.setProperty("showGroupSeparator", False)
        self.Regdate.setDisplayFormat("dd-MM-yyyy HH:mm:ss")
        self.Regdate.setObjectName("Regdate")
        self.gridLayout_2.addWidget(self.Regdate, 2, 1, 1, 1)
        self.Username = QtWidgets.QLineEdit(self.ACCOUNT)
        self.Username.setEnabled(True)
        self.Username.setReadOnly(True)
        self.Username.setObjectName("Username")
        self.gridLayout_2.addWidget(self.Username, 0, 1, 1, 1)
        self._password = QtWidgets.QLabel(self.ACCOUNT)
        self._password.setObjectName("_password")
        self.gridLayout_2.addWidget(self._password, 0, 2, 1, 1)
        self.Name = QtWidgets.QLineEdit(self.ACCOUNT)
        self.Name.setObjectName("Name")
        self.gridLayout_2.addWidget(self.Name, 1, 1, 1, 1)
        self.Password = QtWidgets.QLineEdit(self.ACCOUNT)
        self.Password.setEchoMode(QtWidgets.QLineEdit.PasswordEchoOnEdit)
        self.Password.setObjectName("Password")
        self.gridLayout_2.addWidget(self.Password, 0, 3, 1, 1)
        self._surname = QtWidgets.QLabel(self.ACCOUNT)
        self._surname.setObjectName("_surname")
        self.gridLayout_2.addWidget(self._surname, 1, 2, 1, 1)
        self._name = QtWidgets.QLabel(self.ACCOUNT)
        self._name.setObjectName("_name")
        self.gridLayout_2.addWidget(self._name, 1, 0, 1, 1)
        self.Surname = QtWidgets.QLineEdit(self.ACCOUNT)
        self.Surname.setObjectName("Surname")
        self.gridLayout_2.addWidget(self.Surname, 1, 3, 1, 1)
        self._username = QtWidgets.QLabel(self.ACCOUNT)
        self._username.setObjectName("_username")
        self.gridLayout_2.addWidget(self._username, 0, 0, 1, 1)
        self.Tabs.addTab(self.ACCOUNT, "")
        # --- "Jobs" tab: edit a selected job posting ---
        self.WORKPOS = QtWidgets.QWidget()
        self.WORKPOS.setObjectName("WORKPOS")
        self.gridLayout_3 = QtWidgets.QGridLayout(self.WORKPOS)
        self.gridLayout_3.setContentsMargins(10, 10, 10, 10)
        self.gridLayout_3.setSpacing(5)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.J_position = QtWidgets.QLabel(self.WORKPOS)
        self.J_position.setObjectName("J_position")
        self.gridLayout_3.addWidget(self.J_position, 2, 0, 1, 1)
        self.JSalary = QtWidgets.QSpinBox(self.WORKPOS)
        self.JSalary.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
        self.JSalary.setMaximum(1000000000)
        self.JSalary.setObjectName("JSalary")
        self.gridLayout_3.addWidget(self.JSalary, 5, 1, 1, 1)
        self.J_recruiter = QtWidgets.QLabel(self.WORKPOS)
        self.J_recruiter.setObjectName("J_recruiter")
        self.gridLayout_3.addWidget(self.J_recruiter, 2, 3, 1, 1)
        self.JTitle = QtWidgets.QLineEdit(self.WORKPOS)
        self.JTitle.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.JTitle.setObjectName("JTitle")
        self.gridLayout_3.addWidget(self.JTitle, 2, 1, 1, 1)
        self.J_salary = QtWidgets.QLabel(self.WORKPOS)
        self.J_salary.setObjectName("J_salary")
        self.gridLayout_3.addWidget(self.J_salary, 5, 0, 1, 1)
        self.JRecruiter = QtWidgets.QLineEdit(self.WORKPOS)
        self.JRecruiter.setReadOnly(True)
        self.JRecruiter.setObjectName("JRecruiter")
        self.gridLayout_3.addWidget(self.JRecruiter, 2, 4, 1, 1)
        self.J_subdate = QtWidgets.QLabel(self.WORKPOS)
        self.J_subdate.setObjectName("J_subdate")
        self.gridLayout_3.addWidget(self.J_subdate, 5, 3, 1, 1)
        self.J_startdate = QtWidgets.QLabel(self.WORKPOS)
        self.J_startdate.setObjectName("J_startdate")
        self.gridLayout_3.addWidget(self.J_startdate, 6, 0, 1, 1)
        self._antikeim = QtWidgets.QLabel(self.WORKPOS)
        self._antikeim.setObjectName("_antikeim")
        self.gridLayout_3.addWidget(self._antikeim, 6, 3, 1, 1)
        self.JStartdate = QtWidgets.QDateEdit(self.WORKPOS)
        self.JStartdate.setDisplayFormat("dd-MM-yyyy")
        self.JStartdate.setObjectName("JStartdate")
        self.gridLayout_3.addWidget(self.JStartdate, 6, 1, 1, 1)
        self.JAnndate = QtWidgets.QDateTimeEdit(self.WORKPOS)
        self.JAnndate.setReadOnly(True)
        self.JAnndate.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
        self.JAnndate.setDisplayFormat("dd-MM-yyyy HH:mm:ss")
        self.JAnndate.setObjectName("JAnndate")
        self.gridLayout_3.addWidget(self.JAnndate, 4, 1, 1, 1)
        self.J_edra = QtWidgets.QLabel(self.WORKPOS)
        self.J_edra.setObjectName("J_edra")
        self.gridLayout_3.addWidget(self.J_edra, 4, 3, 1, 1)
        self.JEdra = QtWidgets.QLineEdit(self.WORKPOS)
        self.JEdra.setObjectName("JEdra")
        self.gridLayout_3.addWidget(self.JEdra, 4, 4, 1, 1)
        self.J_anndate = QtWidgets.QLabel(self.WORKPOS)
        self.J_anndate.setObjectName("J_anndate")
        self.gridLayout_3.addWidget(self.J_anndate, 4, 0, 1, 1)
        self._idpos = QtWidgets.QLabel(self.WORKPOS)
        self._idpos.setObjectName("_idpos")
        self.gridLayout_3.addWidget(self._idpos, 1, 0, 1, 3)
        self.J_state = QtWidgets.QLabel(self.WORKPOS)
        self.J_state.setStatusTip("")
        self.J_state.setText("<html><head/><body><p><br/></p></body></html>")
        self.J_state.setObjectName("J_state")
        self.gridLayout_3.addWidget(self.J_state, 7, 0, 1, 1)
        self.JIDPos = QtWidgets.QComboBox(self.WORKPOS)
        self.JIDPos.setObjectName("JIDPos")
        self.gridLayout_3.addWidget(self.JIDPos, 1, 3, 1, 2)
        self.JSubdate = QtWidgets.QDateEdit(self.WORKPOS)
        self.JSubdate.setDisplayFormat("dd-MM-yyyy")
        self.JSubdate.setObjectName("JSubdate")
        self.gridLayout_3.addWidget(self.JSubdate, 5, 4, 1, 1)
        spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout_3.addItem(spacerItem2, 8, 0, 1, 4)
        self.JAntikeim = QtWidgets.QListWidget(self.WORKPOS)
        self.JAntikeim.setObjectName("JAntikeim")
        self.gridLayout_3.addWidget(self.JAntikeim, 6, 4, 3, 1)
        self.Tabs.addTab(self.WORKPOS, "")
        # --- "Firm" tab: employer/company details ---
        self.FIRM = QtWidgets.QWidget()
        self.FIRM.setObjectName("FIRM")
        self.gridLayout_4 = QtWidgets.QGridLayout(self.FIRM)
        self.gridLayout_4.setContentsMargins(10, 10, 10, 10)
        self.gridLayout_4.setSpacing(5)
        self.gridLayout_4.setObjectName("gridLayout_4")
        self.EName = QtWidgets.QLineEdit(self.FIRM)
        self.EName.setEnabled(True)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.EName.sizePolicy().hasHeightForWidth())
        self.EName.setSizePolicy(sizePolicy)
        self.EName.setReadOnly(True)
        self.EName.setObjectName("EName")
        self.gridLayout_4.addWidget(self.EName, 0, 1, 1, 3)
        self.E_adress = QtWidgets.QLabel(self.FIRM)
        self.E_adress.setObjectName("E_adress")
        self.gridLayout_4.addWidget(self.E_adress, 3, 0, 1, 1)
        self.ETel = QtWidgets.QDoubleSpinBox(self.FIRM)
        self.ETel.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
        self.ETel.setDecimals(0)
        self.ETel.setMaximum(99999999999.0)
        self.ETel.setObjectName("ETel")
        self.gridLayout_4.addWidget(self.ETel, 2, 6, 1, 1)
        self.ERoad = QtWidgets.QLineEdit(self.FIRM)
        self.ERoad.setObjectName("ERoad")
        self.gridLayout_4.addWidget(self.ERoad, 3, 1, 1, 1)
        spacerItem3 = QtWidgets.QSpacerItem(20, 178, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout_4.addItem(spacerItem3, 4, 2, 1, 2)
        self.E_tel = QtWidgets.QLabel(self.FIRM)
        self.E_tel.setObjectName("E_tel")
        self.gridLayout_4.addWidget(self.E_tel, 2, 4, 1, 1)
        self.ECity = QtWidgets.QLineEdit(self.FIRM)
        self.ECity.setObjectName("ECity")
        self.gridLayout_4.addWidget(self.ECity, 3, 3, 1, 3)
        self.E_name = QtWidgets.QLabel(self.FIRM)
        self.E_name.setObjectName("E_name")
        self.gridLayout_4.addWidget(self.E_name, 0, 0, 1, 1)
        self.ECountry = QtWidgets.QLineEdit(self.FIRM)
        self.ECountry.setObjectName("ECountry")
        self.gridLayout_4.addWidget(self.ECountry, 3, 6, 1, 1)
        self.EDoy = QtWidgets.QLineEdit(self.FIRM)
        self.EDoy.setReadOnly(True)
        self.EDoy.setObjectName("EDoy")
        self.gridLayout_4.addWidget(self.EDoy, 2, 1, 1, 3)
        self.EAFM = QtWidgets.QSpinBox(self.FIRM)
        self.EAFM.setReadOnly(True)
        self.EAFM.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
        self.EAFM.setMaximum(1000000000)
        self.EAFM.setObjectName("EAFM")
        self.gridLayout_4.addWidget(self.EAFM, 0, 6, 1, 1)
        self.E_doy = QtWidgets.QLabel(self.FIRM)
        self.E_doy.setObjectName("E_doy")
        self.gridLayout_4.addWidget(self.E_doy, 2, 0, 1, 1)
        self.E_afm = QtWidgets.QLabel(self.FIRM)
        self.E_afm.setObjectName("E_afm")
        self.gridLayout_4.addWidget(self.E_afm, 0, 4, 1, 1)
        self.ENum = QtWidgets.QSpinBox(self.FIRM)
        self.ENum.setButtonSymbols(QtWidgets.QAbstractSpinBox.NoButtons)
        self.ENum.setMaximum(99999999)
        self.ENum.setObjectName("ENum")
        self.gridLayout_4.addWidget(self.ENum, 3, 2, 1, 1)
        self.Tabs.addTab(self.FIRM, "")
        self.gridLayout.addWidget(self.Tabs, 0, 0, 1, 4)
        RecruitWindow.setCentralWidget(self.centralwidget)
        # --- label/field buddy associations (keyboard mnemonics) ---
        self._email.setBuddy(self.Email)
        self._regdate.setBuddy(self.Regdate)
        self._password.setBuddy(self.Password)
        self._surname.setBuddy(self.Surname)
        self._name.setBuddy(self.Name)
        self._username.setBuddy(self.Username)
        self.J_position.setBuddy(self.JTitle)
        self.J_recruiter.setBuddy(self.JRecruiter)
        self.J_salary.setBuddy(self.JSalary)
        self.J_subdate.setBuddy(self.JSubdate)
        self.J_startdate.setBuddy(self.JStartdate)
        self._antikeim.setBuddy(self.JAntikeim)
        self.J_edra.setBuddy(self.JEdra)
        self.J_anndate.setBuddy(self.JAnndate)
        self._idpos.setBuddy(self.JIDPos)
        self.E_adress.setBuddy(self.ERoad)
        self.E_tel.setBuddy(self.ETel)
        self.E_name.setBuddy(self.EName)
        self.E_doy.setBuddy(self.EDoy)
        self.E_afm.setBuddy(self.EAFM)
        self.retranslateUi(RecruitWindow)
        self.Tabs.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(RecruitWindow)
        # --- explicit keyboard tab-traversal order ---
        RecruitWindow.setTabOrder(self.Tabs, self.Username)
        RecruitWindow.setTabOrder(self.Username, self.Password)
        RecruitWindow.setTabOrder(self.Password, self.Name)
        RecruitWindow.setTabOrder(self.Name, self.Surname)
        RecruitWindow.setTabOrder(self.Surname, self.Regdate)
        RecruitWindow.setTabOrder(self.Regdate, self.Email)
        RecruitWindow.setTabOrder(self.Email, self.JIDPos)
        RecruitWindow.setTabOrder(self.JIDPos, self.JTitle)
        RecruitWindow.setTabOrder(self.JTitle, self.JRecruiter)
        RecruitWindow.setTabOrder(self.JRecruiter, self.JAnndate)
        RecruitWindow.setTabOrder(self.JAnndate, self.JEdra)
        RecruitWindow.setTabOrder(self.JEdra, self.JSalary)
        RecruitWindow.setTabOrder(self.JSalary, self.JSubdate)
        RecruitWindow.setTabOrder(self.JSubdate, self.JStartdate)
        RecruitWindow.setTabOrder(self.JStartdate, self.JAntikeim)
        RecruitWindow.setTabOrder(self.JAntikeim, self.EName)
        RecruitWindow.setTabOrder(self.EName, self.EAFM)
        RecruitWindow.setTabOrder(self.EAFM, self.EDoy)
        RecruitWindow.setTabOrder(self.EDoy, self.ETel)
        RecruitWindow.setTabOrder(self.ETel, self.ERoad)
        RecruitWindow.setTabOrder(self.ERoad, self.ENum)
        RecruitWindow.setTabOrder(self.ENum, self.ECity)
        RecruitWindow.setTabOrder(self.ECity, self.ECountry)
        RecruitWindow.setTabOrder(self.ECountry, self.Reset)
        RecruitWindow.setTabOrder(self.Reset, self.Save)
        RecruitWindow.setTabOrder(self.Save, self.Exit)

    def retranslateUi(self, RecruitWindow):
        """Install all user-visible (translatable) strings on the widgets."""
        _translate = QtCore.QCoreApplication.translate
        RecruitWindow.setWindowTitle(_translate("RecruitWindow", "MainWindow"))
        self.Reset.setText(_translate("RecruitWindow", "Reset"))
        self.Save.setText(_translate("RecruitWindow", "Save"))
        self.Exit.setText(_translate("RecruitWindow", "Log out"))
        self._email.setText(_translate("RecruitWindow", "E-Mail"))
        self._regdate.setText(_translate("RecruitWindow", "Register date"))
        self._password.setText(_translate("RecruitWindow", "Password"))
        self._surname.setText(_translate("RecruitWindow", "Surname"))
        self._name.setText(_translate("RecruitWindow", "Name"))
        self._username.setText(_translate("RecruitWindow", "Username"))
        self.Tabs.setTabText(self.Tabs.indexOf(self.ACCOUNT), _translate("RecruitWindow", "My Account"))
        self.J_position.setText(_translate("RecruitWindow", "Title"))
        self.J_recruiter.setText(_translate("RecruitWindow", "Recruiter"))
        self.J_salary.setText(_translate("RecruitWindow", "Salary"))
        self.J_subdate.setText(_translate("RecruitWindow", "Expiration of applications date"))
        self.J_startdate.setText(_translate("RecruitWindow", "Starting Date"))
        self._antikeim.setText(_translate("RecruitWindow", "Subjects"))
        self.J_edra.setText(_translate("RecruitWindow", "Headquarters"))
        self.J_anndate.setText(_translate("RecruitWindow", "Posting date"))
        self._idpos.setText(_translate("RecruitWindow", "Choose job to edit:"))
        self.Tabs.setTabText(self.Tabs.indexOf(self.WORKPOS), _translate("RecruitWindow", "Jobs"))
        self.E_adress.setText(_translate("RecruitWindow", "Adress"))
        self.ERoad.setPlaceholderText(_translate("RecruitWindow", "Street"))
        self.E_tel.setText(_translate("RecruitWindow", "Phone"))
        self.ECity.setPlaceholderText(_translate("RecruitWindow", "City"))
        self.E_name.setText(_translate("RecruitWindow", "Brand"))
        self.ECountry.setPlaceholderText(_translate("RecruitWindow", "Country"))
        self.E_doy.setText(_translate("RecruitWindow", "DOY"))
        self.E_afm.setText(_translate("RecruitWindow", "AFM"))
        self.Tabs.setTabText(self.Tabs.indexOf(self.FIRM), _translate("RecruitWindow", "Firm"))
if __name__ == "__main__":
    # Stand-alone preview: build the generated UI inside a bare QMainWindow.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    RecruitWindow = QtWidgets.QMainWindow()
    ui = Ui_RecruitWindow()
    ui.setupUi(RecruitWindow)
    RecruitWindow.show()
    sys.exit(app.exec_())
|
14,503 | ed3c06339ee40242d036ad877ea2ecb80481f359 | print (__doc__)
import numpy as np
# NOTE(review): `pylab` is deprecated -- prefer `import matplotlib.pyplot as plt`.
import pylab as pl
from sklearn import svm
# we create 40 separable points: two Gaussian clusters of 20 points each,
# shifted to (-2,-2) and (+2,+2) so a linear SVM can split them.
np.random.seed(0)
X = np.r_[np.random.randn(20,2)-[2,2],np.random.randn(20,2)+[2,2]]
Y = [0] * 20 +[1] * 20
#fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X,Y)
#get the separating hyperplane: w.x + b = 0, rewritten as y = a*x - b/w[1]
w= clf.coef_[0]
a= -w[0] / w[1]
xx = np.linspace(-5,5)
yy = a * xx - (clf.intercept_[0])/w[1]
#plot the parallels to the separating hyperplane that pass through the support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up= a * xx +(b[1]-a * b[0])
#plot the line, the points, and the nearest vectors to the plane
pl.plot(xx,yy,'k-')
pl.plot(xx,yy_down,'k--')
pl.plot(xx,yy_up,'k--')
# support vectors drawn as larger hollow circles behind the data points
pl.scatter(clf.support_vectors_[:,0],clf.support_vectors_[:,1],s=80,facecolors='none')
pl.scatter(X[:,0],X[:,1],c=Y,cmap=pl.cm.Paired)
pl.axis('tight')
pl.show() |
14,504 | 7f66a05d894edcc1a2c04635dcb250515bf6af7a | import pyglet;
from itertools import chain;
from victor.vector import *;
__all__ = ['MovementGrid'];
scales = (1, 5, 10, 20, 40, 80)


class MovementGrid(object):
    """A square grid overlay with a fixed set of zoom levels (``scales``).

    The grid is rendered as a pyglet vertex batch, can be shown/hidden and
    re-scaled, and offers helpers that snap or step positions to grid cells.

    Fixes vs. the original: Python-2-only ``xrange`` replaced with ``range``
    (works on both 2 and 3) and stray statement-terminating semicolons removed.
    """

    def __init__(self, width, height, color=(127, 127, 127, 127)):
        """
        :param width:  grid width in pixels
        :param height: grid height in pixels
        :param color:  RGBA line colour, 0-255 per channel
        """
        self.color = color
        self.width = width
        self.height = height
        self.scale = 3        # index into `scales`, not a pixel size
        self.visible = True
        self.batch = None     # built lazily by reset_batch()

    def reset_batch(self):
        """Rebuild the vertex batch of grid lines for the current scale."""
        batch = pyglet.graphics.Batch()
        spacing = scales[self.scale]
        # Vertical lines, overdrawn by one cell beyond each edge.
        for x in range(spacing, self.width, spacing):
            batch.add(
                2, pyglet.gl.GL_LINES, None,
                ('v2i', (x, -spacing, x, self.height + spacing)),
                ('c4B', tuple(chain(self.color, self.color))),
            )
        # Horizontal lines.
        for y in range(spacing, self.height, spacing):
            batch.add(
                2, pyglet.gl.GL_LINES, None,
                ('v2i', (-spacing, y, self.width + spacing, y)),
                ('c4B', tuple(chain(self.color, self.color))),
            )
        self.batch = batch

    def draw(self):
        """Draw the grid if it has been built and is currently visible."""
        if self.batch and self.visible:
            self.batch.draw()

    def toggle_visibility(self):
        # Preserved quirk: at the two smallest scales "toggling" instead
        # nudges the scale (which itself flips visibility as a side effect).
        if self.scale == 0:
            self.scale_up()
        elif self.scale == 1:
            self.scale_down()
        else:
            self.visible = not self.visible

    def scale_up(self):
        """Step to the next coarser grid (clamped) and rebuild the batch."""
        self.scale = min(self.scale + 1, len(scales) - 1)
        if self.scale >= 1 and not self.visible:
            self.visible = True
        self.reset_batch()

    def scale_down(self):
        """Step to the next finer grid (clamped at 0) and rebuild the batch."""
        self.scale = max(self.scale - 1, 0)
        if self.scale < 1 and self.visible:
            self.visible = False
        self.reset_batch()

    def clamp_left_down(self, pos):
        """Snap *pos* to the lower-left corner of the grid cell containing it."""
        spacing = scales[self.scale]
        return spacing * (pos // spacing)

    def up(self, pos, multiplier=1):
        """Return *pos* moved up by *multiplier* cells, snapped to the grid."""
        spacing = scales[self.scale]
        return self.clamp_left_down(pos + vec2f(0., spacing * multiplier))

    def right(self, pos, multiplier=1):
        """Return *pos* moved right by *multiplier* cells, snapped to the grid."""
        spacing = scales[self.scale]
        return self.clamp_left_down(pos + vec2f(spacing * multiplier, 0))

    def left(self, pos, multiplier=1):
        # The .01 epsilon pushes a point sitting exactly on a grid line into
        # the previous cell before snapping.
        spacing = scales[self.scale]
        return self.clamp_left_down(pos - vec2f(.01 + spacing * (multiplier - 1), 0))

    def down(self, pos, multiplier=1):
        # Same epsilon trick as left(), applied on the y axis.
        spacing = scales[self.scale]
        return self.clamp_left_down(pos - vec2f(0, .01 + spacing * (multiplier - 1)))
|
14,505 | 5680e24fd897a6a92a830ef359713885f171f2ab | import numpy as np
def perform_thresholding(f, M, type):
    """
    Apply one of three thresholding schemes to the array *f*.

    :param f: input array (any shape)
    :param M: for "largest": number of largest-magnitude entries to keep;
        for "soft"/"hard": the threshold value
    :param type: one of "largest", "soft", "hard"
        (parameter name shadows the builtin but is kept for API compatibility)
    :return: thresholded array, same shape as *f*
    :raises ValueError: on an unknown *type* (the original fell through to an
        UnboundLocalError on ``y``)
    """
    if type == "largest":
        # T is the (M+1)-th largest absolute value over all entries; only
        # entries strictly above it survive.
        a = np.sort(np.ravel(abs(f)))[::-1]  # sorted magnitudes, descending
        T = a[M]
        y = f * (abs(f) > T)
    elif type == "soft":
        # Soft threshold: shrink magnitudes by M, clamping at zero.
        s = abs(f) - M
        s = (s + abs(s)) / 2
        y = np.sign(f) * s
    elif type == "hard":
        # Hard threshold: zero out entries with magnitude <= M.
        y = f * (abs(f) > M)
    else:
        raise ValueError("unknown thresholding type: %r" % (type,))
    return y
14,506 | b2c436bd35cf4864fd1a3fdfca797c8978620477 | # 在这里写上你的代码 :-)
'''
[Note]: draws a line, line width 4, background colour: #ddeeff
'''
import tkinter
def tm056_1():
    """Show a 600x500 tkinter canvas (background #ddeeff) with one diagonal line of width 4."""
    canvas = tkinter.Canvas(width=600, height=500, bg='#ddeeff')
    canvas.pack(expand='yes', fill='both')
    r=150
    # Line from (300-r, 250-r) to (300+r, 250+r): the main diagonal of a
    # 2r-sided square centred at (300, 250).
    canvas.create_line(300 - r,250 - r,300 + r,250 + r, width=4)
    canvas.mainloop()
tm056_1()
|
14,507 | 4861ac12a440e940703fed8dfa5774da6e520283 | import scrapy
from tutorial.items import JobsItem, DefaultLoader
from Helper.NetHelper import header
from scrapy.http import Request, FormRequest
class QuotesSpider(scrapy.Spider):
    """Spider that logs in to 51job.com and then crawls session-protected pages.

    NOTE(review): account credentials are hard-coded below and committed to
    source control -- they should live in settings or environment variables.
    """
    name = "51Job"

    def start_requests(self):
        # Entry point: request the login page; 'cookiejar' in meta enables
        # scrapy's per-session cookie tracking.
        return [Request("https://login.51job.com/",
                        meta={'cookiejar': 1}, callback=self.post_login)]

    def post_login(self, response):
        # Submit the login form; after_login is invoked with the result.
        return [FormRequest.from_response(response,
                                          # carry the same cookie jar through the login POST
                                          meta={'cookiejar': response.meta['cookiejar']},
                                          headers=header,
                                          formdata={
                                              'loginname': '15505924050',
                                              'password': 'op90--'
                                          },
                                          callback=self.after_login,
                                          dont_filter=True
                                          )]

    def after_login(self, response):
        # Debug output plus an interactive shell to inspect the logged-in response.
        print(response)
        print(response.body)
        from scrapy.shell import inspect_response
        inspect_response(response, self)
        urls = [
            'https://i.51job.com/userset/user_discover.php?page={}'.format(str(i)) for i in range(1, 2)
        ]
        # NOTE(review): `return` inside the loop means only the FIRST url is
        # ever requested. Harmless while range(1, 2) yields one page, but
        # widening the range will silently drop pages -- likely meant `yield`.
        for i, url in enumerate(urls):
            return [Request(url, meta={'cookiejar': 1}, callback=self.parse)]

    def parse(self, response):
        # Inspect the page fetched with the logged-in session.
        print(response)
        print(response.body)
        from scrapy.shell import inspect_response
        inspect_response(response, self)
        selector = response.css('p.t1')
        print(selector)
        # loader = DefaultLoader(item=JobsItem(), selector=response)
        # loader.add_css('title', 'div.final-question::text')
        # loader.add_css('answer', 'div.green-answer-item::text')
        # yield loader.load_item()
|
14,508 | dbf532c3401137b26ae0dbc21178c3b67cdf087f | import requests
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
url = 'https://www.metafilter.com/'
params = ['171194']
def get_post_text(url, params):
    """Download the post page at url+params and return its main body text."""
    html = requests.get(url + params).content
    parsed = BeautifulSoup(html, 'html.parser')
    # The post body lives in the first <div class="copy"> element.
    return parsed.find('div', attrs={'class': 'copy'}).get_text()
def get_post_ids(file):
    '''returns list of post ids'''
    # TODO: unimplemented stub -- presumably *file* contains post ids to
    # extract; confirm the expected format before implementing.
    pass
def save_documents():
    # TODO: unimplemented stub for persisting fetched post documents.
    pass
def getmfpages(params):
    """Fetch and return the raw page content for the given post id.

    NOTE(review): the original docstring said "a list of post_ids", but the
    implementation concatenates *params* directly onto the URL, so it expects
    a single string -- confirm the intended interface.
    """
    url = 'https://www.metafilter.com/'
    page = requests.get(url + params).content
    # Bug fix: the fetched content was computed but never returned.
    return page
|
14,509 | 6e2a5d9549f5aace1de039770aa2f3297374a07e | import pygame
import os
pygame.init()
win = pygame.display.set_mode((1000,500))
# loading image file inside the variable
bg_img = pygame.image.load("background.jpg")
# scale the image according to the window size
bg = pygame.transform.scale(bg_img, (1000, 500))
width = 1000
i = 0          # horizontal scroll offset; decreases by 1 each frame
run = True
# Endless side-scroller: two copies of the background are drawn side by side
# and shifted left together; once a full width has scrolled past, the offset
# wraps back to 0 for a seamless loop.
while run:
    pygame.time.delay(3)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    win.fill((0,0,0))
    win.blit(bg, (i, 0))
    win.blit(bg, (width + i, 0))
    if i == -width:
        win.blit(bg, (width + i, 0))
        i = 0
    i -= 1
    pygame.display.update()
#pygame.quit() |
14,510 | 79f9e6fe14f86090b368a5f41d76731a04bc77fe | import socket, hashlib
def create_socket(addr):
    """
    Create a socket wrapper from an address of the form 'proto://params'.

    The protocol name selects a ``socketutil.<proto>`` module whose
    ``TheSocket`` class is instantiated with the remainder of the address.

    :param addr: address string, e.g. 'tcp://host:port'
    :return: a ``TheSocket`` instance from the matching socketutil submodule
    :raises socket.error: if the address has no '://' separator (the original
        raised a bare ValueError here), the protocol name is not
        alphanumeric, or no matching socketutil submodule exists
    """
    proto, sep, params = addr.partition('://')
    if not sep or not proto.isalnum():
        # isalnum() also guards the dynamic import below against injection.
        raise socket.error("Unknown protocol")
    try:
        # __import__ returns the top-level package; getattr walks to the submodule.
        mod = getattr(__import__('socketutil.' + proto), proto)
    except ImportError:
        raise socket.error("Unknown protocol")
    return mod.TheSocket(params)
|
14,511 | de431f201efa776cd310dbf3374e9dcaad60a62b | import json
import sys
def get_attribute_index(attribute):
    """Split a bracketed attribute like 'name[0][-2]' into ['name', 0, -2].

    Bracket-delimited tokens that look like (possibly negative) integers are
    converted to int; everything else is kept as a string.  Any trailing text
    after the final ']' is discarded, matching the original behaviour.
    """
    tokens = []
    current = ''
    for ch in attribute:
        if ch in '[]':
            if current:
                tokens.append(current)
                current = ''
        else:
            current += ch
    result = []
    for tok in tokens:
        if tok.isdigit():
            result.append(int(tok))
        elif tok.startswith('-') and tok[1:].isdigit():
            result.append(-int(tok[1:]))
        else:
            result.append(tok)
    return result
def path_constructor(path):
    """Turn a dotted path like 'a.b[0].c' into a flat list of lookup keys."""
    keys = []
    for attr in path.split('.'):
        if attr.endswith(']'):
            # Bracketed segment: expand into name plus integer indices.
            keys.extend(get_attribute_index(attr))
        else:
            keys.append(attr)
    return keys
def gson(json_file, path):
    """
    Print the value found at *path* inside the JSON document *json_file*.

    :param json_file: filesystem path to a JSON document
    :param path: dotted/bracketed lookup path, e.g. 'movies[0].title'
    Writes the value to stdout; on failure writes an error to stderr and
    exits with status 1 (messages unchanged from the original).
    """
    keys = path_constructor(path)
    try:
        # 'with' guarantees the handle is closed (the original leaked it),
        # and the narrowed except no longer swallows SystemExit and friends.
        with open(json_file) as fp:
            data = json.load(fp)
    except (OSError, ValueError):
        # ValueError also covers json.JSONDecodeError.
        sys.stderr.write('Error: JSON file not found\n')
        sys.exit(1)
    try:
        for k in keys:
            data = data[k]
    except (KeyError, IndexError, TypeError):
        sys.stderr.write('Error: Property not found\n')
        sys.exit(1)
    # Success output outside the try so only lookup failures map to the error.
    sys.stdout.write(str(data) + '\n')
# CLI entry point: argv[1] is the JSON file path, argv[2] the lookup path.
gson(sys.argv[1], sys.argv[2])
import re
def parse_path_to_list(args):
    """Extract alphabetic and numeric runs from *args* as a flat string list."""
    pattern = re.compile(r'[a-zA-Z]+|[0-9]+')
    return pattern.findall(args)
|
14,512 | 9eb8b7f4c6025aeb7007b28f8b72bfedc9bd62e9 | """URL definition for the beer site"""
from django.conf.urls import url, include
from beers.views import user, validation, contest
import beers.api.views as api_views
# Route table, grouped as: core site pages, contest web views (including
# check-in validation), and the JSON API under /api/.
urlpatterns = [
    # --- core site / auth pages ---
    url(r'^$', contest.index, name='index'),
    url(r'^', include('django.contrib.auth.urls')),
    url(r'^signup$', user.signup, name='signup'),
    url(r'^profile', user.update_profile, name='profile'),
    # --- contest web views ---
    url(r'^contests/$', contest.contests, name='contests'),
    url(r'^contests/add$', contest.contest_add, name='contest-add'),
    url(r'^contests/(?P<contest_id>[0-9]+)/$',
        contest.contest,
        name='contest'),
    url(r'^contests/(?P<contest_id>[0-9]+)/join$',
        contest.contest_join,
        name='contest-join'),
    # check-in validation pages/endpoints
    url(r'^contests/(?P<contest_id>[0-9]+)/validate$',
        validation.unvalidated_checkins,
        name='unvalidated-checkins'),
    url(r'^contests/(?P<contest_id>[0-9]+)/unvalidated_checkins$',
        validation.unvalidated_checkins_json,
        name='unvalidated-checkins-json'),
    url(r'^contests/(?P<contest_id>[0-9]+)/unvalidated_checkins/(?P<uv_checkin>[0-9]+)$',
        validation.delete_checkin,
        name='delete-checkin'),
    url(r'^contests/(?P<contest_id>[0-9]+)/checkins$',
        validation.validate_checkin,
        name='validate-checkin'),
    url(r'^contests/(?P<contest_id>[0-9]+)/players/(?P<username>[^/]+)$',
        contest.contest_player,
        name='contest-player'),
    url(r'^contests/(?P<contest_id>[0-9]+)/players/$',
        contest.contest_players,
        name='contest-players'),
    url(r'^contests/(?P<contest_id>[0-9]+)/beers/(?P<beer_id>[0-9]+)$',
        contest.contest_beer,
        name='contest-beer'),
    url(r'^contests/(?P<contest_id>[0-9]+)/beers/$',
        contest.contest_beers,
        name='contest-beers'),
    url(r'^contests/(?P<contest_id>[0-9]+)/breweries/$',
        contest.contest_breweries,
        name='contest-breweries'),
    url(r'^contests/(?P<contest_id>[0-9]+)/breweries/(?P<brewery_id>[0-9]+)$',
        contest.contest_brewery,
        name='contest-brewery'),
    url(r'^contests/(?P<contest_id>[0-9]+)/challenges/(?P<beer_id>[0-9]+)$',
        contest.contest_challenge,
        name='contest-challenge'),
    url(r'^contests/(?P<contest_id>[0-9]+)/challenges/$',
        contest.contest_challenges,
        name='contest-challenges'),
    url(r'^contests/(?P<contest_id>[0-9]+)/bonuses/$',
        contest.contest_bonuses,
        name='contest-bonuses'),
    url(r'^contests/(?P<contest_id>[0-9]+)/bonuses/(?P<bonus_tag>[A-Za-z0-9]+)$',
        contest.contest_bonus,
        name='contest-bonus'),
    url(r'^contests/(?P<contest_id>[0-9]+)/recover$',
        validation.initiate_recover,
        name='initiate-recover'),
    url(r'^instructions$', contest.instructions, name='instructions'),
    # --- JSON API (class-based views from beers.api.views) ---
    url(r'^api/players/$',
        api_views.PlayerList.as_view(),
        name='player-list',),
    url(r'^api/players/(?P<user__username>[A-Za-z0-9_]+)$',
        api_views.PlayerDetail.as_view(),
        name='player-detail',),
    url(r'^api/contests/$',
        api_views.ContestList.as_view(),
        name='contest-list',),
    url(r'^api/contests/(?P<id>[0-9]+)$',
        api_views.ContestDetail.as_view(),
        name='contest-detail',),
    url(r'^api/contests/(?P<contest_id>[0-9]+)/players/$',
        api_views.ContestPlayerList.as_view(),
        name='contest-player-list',),
    url(r'^api/contests/(?P<contest_id>[0-9]+)/players/(?P<username>[A-Za-z0-9_]+)$',
        api_views.ContestPlayerDetail.as_view(),
        name='contest-player-detail',),
    url(r'^api/contests/(?P<contest_id>[0-9]+)/beers/$',
        api_views.ContestBeerList.as_view(),
        name='contest-beer-list',),
    url(r'^api/contests/(?P<contest_id>[0-9]+)/beers/(?P<contest_beer_id>[0-9]+)$',
        api_views.ContestBeerDetail.as_view(),
        name='contest-beer-detail',),
    url(r'^api/contests/(?P<contest_id>[0-9]+)/breweries/$',
        api_views.ContestBreweryList.as_view(),
        name='contest-brewery-list',),
    url(r'^api/contests/(?P<contest_id>[0-9]+)/breweries/(?P<contest_brewery_id>[0-9]+)$',
        api_views.ContestBreweryDetail.as_view(),
        name='contest-brewery-detail',),
    url(r'^api/contests/(?P<contest_id>[0-9]+)/bonuses/$',
        api_views.ContestBonusList.as_view(),
        name='contest-bonus-list',),
    url(r'^api/contests/(?P<contest_id>[0-9]+)/bonuses/(?P<contest_bonus_id>[0-9]+)$',
        api_views.ContestBonusDetail.as_view(),
        name='contest-bonus-detail',),
    url(r'^api/contests/(?P<contest_id>[0-9]+)/unvalidated_checkins$',
        api_views.UnvalidatedCheckinList.as_view(),
        name='unvalidated-checkin-list',),
    url(r'^api/unvalidated_checkins/(?P<id>[0-9]+)$',
        api_views.UnvalidatedCheckinDetail.as_view(),
        name='unvalidated-checkin-detail',),
    # beer/brewery name lookups (no trailing $ -- matches any suffix)
    url(r'^api/lookup/beer', api_views.BeerLookup.as_view(), name='beer-lookup'),
    url(r'^api/lookup/brewery',
        api_views.BreweryLookup.as_view(),
        name='brewery-lookup'),
]
|
14,513 | 654bf311288c9bfed81e072caecfa926fa01b2e8 |
from collections import Counter
from AoC2018.util import util
def count_twos_threes(line):
    """
    Report whether *line* contains any letter exactly twice / exactly thrice.

    :param line: input line
    :return: [has_a_letter_exactly_2_times, has_a_letter_exactly_3_times]
    """
    frequencies = Counter(line).values()
    return [2 in frequencies, 3 in frequencies]
def calculate_checksum(lines):
    """
    Multiply the number of lines containing a doubled letter by the number
    containing a tripled letter (Advent of Code 2018 day 2 checksum).

    :param lines: iterable of input lines
    :return: integer checksum
    """
    flags = [count_twos_threes(box) for box in lines]
    twos = sum(1 for has_two, _ in flags if has_two)
    threes = sum(1 for _, has_three in flags if has_three)
    return twos * threes
@util.run_timer
def main():
    """Read the puzzle input file and print the resulting checksum."""
    result = calculate_checksum(util.get_input_lines('input'))
    print(f'Checksum: {result}')
if __name__ == '__main__':
    main()
|
14,514 | c08d9b9bb5ac9d2de74fd035a4fe4d8886989b5e | from .garden import Garden
from.bunny import Bunny
def eat(garden):
    """Run the bunny over the garden matrix and return what it ate (its stomach)."""
    field = Garden.from_matrix(garden)
    rabbit = Bunny(field)
    rabbit.run()
    return rabbit.stomach
|
14,515 | ab0cb23753ceb3209488d6c1de1e943e8ef0eb7f | """
Domain object for tracking Civet jobs in the incremental submission system
"""
from sqlalchemy import *
from base import Base
import logging
from sqlalchemy.orm import relationship
from sqlalchemy import func
from session import Session
from status import Status
# Self-referential association table: each row records that job `job_id`
# cannot start until job `depends_on` has completed.
dependencies = Table('job_dependencies', Base.metadata,
                     Column('job_id', Integer, ForeignKey('job.id'),
                            primary_key=True),
                     Column('depends_on', Integer, ForeignKey('job.id'),
                            primary_key=True)
                     )
class Job(Base):
    """ORM mapping for one cluster job tracked by the incremental submission
    system (table ``job``)."""
    __tablename__ = 'job'
    id = Column(Integer, primary_key=True, autoincrement=True)
    pipeline_id = Column(Integer, ForeignKey('pipeline.id'))
    pipeline = relationship('Pipeline', back_populates='jobs')
    job_name = Column(String(50), nullable=False)
    threads = Column(Integer, nullable=False)
    # These paths may be long, but we don't really care as long as
    # we are using sqlite3. It only has one (unlimited) text type.
    stdout_path = Column(String(512), nullable=False)
    stderr_path = Column(String(512), nullable=False)
    script_path = Column(String(512), nullable=False)
    epilog_path = Column(String(512), nullable=False)
    walltime = Column(String(16))
    mem = Column(Integer)
    email_list = Column(String(512))
    mail_options = Column(String(64))
    queue = Column(String(128))
    status_id = Column(Integer, default=Status.NOT_SET, nullable=False)
    torque_id = Column(String(512))  # batch-system id assigned at submission
    env = Column(String(512))
    # Self-referential many-to-many through the `dependencies` table:
    # the jobs that must complete before this one may run.
    depends_on = relationship('Job', secondary=dependencies,
                              primaryjoin=id == dependencies.c.job_id,
                              secondaryjoin=id == dependencies.c.depends_on,
                              backref='job_dependencies')
def __init__(self, pipeline, job_name, threads, stdout_path,
stderr_path, script_path, epilog_path, mem,
email_list, mail_options, env, depends_on, queue, walltime):
"""
Create a new Job object. All jobs are created in state Not Submitted.
:param pipeline: The pipeline of which this job is a part.
:param job_name: The name of this job.
:param threads: Number of threads to allocate.
:param stdout_path: Stdout path for torque (not the commands)
:param stderr_path: Stderr path for torque (not the commands)
:param script_path: The path to the script to be submitted.
:param epilog_path: Path to the epilog script.
:param mem: Amount of mem to allocate for this job. None means
unlimited.
:param email_list: Email address(es) to send status mail to.
:param mail_options: Options controlling when email is sent.
:param env: Environment to set for the running job.
:param depends_on: List of job ids that this job depends on.
Must already exist and be committed to the database.
"""
self.pipeline = pipeline
self.job_name = job_name
self.threads = threads
self.stdout_path = stdout_path
self.stderr_path = stderr_path
self.script_path = script_path
self.epilog_path = epilog_path
self.mem = mem
self.email_list = email_list
self.mail_options = mail_options
self.env = env
self.status_id = Status.get_id("Not Submitted")
self.depends_on = depends_on
self.queue = queue
self.walltime = walltime
def is_status(self, name):
return self.status_id == Status.get_id(name)
def set_status(self, name):
self.status_id = Status.get_id(name)
def get_status(self):
return Status.get_name(self.status_id)
def mark_submitted(self, torque_id):
logging.debug('Marked submitted: {} (log dir: {})'.format(
self.job_name, self.pipeline.log_directory))
self.status_id = Status.SUBMITTED
self.torque_id = torque_id
Session.commit()
def mark_complete_and_release_dependencies(self):
# Now let's complete that job.
logging.debug('Now completing job: {}, ID: {} (log dir: {})'.format(
self.job_name, self.id, self.pipeline.log_directory))
self.status_id = Status.COMPLETE
# Find all the jobs depending on the completed job.
dependent_jobs = Session.query(Job).filter(
Job.depends_on.any(Job.id == self.id))
for j in dependent_jobs:
logging.debug('Found dependent job: {0}'.format(j.job_name))
j.depends_on.remove(self)
logging.debug("New dependencies with completed job removed: {0}".
format([x.job_name for x in j.depends_on]))
Session.commit()
def __repr__(self):
return '<Job: ID={0} Pipeline={1} JobName={2} ' \
'StdoutPath={3} StderrPath={4} ScriptPath={5} EpilogPath={6} ' \
'Mem={7} EmailList={8} MailOptions={9} Env={10} StatusID={11} ' \
'TorqueID={12} Dependencies={13} Queue={14} Walltime={15}>'.\
format(
self.id,
self.pipeline.name,
self.job_name,
self.stdout_path,
self.stderr_path,
self.script_path,
self.epilog_path,
self.mem,
self.email_list,
self.mail_options,
self.env,
self.status_id,
self.torque_id,
self.depends_on,
self.queue,
self.walltime)
def str_for_pipeline(self):
status_name = Status.get_name(self.status_id)
return '<Job: ID={0} Name={1} Status={2}>'.format(
self.id, self.job_name, status_name)
def __str__(self):
deps = []
for dep in self.depends_on:
deps.append('<Job: ID={0} Name={1}>'.format(dep.id, dep.job_name))
stat = Status.get_name(self.status_id)
return '<Job: ID={0} Pipeline="{1}" JobName="{2}" ' \
'StdoutPath="{3}" StderrPath="{4}" ScriptPath="{5}" ' \
'EpilogPath="{6}" Mem={7} EmailList="{8}" MailOptions="{9}" ' \
'Env="{10}" StatusID={11} ' \
'TorqueID="{12}" Dependencies={13} Queue={14} Walltime={15}>'.\
format(
self.id,
self.pipeline.name,
self.job_name,
self.stdout_path,
self.stderr_path,
self.script_path,
self.epilog_path,
self.mem,
self.email_list,
self.mail_options,
self.env,
stat,
self.torque_id,
deps,
self.queue,
self.walltime)
@staticmethod
def scan_for_runnable(limit=None):
"""
Scans the database for jobs that are eligible to run; in other words,
those with an empty dependency list and the status "Not Submitted".
:param limit: Place an arbitrary limit on the number of jobs returned.
:return: A list of runnable jobs.
"""
logging.debug('Finding runnable jobs')
ready_jobs_query = Session.query(Job).filter(~Job.depends_on.any()). \
filter_by(status_id=Status.NOT_SUBMITTED)
if limit:
ready_jobs_query = ready_jobs_query.limit(limit)
ready_jobs = ready_jobs_query.all()
if not ready_jobs:
logging.debug('No jobs are ready to execute')
else:
logging.debug('The jobs that are ready to execute are:')
for j in ready_jobs:
logging.debug(' {} (log dir: {})'.format(
j.job_name, j.pipeline.log_directory))
return ready_jobs
@staticmethod
def count_submitted():
count_q = Session.query(Job).filter_by(
status_id=Status.SUBMITTED).statement. \
with_only_columns([func.count()]).order_by(None)
count = Session.session.execute(count_q).scalar()
logging.debug("Counted {} submitted jobs".format(count))
return count
@staticmethod
def get_all():
jobs = Session.query(Job).all()
logging.debug("Jobs.get_all() returned {} jobs".format(len(jobs)))
return jobs
|
14,516 | a78b594ecb47a9485a72c64524e9eed3bf237e37 | from django.db import models
# Create your models here.
class User(models.Model):
    """A player: running score plus in-game currency balance."""
    # Player's accumulated score.
    score = models.IntegerField()
    # In-game currency balance.
    coins = models.IntegerField()
    def __str__(self):
        return f'{self.pk}'
class Windmills(models.Model):
    """A windmill placed by a player on the map."""
    # Owning player.  NOTE(review): Django convention would name this field
    # `user` (the DB column is then user_id); as written the column becomes
    # user_id_id — confirm before changing, it alters the schema.
    user_id = models.ForeignKey('User', on_delete=models.CASCADE)
    height = models.IntegerField()
    # presumably "is started/running" — TODO confirm meaning of `starts`
    starts = models.BooleanField(default=False)
    # Coordinates and county name are stored as raw strings.
    lat = models.CharField(max_length=30, default='')
    long = models.CharField(max_length=30, default='')
    county = models.CharField(max_length=30, default='')
    def __str__(self):
        return f'{self.pk}'
class WMData(models.Model):
    """Wind-measurement record; all values are stored as raw strings."""
    speed = models.CharField(max_length=30, default='')
    height = models.CharField(max_length=30, default='')
    year = models.CharField(max_length=30, default='')
    def __str__(self):
        return f'{self.pk}'
class Game_WMill_Asset(models.Model):
    """Static per-county windmill asset data used by the game."""
    county_id = models.IntegerField()
    county_name = models.CharField(max_length=50, default='')
    WMill_Height = models.IntegerField()
    WMill_Speed = models.CharField(max_length=50, default='')
    WMill_Power = models.IntegerField()
    WMill_Coins = models.IntegerField()
    def __str__(self):
        return f'{self.county_id}'
|
14,517 | 86222835a111e33684463047783439bb596c8247 | import json
import re
import urllib
from bs4 import BeautifulSoup
from decimal import Decimal
from storescraper.product import Product
from storescraper.store import Store
from storescraper.utils import session_with_proxy, check_ean13, \
html_to_markdown
class Pontofrio(Store):
    """Store scraper for pontofrio.com.br (storage product categories)."""
    preferred_discover_urls_concurrency = 3
    preferred_products_for_url_concurrency = 3

    @classmethod
    def categories(cls):
        """Categories this scraper claims to support."""
        return [
            'StorageDrive',
            'ExternalStorageDrive',
            'MemoryCard',
            'UsbFlashDrive',
            'SolidStateDrive',
        ]

    @classmethod
    def discover_urls_for_category(cls, category, extra_args=None):
        """Walk the paginated category listings and collect product URLs."""
        # (site path, local category) pairs; only paths matching *category*
        # are crawled.  NOTE(review): there is no path for 'SolidStateDrive'
        # even though categories() advertises it — confirm if intentional.
        category_paths = [
            ['Informatica/ComponentesePecas/HDInterno/?Filtro=C56_C68_C2781',
             'StorageDrive'],
            ['Informatica/HDExterno/?Filtro=C56_C67',
             'ExternalStorageDrive'],
            ['TelefoneseCelulares/CartoesdeMemoria/?Filtro=C38_C32',
             'MemoryCard'],
            ['Informatica/PenDrives/?Filtro=C56_C66',
             'UsbFlashDrive'],
        ]
        product_urls = []
        session = session_with_proxy(extra_args)
        session.headers['User-Agent'] = \
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, ' \
            'like Gecko) Chrome/66.0.3359.117 Safari/537.36'
        session.headers['Accept-Language'] = \
            'en-US,en;q=0.9,es;q=0.8,pt;q=0.7,pt-BR;q=0.6'
        for category_path, local_category in category_paths:
            if local_category != category:
                continue
            page = 1
            while True:
                category_url = \
                    'https://www.pontofrio.com.br/{}&paginaAtual={}' \
                    ''.format(category_path, page)
                # Hard stop so a looping pagination cannot crawl forever.
                if page >= 120:
                    raise Exception('Page overflow: ' + category_url)
                soup = BeautifulSoup(session.get(
                    category_url, timeout=30).text, 'html.parser')
                products = soup.findAll('div', 'hproduct')
                if not products:
                    # An empty first page means the listing layout changed.
                    if page == 1:
                        raise Exception('Empty category: ' + category_url)
                    break
                for product in products:
                    product_url = product.find('a')
                    # Skip placeholder cards whose anchor has no link.
                    if 'href' not in dict(product_url.attrs):
                        continue
                    product_url = product_url['href'].split('?')[0]
                    product_urls.append(product_url)
                page += 1
        return product_urls

    @classmethod
    def products_for_url(cls, url, category=None, extra_args=None):
        """Scrape one product page into a single-element Product list ([] if
        the page carries no product metadata)."""
        session = session_with_proxy(extra_args)
        session.headers['User-Agent'] = \
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, ' \
            'like Gecko) Chrome/66.0.3359.117 Safari/537.36'
        session.headers['Accept-Language'] = \
            'en-US,en;q=0.9,es;q=0.8,pt;q=0.7,pt-BR;q=0.6'
        page_source = session.get(url, timeout=30).text
        # Pricing metadata is embedded in the page as a JS object literal.
        pricing_data = re.search(r'var siteMetadata = ([\S\s]+?);',
                                 page_source).groups()[0]
        pricing_data = json.loads(pricing_data)['page']
        if 'product' not in pricing_data:
            return []
        pricing_data = pricing_data['product']
        name = urllib.parse.unquote(pricing_data['fullName'])
        sku = pricing_data['idSku']
        price = Decimal(pricing_data['salePrice'])
        # -1 appears to mean "in stock, quantity unknown" — TODO confirm
        # against the storescraper Store/Product conventions.
        if pricing_data['StockAvailability']:
            stock = -1
        else:
            stock = 0
        soup = BeautifulSoup(page_source, 'html.parser')
        ean_container = soup.find('span', 'productEan')
        if ean_container:
            ean = re.search(r'EAN (\d+)', ean_container.text).groups()[0]
            # Pad 12-digit codes to 13 before validating as EAN-13.
            if len(ean) == 12:
                ean = '0' + ean
            if not check_ean13(ean):
                ean = None
        else:
            ean = None
        description = html_to_markdown(
            str(soup.find('div', 'detalhesProduto')))
        # Image URLs sometimes contain non-breaking spaces; URL-encode them.
        picture_urls = [tag.find('img')['src'].replace('\xa0', '%20')
                        for tag in soup.findAll('a', 'jqzoom')]
        p = Product(
            name,
            cls.__name__,
            category,
            url,
            url,
            sku,
            stock,
            price,
            price,
            'BRL',
            sku=sku,
            ean=ean,
            description=description,
            picture_urls=picture_urls
        )
        return [p]
|
14,518 | f36996b7205d8e5527d68028e61e7bade57560f3 | #!/usr/bin/python
# --*-- encoding:utf-8 --*--
####################################################
# Zhihu Auto-Aogin
#
#
# Created on : 03/07/17
# Last Modified :
#
# Author : Pengcheng Zhou(Kevin)
#
####################################################
import re
from urllib import parse, request, error
from multiprocessing import Pool
import http.cookiejar as Cookie
import time
import json
from getpass import getpass
import ssl
# Cancel the certification of target site
ssl._create_default_https_context = ssl._create_unverified_context
import os
from User_Getter import User_Getter
import random
# File used to persist login cookies between runs.
cookieFile = 'zhihu_cookie.txt'
class RedirectHandler(request.HTTPRedirectHandler):
    """Redirect handler that aborts 301/302 responses.

    Returning None makes urllib treat the redirect as a failure; the site
    redirects to the login page when cookies expire, so callers interpret
    this as "must log in again".
    """
    def http_error_302(self, req, fp, code, msg, headers):
        # Message: "Cookie expired, logging in again...."
        print ("Cookie过期,重新登录中....")
        return
    http_error_301 = http_error_302
class Zhihu(object):
    """Logs in to zhihu.com (persisting cookies) and crawls user profiles."""

    def __init__(self):
        '''
        Initialize the cookie jar, the urllib opener and default credentials.
        '''
        self.pool = Pool(4)
        self.cj = Cookie.MozillaCookieJar(cookieFile)
        self.opener = request.build_opener(request.HTTPCookieProcessor(self.cj), RedirectHandler())
        # SECURITY: credentials are hard-coded in source — move them to an
        # environment variable or config file before sharing this script.
        self.client_info = 'monsterzpc@gmail.com'
        self.passwd = 'Zpc920515'
        self.url = 'https://www.zhihu.com/login/email'
        self.target_page = ''
        # Last user id processed; used to resume crawling after an error.
        self.position = 0
        print('''
    ############################################################
    #                                                          #
    #    Zhihu Auto_Login and Crawler by Pengcheng Zhou.       #
    #                                                          #
    ############################################################
    ''')

    def get_xsrf(self):
        '''
        Get the dynamic _xsrf token required by the login form.
        '''
        login_target_page = request.urlopen(self.url)
        pattern = re.compile('<input type="hidden" name="_xsrf" value="(.*)"/>')
        _xsrf = re.findall(pattern, login_target_page.read().decode('utf-8'))[0]
        return _xsrf

    def get_captcha_url(self):
        '''
        Download the current login captcha image to ./cap.png.
        '''
        url = 'https://www.zhihu.com' + '/captcha.gif?r=' + str(int(time.time())) + '&type=login'
        f = request.urlopen(url)
        # `with` closes the file; the original's explicit close() was redundant.
        with open('./cap.png', 'wb') as image:
            image.write(f.read())

    def login(self):
        '''
        Execution of login: prompt for missing credentials and the captcha,
        POST the login form, then persist the resulting cookies.
        '''
        if self.client_info == '' or self.passwd == '':
            self.client_info = input('请输入账号:')
            self.passwd = getpass('请输入密码:')
        self.get_captcha_url()
        captcha = input('请输入验证码:')
        if self.client_info.find("@") != -1:
            print('''正在使用邮箱登录...\n用户名:''' + self.client_info + '\n' + '密码 : ' + len(self.passwd) * '*' + '\n')
        else:
            # Phone login is not implemented: the endpoint is left empty,
            # so the request below will fail and be reported.
            self.url = ''
            print('正在使用手机登录...')
        form = {'_xsrf': self.get_xsrf(),
                'password': self.passwd,
                'email': self.client_info,
                'captcha': captcha}
        print(form)
        try:
            req = request.Request(self.url, parse.urlencode(form).encode('utf-8'))
            f = self.opener.open(req)
            self.cj.save()
            print(json.loads(f.read().decode('utf-8'))["msg"] + "!")
            print("=" * 100)
        except Exception:
            # Was a bare `except:`; narrowed so Ctrl-C still works.
            print('Error!')

    def get_capthca(self):
        '''
        Interface for getting the captcha (unimplemented; see get_captcha_url
        for the actual download).
        '''
        pass

    def get_target_page(self):
        '''
        Load the profile-edit page using saved cookies and dump it to
        zhihu.html; on any failure, log in again and retry.
        '''
        try:
            self.cj.load()
            print('Cookie loaded....')
            self.target_page = self.opener.open('https://www.zhihu.com/people/edit')
            # BUG FIX: the original wrote `target_page_content.read()` — an
            # undefined name that always raised and forced a re-login loop.
            # Also use `with` so the file handle is closed.
            with open('zhihu.html', 'wb') as f:
                f.write(self.target_page.read())
        except Exception:
            self.login()
            self.get_target_page()

    def isLogged(self, user_client):
        '''
        Return True if the cookies still represent a logged-in session (the
        settings page does not redirect away).
        '''
        f = user_client.open('https://www.zhihu.com/settings/profile').geturl()
        if f != 'https://www.zhihu.com/settings/profile':
            return False
        return True

    def user_getter(self):
        # Fixed parameter typo (was `sefl`); callers invoke this as a bound
        # method, so the rename is transparent.
        return User_Getter('https://www.zhihu.com/people/xiao-guai-shou-2/activities').urls()

    def profile_collector(self, text_path=None):
        '''
        Main entry for collecting user profiles: id, gender, education,
        career, counts, etc.

        Reads user ids (one per line) from *text_path*, writes a .txt, .png
        and .html per user under ./data/<name>/, and resumes from the last
        processed user after a network error.
        '''
        count = 0
        self.cj.load()
        user_list = []
        # Check the source of the data.
        if text_path is not None:
            with open(text_path, 'r') as source_list:
                for line in source_list:
                    user_list.append(line.split('\n')[0])
        else:
            user_list = []
        initial_time = time.time()
        while len(user_list) > 0:
            # This try/except block is for resuming after the server drops
            # the connection mid-crawl.
            try:
                for item in user_list:
                    start_time = time.time()
                    user_id = item
                    print('=Writing information of [', user_id, ']...')
                    url = 'https://www.zhihu.com/api/v4/members/' + user_id + '?include=locations%2Cemployments%2Cgender%2Ceducations%2Cbusiness%2Cvoteup_count%2Cthanked_Count%2Cfollower_count%2Cfollowing_count%2Ccover_url%2Cfollowing_topic_count%2Cfollowing_question_count%2Cfollowing_favlists_count%2Cfollowing_columns_count%2Canswer_count%2Carticles_count%2Cpins_count%2Cquestion_count%2Ccommercial_question_count%2Cfavorite_count%2Cfavorited_count%2Clogs_count%2Cmarked_answers_count%2Cmarked_answers_text%2Cmessage_thread_token%2Caccount_status%2Cis_active%2Cis_force_renamed%2Cis_bind_sina%2Csina_weibo_url%2Csina_weibo_name%2Cshow_sina_weibo%2Cis_blocking%2Cis_blocked%2Cis_following%2Cis_followed%2Cmutual_followees_count%2Cvote_to_count%2Cvote_from_count%2Cthank_to_count%2Cthank_from_count%2Cthanked_count%2Cdescription%2Chosted_live_count%2Cparticipated_live_count%2Callow_message%2Cindustry_category%2Corg_name%2Corg_homepage%2Cbadge%5B%3F(type%3Dbest_answerer)%5D.topics'
                    req = request.Request(url)
                    raw_data = self.opener.open(req).read()
                    json_data = json.loads(raw_data)
                    # Extract keys and values from the API response.
                    pic_url = json_data["avatar_url"].split('_')[0] + '_xll.jpg'
                    number_id = json_data["id"]
                    user_name = json_data["name"]
                    # Education: tolerate missing/empty fields.
                    if "educations" in json_data:
                        if len(json_data["educations"]) != 0:
                            if "school" in json_data["educations"][0]:
                                university = json_data["educations"][0]["school"]["name"]
                            else:
                                university = 'None'
                            if "major" in json_data["educations"][0]:
                                major = json_data["educations"][0]["major"]["name"]
                            else:
                                major = 'None'
                        else:
                            university = 'None'
                            major = 'None'
                    else:
                        university = 'None'
                        major = 'None'
                    # Employments: tolerate missing/empty fields.
                    if "employments" in json_data:
                        if len(json_data["employments"]) != 0:
                            if "company" in json_data["employments"][0]:
                                company = json_data["employments"][0]["company"]["name"]
                            else:
                                company = 'None'
                            # NOTE(review): the guard tests the key
                            # "occupation" but reads the key "job" — confirm
                            # which key the API actually uses; a mismatch
                            # raises KeyError and is swallowed by the outer
                            # except below.
                            if "occupation" in json_data["employments"][0]:
                                occupation = json_data["employments"][0]["job"]["name"]
                            else:
                                occupation = 'None'
                        else:
                            company = 'None'
                            occupation = 'None'
                    else:
                        company = 'None'
                        occupation = 'None'
                    # Location.
                    if "locations" in json_data:
                        if len(json_data["locations"]) != 0:
                            location = json_data["locations"][0]["name"]
                        else:
                            location = 'None'
                    else:
                        location = 'None'
                    # Business / industry.
                    if "business" in json_data:
                        industry = json_data["business"]["name"]
                    else:
                        industry = 'None'
                    intro = json_data["headline"]
                    autobiography = json_data["description"]
                    user_type = json_data["type"]
                    follower_count = json_data["follower_count"]
                    following_count = json_data["following_count"]
                    answers_count = json_data["answer_count"]
                    articles_count = json_data["articles_count"]
                    if json_data["gender"] == 1:
                        gender = 'male'
                    else:
                        gender = 'female'
                    data = {
                        'id': number_id,
                        'user_id': user_id,
                        'name': user_name,
                        'gender': gender,
                        'university': university,
                        'major': major,
                        'industry': industry,
                        'company': company,
                        'occupation': occupation,
                        'location': location,
                        'intro': intro,
                        'autobiography': autobiography,
                        'user_type': str(user_type),
                        'follower_count': str(follower_count),
                        'following_count': str(following_count),
                        'answer-count': str(answers_count),
                        'articles_count': str(articles_count)
                    }
                    # Create the per-user output folder if needed.
                    if not (os.path.exists(os.path.join('./data/', user_name))):
                        os.makedirs(os.path.join('./data/', user_name))
                    path = os.path.join('./data/', user_name) + '/'
                    # Generate the store path (without extension).
                    store_path = path + user_id
                    # Write the avatar picture.
                    with open(store_path + '.png', 'wb') as f:
                        f.write(self.bytes_getter(pic_url))
                    target_page_url = 'https://www.zhihu.com/people/' + user_id + '/activities'
                    # Write the activities page snapshot.
                    with open(store_path + '.html', 'wb') as f:
                        f.write(self.bytes_getter(target_page_url))
                    # Write the profile fields, one JSON-quoted line each.
                    with open(store_path + '.txt', 'w', encoding='utf-8') as f:
                        for item, value in data.items():
                            line = json.dumps(item + ":" + value, ensure_ascii=False) + "\n"
                            f.write(line)
                    count += 1
                    print('Wrote Successfully! Time consumed :', '%.2f' % (time.time() - start_time), "seconds. Crawled ", count, "users till now.")
                    print('[Total time:', '%.2f' % (time.time() - initial_time), 'seconds]')
                    # Throttle: random cool-down every 10 users, plus a small
                    # fixed delay per user, to avoid tripping rate limits.
                    if count % 10 == 0:
                        cool_start = time.time()
                        cool_down_time = random.randint(0, 10)
                        print('#' * 20, 'Cooling down for', cool_down_time, ' seconds.', '#' * 20)
                        time.sleep(cool_down_time)
                    time.sleep(1.5)
                    # Record the position so we can resume if an exception
                    # happens on the next iteration.
                    self.position = user_id
            except Exception as e:
                print('Error! ', e)
            # Recover from an exception: resume crawling after the last
            # successfully processed user.
            # NOTE(review): this finally also runs after a clean pass (it
            # then empties user_list and ends the loop) — the 10s sleep and
            # "Resuming" banner therefore print even on success.
            finally:
                index = user_list.index(self.position) + 1
                user_list = user_list[index:]
                time.sleep(10)
                print('#' * 20, 'Resuming from server shutdown', '#' * 20)

    def unicode_getter(self, target_url):
        """Fetch *target_url* through the logged-in opener and decode as UTF-8."""
        return self.opener.open(target_url).read().decode('utf-8')

    def bytes_getter(self, target_url):
        """Fetch *target_url* through the logged-in opener as raw bytes."""
        return self.opener.open(target_url).read()
# Record total running time of the script.
start_time = time.time()
# NOTE(review): this rebinds the module name 'Zhihu' from the class to an
# instance, shadowing the class — consider a lowercase instance name.
Zhihu = Zhihu()
Zhihu.login()
#Zhihu.profile_collector('./user_list.txt')
end_time = time.time()
print("[Totally elapsed: " , '%.2f'%(end_time - start_time), " seconds.]")
|
14,519 | 11203073788b38ed27d4ad89a93870582165e46b | from PyQt5 import QtCore
from PyQt5 import Qt
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QTableWidgetItem
import config_enterTable_widget
class EnterTableWidget(QtWidgets.QDialog, config_enterTable_widget.Ui_enter_Table_Widget):
    """Dialog that lets the user paste raw data and load it into the main
    window's table widget."""

    def __init__(self, parent=None):
        # super() gives access to the variables/methods generated in the
        # Qt Designer output file (config_enterTable_widget.py).
        super().__init__(parent)
        self.main = parent
        self.initUi()

    def initUi(self):
        """Build the UI from the Designer file and wire up signals."""
        self.setupUi(self)
        self.load_And_Display_Button.clicked.connect(self.display_data)

    def set_size_data_Table_Widget(self, data_arrays):
        """Resize the main table to fit the longest column, plus some slack."""
        rows = max(len(array) for array in data_arrays)
        columns = len(data_arrays)
        self.main.data_Table_Widget.setRowCount(rows + 10)
        self.main.data_Table_Widget.setColumnCount(columns + 2)

    def fill_data_Table(self, data_arrays):
        """Write each input array into its own column of the main table.

        BUG FIX: the original located cells with list.index(value), which
        returns the FIRST occurrence — duplicate values were all written to
        the same cell.  enumerate() yields the true row/column positions.
        """
        for column, data_array in enumerate(data_arrays):
            for row, number in enumerate(data_array):
                self.main.data_Table_Widget.setItem(row, column,
                                                    QTableWidgetItem(f'{number}'))

    def load_data_from_input_field(self):
        """Parse the plain-text entry field into arrays of values."""
        from data_functions import create_data_arrays_from_str
        data_str = self.entry_Field_PlainText_Widget.toPlainText() + '\n'
        return create_data_arrays_from_str(data_str)

    def display_data(self):
        """Clear the main table and display the freshly parsed input data."""
        self.main.data_Table_Widget.clear()
        data_arrays = self.load_data_from_input_field()
        # NOTE(review): these helpers are defined on *this* class but are
        # invoked on self.main — confirm the parent window also provides
        # them, otherwise call self.set_size_data_Table_Widget(...) etc.
        self.main.set_size_data_Table_Widget(data_arrays)
        self.main.fill_data_Table(data_arrays)
|
14,520 | c4b1ddf5cec23a59ba47ae5aa7613644b1c488ec | import os
import numpy as np
import scipy.io as spio
from scipy.ndimage import gaussian_filter1d
def jittered_neuron(t=None, feature=None, n_trial=61, jitter=1.0, gain=0.0, noise=0.05, seed=1234):
"""Generates a synthetic dataset of a single neuron with a jittered firing pattern.
Parameters
----------
t : array_like
vector of within-trial timepoints
feature : function
produces a jittered instance of the feature (takes time shift as an input)
n_trial : int
number of trials
jitter : float
standard deviation of trial-to-trial shifts
gain : float
standard deviation of trial-to-trial changes in amplitude
noise : float
scale of additive gaussian noise
seed : int
seed for the random number generator
Returns
-------
canonical_feature : array_like
vector of firing rates on a trial with zero jitter
aligned_data : array_like
n_trial x n_time x 1 array of de-jittered noisy data
jittered_data : array_like
n_trial x n_time x 1 array of firing rates with jitter and noise
"""
# default time base
if t is None:
t = np.linspace(-5, 5, 150)
# default feature
if feature is None:
feature = lambda tau: np.exp(-(t-tau)**2)
# noise matrix
np.random.seed(seed)
noise = noise*np.random.randn(n_trial, len(t))
# generate jittered data
gains = 1.0 + gain*np.random.randn(n_trial)
shifts = jitter*np.random.randn(n_trial)
jittered_data = np.array([g*feature(s) for g, s in zip(gains, shifts)]) + noise
# generate aligned data
aligned_data = np.array([g*feature(0) for g in gains]) + noise
return feature(0), np.atleast_3d(aligned_data), np.atleast_3d(jittered_data)
|
14,521 | d141bb4118adbe017f54e9013e07ab5218d2fa55 | import mysql.connector
from mysql.connector.constants import ClientFlag
print ("MySQL connector module import succeed")
# Connection settings for a Cloud SQL instance over mutual-TLS.
# SECURITY NOTE(review): root credentials and the host IP are hard-coded —
# move them to environment variables or a config file before sharing.
config = {
    'user':'root',
    'password':'root',
    'host':'35.229.99.14',
    'client_flags':[ClientFlag.SSL],
    'database':'room1',
    'ssl_ca':'/home/pi/server-ca.pem',
    'ssl_cert':'/home/pi/client-cert.pem',
    'ssl_key':'/home/pi/client-key.pem',
}
# Open and immediately close a connection: a smoke test of the SSL setup.
conn = mysql.connector.connect(**config)
conn.close()
|
14,522 | 5cd9f33a94835c5e1a84925af1b47ba56b9ba5ee | """
<Program>
test_sqli.py
<Purpose>
Some unit tests for the sqlite3 interface for the dependency tools.
"""
import depresolve
import depresolve.depdata as depdata
import testdata
import depresolve.sql_i as sqli
def main():
    """Exercise the sqlite interface end-to-end on the moderate test data."""
    deps = testdata.DEPS_MODERATE
    versions_by_package = depdata.generate_dict_versions_by_package(deps)
    (edeps, packs_wout_avail_version_info, dists_w_missing_dependencies) = \
        depdata.elaborate_dependencies(deps, versions_by_package)
    # Sanity-check the fixture data before touching the database.
    assert depdata.are_deps_valid(testdata.DEPS_MODERATE) and \
        depdata.are_deps_valid(testdata.DEPS_SIMPLE), \
        'The test dependencies are coming up as invalid for some reason....'
    # Clear any pre-existing test database.
    sqli.initialize(db_fname='data/test_dependencies.db')
    sqli.delete_all_tables()
    sqli.populate_sql_with_full_dependency_info(
        edeps, versions_by_package, packs_wout_avail_version_info,
        dists_w_missing_dependencies, db_fname='data/test_dependencies.db')
    print('All tests in main() OK.')
if __name__ == '__main__':
    main()
|
14,523 | c9792a255da837ffcddd29ac22143b486af0d7f0 | import os
import socket
import threading
import time
import gbn
CLIENT_SEND_HOST = '127.0.0.1'
CLIENT_SEND_PORT = 8080
CLIENT_SEND_ADDR = (CLIENT_SEND_HOST, CLIENT_SEND_PORT)
CLIENT_RECV_HOST = '127.0.0.1'
CLIENT_RECV_PORT = 8023
CLIENT_RECV_ADDR = (CLIENT_RECV_HOST, CLIENT_RECV_PORT)
CLIENT_DIR = os.path.dirname(__file__) + '/client'
SERVER_SEND_HOST = '127.0.0.1'
SERVER_SEND_PORT = 8023
SERVER_SEND_ADDR = (SERVER_SEND_HOST, SERVER_SEND_PORT)
SERVER_RECV_HOST = '127.0.0.1'
SERVER_RECV_PORT = 8080
SERVER_RECV_ADDR = (SERVER_RECV_HOST, SERVER_RECV_PORT)
SERVER_DIR = os.path.dirname(__file__) + '/server'
def send(sender, directory):
    """Split <directory>/data.jpg into 2048-byte packets and transmit them
    through the go-back-N sender, blocking on ACKs each time the window fills."""
    fp = open(directory + '/data.jpg', 'rb')
    dataList = []
    while True:
        data = fp.read(2048)
        if len(data) <= 0:
            break
        dataList.append(data)
    print('The total number of data packets: ', len(dataList))
    pointer = 0
    while True:
        while sender.next_seq < (sender.send_base + sender.window_size):
            if pointer >= len(dataList):
                break
            # Send window is not yet full: push the next packet.
            data = dataList[pointer]
            checksum = gbn.getChecksum(data)
            if pointer < len(dataList) - 1:
                sender.packets[sender.next_seq] = sender.make_pkt(sender.next_seq, data, checksum,
                                                                  stop=False)
            else:
                # Final packet carries stop=True so the receiver can finish.
                sender.packets[sender.next_seq] = sender.make_pkt(sender.next_seq, data, checksum,
                                                                  stop=True)
            print('Sender send packet:', pointer)
            sender.udp_send(sender.packets[sender.next_seq])
            # Sequence numbers wrap at 256 (one byte).
            sender.next_seq = (sender.next_seq + 1) % 256
            pointer += 1
        # NOTE(review): wait_ack's return value is ignored — confirm intent.
        flag = sender.wait_ack()
        if pointer >= len(dataList):
            break
    fp.close()
def receive(receiver, directory):
    """Receive go-back-N packets and append them to a timestamp-named .jpg in
    *directory* until the sender signals end-of-stream (reset)."""
    fp = open(directory + '/' + str(int(time.time())) + '.jpg', 'ab')
    reset = False
    while True:
        data, reset = receiver.wait_data()
        print('Data length:', len(data))
        fp.write(data)
        if reset:
            # End-of-stream: rewind the expected sequence number for reuse.
            receiver.expect_seq = 0
            fp.close()
            break
# Receiver threads: "client" and "server" each listen on their own UDP
# socket and write whatever arrives into their respective directories.
clientReceiverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
clientReceiverSocket.bind(CLIENT_RECV_ADDR)
clientReceiver = gbn.GBNReceiver(clientReceiverSocket)
thread1 = threading.Thread(target=receive, args=(clientReceiver, CLIENT_DIR))
thread1.start()
serverReceiverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
serverReceiverSocket.bind(SERVER_RECV_ADDR)
serverReceiver = gbn.GBNReceiver(serverReceiverSocket)
thread2 = threading.Thread(target=receive, args=(serverReceiver, SERVER_DIR))
thread2.start()
# Senders run on the main thread, one after the other, after a keypress.
clientSenderSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
clientSender = gbn.GBNSender(clientSenderSocket, CLIENT_SEND_ADDR)
serverSenderSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
serverSender = gbn.GBNSender(serverSenderSocket, SERVER_SEND_ADDR)
_ = input('Press key to continue:')
send(clientSender, CLIENT_DIR)
send(serverSender, SERVER_DIR)
|
14,524 | ca335abef14163e5d214295deb6620f030c5c08a | from pymongo import MongoClient
from flask import Flask, g, render_template, abort, request, jsonify
from bson.json_util import dumps
from bson.objectid import ObjectId
# Setup Flask
app = Flask(__name__)
app.config.from_object(__name__)
# Main api page
@app.route('/api/')
def api_help():
    """Render the API help/landing page."""
    return render_template('games_api.html'), 200
# GET ALL /api/v1/games
@app.route('/api/v1/games', methods=['GET'])
def get_all_games():
    """Return every game document as a JSON array."""
    # SECURITY NOTE(review): the URI embeds credentials and is duplicated in
    # every handler — move it to app config.  A new client is opened per
    # request and never closed.
    client = MongoClient("mongodb+srv://MIS5400Admin:5400MIS@mis5400-rgwuo.mongodb.net/test?retryWrites=true&w=majority")
    games_db = client.bgg
    game_collection = games_db.user_games.find()
    return dumps(game_collection), 200
# GET ONE /api/v1/game/gameid
@app.route('/api/v1/games/<string:id>', methods=['GET'])
def get_one_game(id):
    """Return the game whose numeric `id` field matches, or JSON null."""
    client = MongoClient(
        "mongodb+srv://MIS5400Admin:5400MIS@mis5400-rgwuo.mongodb.net/test?retryWrites=true&w=majority")
    games_db = client.bgg
    # Matches on the application-level integer id, not Mongo's _id.
    game = games_db.user_games.find_one({'id': int(id)})
    return dumps(game), 200
# Add one game /api/v1/games
@app.route("/api/v1/games", methods=['POST'])
def add_game():
    """Insert the posted JSON body as a new game with the next free id."""
    try:
        client = MongoClient("mongodb+srv://MIS5400Admin:5400MIS@mis5400-rgwuo.mongodb.net/test?retryWrites=true&w=majority")
        games_db = client.bgg
        game_collection = games_db.user_games
        # Derive the next id from the current maximum.
        # NOTE(review): this read-then-insert is racy under concurrent
        # requests; two inserts can get the same id.
        max_record = (game_collection.find().sort("id", -1).limit(1))
        max_num = 0
        for entry in max_record:
            max_num = (entry["id"] + 1)
        new_game = request.get_json()
        new_game["id"] = max_num
        game_collection.insert_one(new_game)
        return "Game Inserted", 201
    except Exception as e:
        return "Problem Inserting Game", 500
#Delete an entry
@app.route('/api/v1/games/<string:id>', methods=['DELETE'])
def delete_one_game(id):
    """Delete the game with the given numeric id (200 even if none matched)."""
    try:
        client = MongoClient("mongodb+srv://MIS5400Admin:5400MIS@mis5400-rgwuo.mongodb.net/test?retryWrites=true&w=majority")
        games_db = client.bgg
        game = games_db.user_games.delete_one({"id":int(id)})
        return 'Game Deleted', 200
    except Exception as e:
        return "Problem Deleting Game", 500
if __name__ == '__main__':
    # NOTE(review): binds all interfaces with the Flask dev server — fine in
    # a container, but do not expose this directly in production.
    app.run(host="0.0.0.0")
14,525 | efe25827c2a539f8feb05b5e502ff54884e0b5c4 | import pytest
from fhepy.polynomials import Polynomials
from fhepy.zmodp import ZMod
ZMod2 = ZMod(2)
ZMod7 = ZMod(7)
ZMod11 = ZMod(11)
@pytest.mark.parametrize('field,coefficients,expected', [
    (ZMod2, [0], "0"),
    (ZMod2, [1], "1"),
    (ZMod2, [3], "1"),
    (ZMod7, [6], "6"),
    (ZMod7, [7], "0")])
def test_constant(field, coefficients, expected):
    """Degree-0 polynomials print as the constant reduced mod p."""
    poly = Polynomials(field)
    assert str(poly(coefficients)) == expected
@pytest.mark.parametrize('field,coefficients,expected', [
    (ZMod2, [0, 1], "x"),
    (ZMod2, [1, 1], "x + 1"),
    (ZMod2, [3, 3], "x + 1"),
    (ZMod7, [6, 1], "x + 6"),
    (ZMod7, [7, 6], "6x")])
def test_degree_1(field, coefficients, expected):
    """Degree-1 polynomials: coefficients reduce mod p; zero terms drop."""
    poly = Polynomials(field)
    assert str(poly(coefficients)) == expected
@pytest.mark.parametrize('field,coefficients,expected', [
    (ZMod2, [0, 1, 1], "x**2 + x"),
    (ZMod2, [1, 1, 1], "x**2 + x + 1"),
    (ZMod2, [1, 0, 1], "x**2 + 1"),
    (ZMod2, [0, 0, 1], "x**2"),
    (ZMod2, [3, 3, 3], "x**2 + x + 1"),
    (ZMod7, [6, 0, 1], "x**2 + 6"),
    (ZMod7, [7, 0, 6], "6*(x**2)"),
    (ZMod7, [6, 1, 1], "x**2 + x + 6"),
    (ZMod7, [7, 1, 6], "6*(x**2) + x"),
    (ZMod7, [6, 2, 1], "x**2 + 2x + 6"),
    (ZMod7, [7, 5, 6], "6*(x**2) + 5x")
    ])
def test_degree_2(field, coefficients, expected):
    """Degree-2 polynomials: leading coefficient formatting and reduction."""
    poly = Polynomials(field)
    assert str(poly(coefficients)) == expected
|
14,526 | e02e670474a8f16ae32b195cc64ea3665cb6b8ac |
"""#####################################################################################################################
# This is a simple game in which you will enter different combinations of card and it will tell you to winning situation
#####################################################################################################################"""
""""# This is the main input list which will took input from the user in the form of two character values
# First character is the suit
# 2nd Character is the face value
# Note: All the inputs will be in a single list """
input_list = []
# We are splitting main input list into different small string according to their suits
hearts_list, spades_list, clubs_list, diamonds_list = [], [], [], []
# face_value variable is the list which will store only the inputs suits values
face_value = []
# cards_values is the list which will store all the face values
cards_values = []
# Here we are counting the occurrence of suits values and storing them in corresponding variables
# We are making the 4 different lists each contain only type of suits
hearts_count, spades_count, diamonds_count, clubs_count = 0, 0, 0, 0
def initial_fun():
    """Split input_list by suit into the four suit lists and count each suit.

    Also fills the module-level helper lists from each two-character card
    code.  NOTE(review): despite the names, face_value holds the SUIT char
    (x[1]) and cards_values holds the RANK char (x[0]).
    """
    global hearts_count, spades_count, diamonds_count, clubs_count
    for x in input_list:
        face_value.append(x[1])
        cards_values.append(x[0])
    for idx in range(len(face_value)):
        # Counting Hearts and storing in separate list
        if face_value[idx] == 'H':
            hearts_count += 1
            hearts_list.append(input_list[idx])
        # Counting Spades and storing in separate list
        elif face_value[idx] == 'S':
            spades_count += 1
            spades_list.append(input_list[idx])
        # Counting Diamonds and storing in separate list
        elif face_value[idx] == 'D':
            diamonds_count += 1
            diamonds_list.append(input_list[idx])
        # Counting Clubs and storing in separate list
        elif face_value[idx] == 'C':
            clubs_count += 1
            clubs_list.append(input_list[idx])
"""******************************************************************************************
# list_number_asg method will take list of face values and return its with assign number
******************************************************************************************"""
def list_number_asg(card_val):
    """Replace face-card letters in *card_val* with numeric ranks, in place,
    and return the list.

    A->14, K->13, Q->12, J->11, T->10.  Digit cards ('2'..'9') are left as
    strings, matching the original contract.  The original rescanned the
    whole list len(card_val) times (O(n^2)); a single enumerate pass is
    enough and behaves identically.
    """
    face_ranks = {'A': 14, 'K': 13, 'Q': 12, 'J': 11, 'T': 10}
    for idx, value in enumerate(card_val):
        if value in face_ranks:
            card_val[idx] = face_ranks[value]
    return card_val
"""*********************************************************************************************
# face_value_count function will count the number of occurrences of different card
# and store them in a global list
**********************************************************************************************"""
# Occurrence count of each face value present in the hand, appended in
# rank order from high (A) to low (2).
cards_count_list = []
def face_values_count(card_ls):
    """Append to cards_count_list the count of every face value that occurs
    in *card_ls*, scanning ranks from Ace down to 2.

    BUG FIX: the original enumerated A,K,Q,T,9..2 and omitted 'J' entirely,
    so hands containing jacks were never counted.
    """
    for face in ('A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3', '2'):
        if face in card_ls:
            cards_count_list.append(card_ls.count(face))
"""*********************************************************************************************
# The royal_ flush function will take input argument a list of same suits and return true / false
**********************************************************************************************"""
def royal_flush(suit_ls):
    """Return True when *suit_ls* (cards of a single suit, e.g. 'AH')
    contains all five royal ranks A, K, Q, J, T.

    A royal flush needs at least five cards of the same suit, so shorter
    lists are rejected immediately.  (Idiom cleanup: `len==5 or len>5`
    collapsed to `>= 5`; redundant `True if result else False` removed.)
    """
    if len(suit_ls) < 5:
        return False
    # First character of each card is its rank.
    ranks = [card[0] for card in suit_ls]
    return all(rank in ranks for rank in ('A', 'K', 'Q', 'J', 'T'))
"""*********************************************************************************************
# straight_flush function will take input argument a list of same suits and return true / false
**********************************************************************************************"""
def straight_flush(suit_ls):
    """Return True when *suit_ls* (cards of a single suit) contains five
    consecutive ranks, i.e. a straight flush.

    Bug fix: the original only inspected the run counter *after* the loop,
    and the counter was reset to zero on any gap -- so a completed straight
    followed by a lower stray card (e.g. A K Q J T plus a 2 of the same
    suit) was reported as False.  We now return as soon as a run of five
    is seen.
    """
    if len(suit_ls) < 5:
        return False
    # Numeric rank of every card in this suit, highest first.
    card_val = list_number_asg([card[0] for card in suit_ls])
    card_val = [int(value) for value in card_val]
    card_val.sort(reverse=True)
    consecutive = 0
    for idx in range(len(card_val) - 1):
        if card_val[idx] - 1 == card_val[idx + 1]:
            consecutive += 1
            # Four consecutive "steps" means five consecutive cards.
            if consecutive >= 4:
                return True
        else:
            consecutive = 0
    return False
"""*********************************************************************************************
# for_of_a_kind is the function which will take cards list as argument and return True / False
**********************************************************************************************"""
def four_of_a_kind(card_val):
    """Return True when any rank occurs exactly four times in *card_val*.

    (Idiom cleanup: the original counted every element once per position,
    O(n^2) with redundant work; counting each distinct value once suffices.)
    """
    return any(card_val.count(value) == 4 for value in set(card_val))
"""*********************************************************************************************
# This function will take cards list as argument and will return true / false
**********************************************************************************************"""
def full_house():
    """Return True when the hand holds a triple plus a separate pair
    (or a second triple), based on the global rank-count list."""
    # Populate the shared count list on first use.
    if not cards_count_list:
        face_values_count(cards_values)
    # Work on a copy so the shared list is left untouched.
    counts = cards_count_list[:]
    if max(counts) != 3:
        return False
    # Drop one triple, then look for a pair (or another triple) in the rest.
    counts.remove(3)
    return any(count in (2, 3) for count in counts)
"""*********************************************************************************************
# This function will take list of different suits as argument and return true / false
**********************************************************************************************"""
def flush(ls):
    """Return True when *ls* (the cards of one suit) holds at least five
    cards, i.e. the hand contains a flush in that suit.

    (Idiom cleanup: `True if len==5 or len>5 else False` is just `len >= 5`.)
    """
    return len(ls) >= 5
"""*********************************************************************************************
# This function will take cards values ad argument and return true / false
**********************************************************************************************"""
def straight(card_val):
    """Return True when *card_val* contains five consecutive ranks.

    Bug fix: duplicate ranks used to reset the consecutive counter, so a
    hand such as 2 3 3 4 5 6 was not recognised as a straight.  Ranks are
    de-duplicated (set) before the ascending scan.
    """
    values = sorted({int(value) for value in list_number_asg(card_val)})
    consecutive = 0
    for idx in range(len(values) - 1):
        if values[idx] + 1 == values[idx + 1]:
            consecutive += 1
            # Four consecutive "steps" means five consecutive ranks.
            if consecutive >= 4:
                return True
        else:
            consecutive = 0
    return False
"""*********************************************************************************************
# Three of Kind function will take list of card values as argument and return True / False
**********************************************************************************************"""
def three_of_kind(card_val):
    """Return True when any rank occurs exactly three times in *card_val*.

    Bug fix: the original `return ... if ... else ...` sat inside the loop,
    so it returned on the very first iteration and only ever checked the
    highest rank -- a triple of any lower rank was missed.
    """
    face_to_rank = {'A': 14, 'K': 13, 'Q': 12, 'J': 11, 'T': 10}
    # Normalise every entry to its integer rank, mutating in place just as
    # the original did via list_number_asg + int casts.
    for idx, value in enumerate(card_val):
        card_val[idx] = face_to_rank[value] if value in face_to_rank else int(value)
    card_val.sort(reverse=True)
    return any(card_val.count(value) == 3 for value in card_val)
"""*********************************************************************************************
# The Two pair function will take values list as argument and returns true / false
**********************************************************************************************"""
def two_pair(card_ls):
    """Return True when the rank counts show one pair plus at least one more
    pair, with no triple or better present."""
    # Populate the shared count list on first use.
    if not cards_count_list:
        face_values_count(card_ls)
    # Work on a copy so the shared list is left untouched.
    counts = cards_count_list[:]
    if max(counts) != 2:
        return False
    # Drop one pair, then look for any remaining pair-or-better.
    counts.remove(2)
    return any(count >= 2 for count in counts)
"""*********************************************************************************************
# pair function will take cards list as argument and return true / false
**********************************************************************************************"""
def pair(card_ls):
    """Return True when the most frequent rank in the hand occurs exactly twice."""
    # Populate the shared count list on first use.
    if not cards_count_list:
        face_values_count(card_ls)
    return max(cards_count_list) == 2
"""*********************************************************************************************
# The High card function will take cards Values as argument and return true / false
**********************************************************************************************"""
def high_card(card_val):
    """Return the card from the global input_list holding the highest rank.

    *card_val* holds the rank characters in the same order as input_list;
    it is converted to integer ranks in place, as before.
    """
    card_val = list_number_asg(card_val)
    for idx, value in enumerate(card_val):
        card_val[idx] = int(value)
    top = max(card_val)
    return input_list[card_val.index(top)]
def main():
    """Prompt for a ten-card hand and print the best poker hand it contains.

    Bug fixes relative to the original:
      * raw_input() is Python 2 only (the rest of the file uses Python 3
        style print calls) -- a compat shim picks whichever exists;
      * every straight-flush branch printed "Hearts" regardless of suit;
      * the "Straight" branch called straight_flush(input_list) even though
        the dedicated straight() check exists and was never used;
      * the High Card branch printed a dangling colon with no card.
    """
    global input_list
    print("Kindly Enter input in this form (TH JH QC QD QS QH KH 9H 2S 6S)")
    # raw_input exists only on Python 2; fall back to input() on Python 3.
    try:
        read_line = raw_input
    except NameError:
        read_line = input
    item = ""
    try:
        item = read_line("Enter your cards : ")
    except ValueError:
        print("Kindly Enter Correct Values !!!! ")
    input_list = item.split()
    initial_fun()
    # Checks run from strongest hand to weakest; the first hit wins.
    if royal_flush(hearts_list):
        best = "Royal Flush of Hearts"
    elif royal_flush(spades_list):
        best = "Royal Flush of Spades"
    elif royal_flush(clubs_list):
        best = "Royal Flush of Clubs"
    elif royal_flush(diamonds_list):
        best = "Royal Flush of Diamonds"
    elif straight_flush(hearts_list):
        best = "Straight Flush of Hearts Cards"
    elif straight_flush(spades_list):
        best = "Straight Flush of Spades Cards"
    elif straight_flush(clubs_list):
        best = "Straight Flush of Clubs Cards"
    elif straight_flush(diamonds_list):
        best = "Straight Flush of Diamonds Cards"
    elif four_of_a_kind(cards_values):
        best = "Four of a kind"
    elif full_house():
        best = "Full House"
    elif flush(hearts_list):
        best = "Flush of Hearts"
    elif flush(spades_list):
        best = "Flush of Spades"
    elif flush(clubs_list):
        best = "Flush of clubs"
    elif flush(diamonds_list):
        best = "Flush of diamonds"
    elif straight(cards_values):
        best = "Straight"
    elif three_of_kind(cards_values):
        best = "Three of A Kind"
    elif two_pair(cards_values):
        best = "Two Pairs"
    elif pair(cards_values):
        best = "High Card : " if False else "Pair"
    else:
        best = "High Card : " + str(high_card(cards_values))
    print(str(input_list) + " : Best Hand : " + best)
main()
|
14,527 | 569c9c1ccc71a88e470668ad95df03c0c12cc8d4 | import datetime
#import time
#import caldendar
# Capture the current local date/time once and derive everything from it.
dt_now = datetime.datetime.now()
print(dt_now)
print("Year:", dt_now.year)
print("Month:", dt_now.month)
# weekday() returns 0 for Monday .. 6 for Sunday (a day index, not a week number).
print("Week:", dt_now.weekday())
tdelta = datetime.timedelta(days= 5)
print("-5 days:", dt_now - tdelta)
tdelta = datetime.timedelta(seconds= 5)
print("+5 seconds:", dt_now + tdelta)
|
14,528 | 35fac0d3c95d1e8ac118b12d4c9d9845e1313cfe | import yaml
def get_data_with_file(file_name):
    """Read ./data/<file_name>.yml and return the parsed YAML document."""
    path = './data/' + file_name + '.yml'
    with open(path, 'r') as handle:
        return yaml.load(handle, Loader=yaml.FullLoader)
|
14,529 | e97ab73d4e9281a5269f64d6706e22151c3c081b | #!/usr/bin/env python
#coding: utf-8
import hashlib
import os
def md5(filename):
    """Return the hex MD5 digest of *filename*, read in 4 KiB chunks.

    If the path is not a regular file, an explanatory string is returned
    instead (preserving the original API).
    """
    if not os.path.isfile(filename):
        return "{} is not exists.".format(filename)
    digest = hashlib.md5()
    with open(filename, 'rb') as handle:
        while True:
            chunk = handle.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
# Script entry point.  NOTE: statement-form print below makes this file
# Python 2 only.
if __name__ == "__main__":
    print md5("/opt/saltapi.p")
|
14,530 | e4b0f7f5e39228036189a8b78747fc8605ee00ef | import re
op1=open("Book1.txt","r")
book1=op1.read()
longest1=0
for i in book1.split("."):
b=i.split()
for re in range(len(b)):
if len(b[re]) > longest1:
longest1 = len(b[re])
longest_word1 = b[re]
print("Longest word from book1 is : ",longest_word1)
op2=open("Book2.txt","r")
book2=op2.read()
A=[]
longest2=0
for i in book2.split('.'):
b=i.split()
for re in range(len(b)):
if len(b[re]) > longest2:
longest2 = len(b[re])
longest_word2 = b[re]
print("Longest word from book2 is : ",longest_word2)
op3=open("Book3.txt","r")
book3=op3.read()
longest3=0
for i in book3.split('.'):
b=i.split()
for re in range(len(b)):
if len(b[re]) > longest3:
longest3 = len(b[re])
longest_word3 = b[re]
print("Longest word from book1 is : ",longest_word3)
if (longest_word1>longest_word2>longest_word1):
print("biggest word is",longest_word1)
elif (longest_word2>longest_word3):
print("Biggest word is",longest_word2)
else:
print("Biggest word is ",longest_word3)
#ma=max(longest_word1,longest_word2,longest_word3)
#print(ma) |
14,531 | 81031cbf5a53e66589f346db24405975e2683561 | import tkinter as tk
# Seconds elapsed since the window opened, shown in the label.
counter = 0
def counter_label(label):
    """Start a once-per-second timer that displays an incrementing count."""
    def count():
        global counter
        counter+=1
        label.config(text=str(counter))
        # Re-schedule ourselves in 1000 ms so the label keeps updating.
        label.after(1000,count)
    count()
root = tk.Tk()
root.title("Counting The Clicks")
label = tk.Label(root, fg="green")
label.pack()
counter_label(label)
# NOTE(review): pack() returns None, so btn is None here -- harmless, but no
# usable reference to the button is kept.
btn = tk.Button(root, text='STOP', width=25, command= root.destroy).pack()
root.mainloop()
|
14,532 | 28d38fb90aee0f28ba944b29c8c3be1b881ada9d | from Procedure import procedure
import Procedures_Motion
import Procedures_Pumps
class PrintLine(procedure):
    """Procedure that prints one line of material between two points:
    move to start, pump on, move to end, pump off."""
    def Prepare(self):
        """Declare requirements and build the sub-procedures used by Plan."""
        self.name = 'PrintLine'
        self.requirements['startpoint']={'source':'apparatus', 'address':'', 'value':'', 'desc':'Reference point'}
        self.requirements['endpoint']={'source':'apparatus', 'address':'','value':'', 'desc':'Point relative to reference position'}
        self.requirements['material']={'source':'apparatus', 'address':'','value':'', 'desc':'material to be printed'}
        self.testmove = Procedures_Motion.RefRelLinearMotion(self.apparatus, self.executor)
        self.testpumpon = Procedures_Pumps.PumpOn(self.apparatus, self.executor)
        self.testpumpoff = Procedures_Pumps.PumpOff(self.apparatus, self.executor)
    def Plan(self):
        """Queue the motion/pump sequence for one printed line.

        Devices are resolved by descriptor: the pump matching the requested
        material, and the (single) motion device.
        """
        pumpname = self.apparatus.findDevice({'descriptors':['pump',self.requirements['material']['value']] })
        motionname = self.apparatus.findDevice({'descriptors':'motion'})
        runmove = self.apparatus.GetEproc(motionname, 'Run')
        # Move to the start point, then start extruding.
        self.testmove.Do({'relpoint':self.requirements['startpoint']['value'], 'refpoint':self.apparatus['information']['alignments']['startpoint'], 'speed':10})
        runmove.Do()
        self.testpumpon.Do({'mid_time':1, 'pumpon_time':2, 'name':pumpname})
        # Traverse to the end point while printing, then stop the pump.
        self.testmove.Do({'relpoint':self.requirements['endpoint']['value'], 'refpoint':self.apparatus['information']['alignments']['startpoint'], 'speed':10})
        runmove.Do()
        self.testpumpoff.Do({'mid_time':1, 'pumpoff_time':3, 'name':pumpname})
|
14,533 | fd3156a0872682aa9a65a6b586c0f891460cb2a5 | from __future__ import division
#!/usr/bin/python3
# idVendor 0x0b9c
# idProduct 0x0315
import binascii
import time
import vlc
import numpy as np
# Audio sources for the two "environments".
forestFile = "/home/pi/PulsePlayer/sounds/jura.wav"
cityFile = "/home/pi/PulsePlayer/sounds/miestas.wav"
# One VLC player per ambience: `cat` plays the city track, `squirrel` the forest.
cat = vlc.MediaPlayer(cityFile)
squirrel = vlc.MediaPlayer(forestFile)
def ByteToHex( byteStr ):
    """Return the uppercase hex representation of *byteStr*.

    Each character becomes two hex digits, e.g. 'AB' -> '4142'.
    (Written for Python 2 byte strings, where iteration yields 1-char
    strings suitable for ord().)
    """
    digits = []
    for ch in byteStr:
        digits.append("%02X" % ord(ch))
    return ''.join(digits).strip()
def hex2dec (hex):
    """Convert a numeric string to int, honouring base prefixes
    ('0x..', '0b..', '0o..') via int(..., 0)."""
    return int(hex, 0)
device = "/dev/hidraw5"
f = open(device, 'r')
def checkPulse():
s = f.read(5)
byte1 = ByteToHex(s[4]+s[3])
pulse = int(str(byte1), base=16)
bpm = (1000*60/pulse)
return bpm
# Rolling window of the last 10 pulse readings; seeded with the first reading.
avgArray = checkPulse()*np.ones((10, 1))
previousMean = np.mean(avgArray)
counter = 0
# Main loop: switch between the "city" and "forest" soundscapes when the
# 10-sample mean BPM rises or falls by more than 1 versus the last switch.
while 1:
    bpm = checkPulse()
    # Ignore physiologically implausible readings (sensor noise).
    if (bpm < 120) and (bpm > 30):
        counter+=1
        print(counter)
        # Warm-up: need more than 10 valid samples before reacting.
        if counter > 10:
            avgArray = np.roll(avgArray, -1)
            avgArray[0] = bpm
            mean = np.mean(avgArray)
            print(mean, "mean")
            print(previousMean, "PREVIOUS")
            if (mean - previousMean) >= 1:
                # Pulse rising: play the city track.
                squirrel.stop()
                print("CAT")
                cat.play()
                time.sleep(3)
                previousMean = mean
                #bpm = checkPulse()
                #if (bpm <= 80):
                #    cat.stop()
                #break
            elif (mean - previousMean) < -1:
                # Pulse falling: play the forest track.
                cat.stop()
                print("SQUIRREL")
                squirrel.play()
                time.sleep(3)
                #bpm = checkPulse()
                #if (bpm > 80):
                #    squirrel.stop()
                #break
                # break
                previousMean = mean
            else:
                # No significant change since the last switch.
                print("WAITING")
                bpm = checkPulse()
    time.sleep(0.1)
    print("BPM:", bpm)
print("done")
|
14,534 | 9b5d7f54d23bb62ada83125cd772451235ddf88e | # List of icons
# http://kml4earth.appspot.com/icons.html
import pygmaps
# lat_long_list = [(30.3358376, 77.8701919), (30.307977, 78.048457), (30.3216419, 78.0413095), (30.3427904, 77.886958), (30.378598, 77.825396), (30.3548185, 77.8460573), (30.3345816, 78.0537813), (30.387299, 78.090614), (30.3272198, 78.0355272), (30.3840597, 77.9311923), (30.4158, 77.9663), (30.340426, 77.952092), (30.3984348, 78.0747887), (30.3431313, 77.9555512), (30.273471, 77.9997158)]
# Marker coordinates (lat, lon) mapped to a marker-style id (1 or 2).
lat_long_dict = {(30.3358376, 77.8701919): 1, (30.307977, 78.048457): 2, (30.3216419, 78.0413095): 1, (30.3427904, 77.886958): 2, (30.378598, 77.825396): 1, (30.3548185, 77.8460573): 2, (30.3345816, 78.0537813): 1, (30.387299, 78.090614): 2, (30.3272198, 78.0355272): 1, (30.3840597, 77.9311923): 2, (30.4158, 77.9663): 1, (30.340426, 77.952092): 2, (30.3984348, 78.0747887): 1, (30.3431313, 77.9555512): 2, (30.273471, 77.9997158): 1}
# Google-hosted KML icons keyed by style id (see kml4earth icon list above).
icon_dict = {1:'http://maps.google.com/mapfiles/kml/pal2/icon40.png', 2:'http://maps.google.com/mapfiles/kml/pal3/icon33.png'}
# Map centred near Dehradun at zoom level 15.
mymap3 = pygmaps.pygmaps(30.3164945, 78.03219179999999, 15)
# ls = []
count = 0
for key, value in lat_long_dict.items():
    # add a point into a map
    # 1st argument is latitude
    # 2nd argument is longitude
    # 3rd argument is icon to be used
    # 4th argument is colour of the point showed in thed map
    # using HTML colour code e.g.
    # red "# FF0000", Blue "# 0000FF", Green "# 00FF00"
    print(icon_dict[value])
    mymap3.addpoint(key[0], key[1], icon_dict[value], "#FF0000")
# Write the finished map out as a standalone HTML page.
mymap3.draw('pygmap3.html')
|
14,535 | 3c4a1302f2b6684d9178e116294bef0478f22e72 | from datetime import datetime
from stock import stock
import math
import time
import webbrowser
import netsvc
import openerp.exceptions
from osv import osv, fields
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class absensi(osv.osv):
    """Attendance record model (OpenERP 7 osv API) with a simple
    draft -> approve -> done / cancel workflow and chatter tracking.

    NOTE(review): the 'datetime' default calls time.strftime at class
    definition time, so long-running servers keep the import-day date;
    the mutable default context={} on the Action_* methods is shared
    across calls -- both look unintended, confirm before changing.
    """
    _name = 'absensi'
    _description = 'absensi'
    _inherit = ['mail.thread']
    _columns = {
        'name':fields.char('Name',required=True,track_visibility="onchange"),
        'employee_id':fields.many2one('hr.employee','Employee',required=True,ondelete='cascade',onupdate='cascade'),
        'datetime':fields.date('Date',required=True,track_visibility="onchange"),
        'state':fields.selection([('draft','Draft'),('approve','Approved'),('done','Done'),('cancel','Canceled')],string="State",track_visibility="always"),
    }
    _defaults={
        'datetime':time.strftime('%Y-%m-%d'),
        'state':'draft'
    }
    def Action_Approved(self,cr,uid,ids,context={}):
        """Move the selected records to the 'approve' state; True on success."""
        res = False
        absensi_obj = self.pool.get("absensi")
        if absensi_obj.write(cr,uid,ids,{'state':'approve'},context=context):
            res = True
        return res
    def Action_Done(self,cr,uid,ids,context={}):
        """Move the selected records to the 'done' state; True on success."""
        res = False
        absensi_obj = self.pool.get("absensi")
        if absensi_obj.write(cr,uid,ids,{'state':'done'},context=context):
            res = True
        return res
    def Action_Canceled(self,cr,uid,ids,context={}):
        """Move the selected records to the 'cancel' state; True on success."""
        res = False
        absensi_obj = self.pool.get("absensi")
        if absensi_obj.write(cr,uid,ids,{'state':'cancel'},context=context):
            res = True
        return res
return res
class employeeinherit(osv.osv):
    """Extend hr.employee with the reverse side of absensi.employee_id."""
    _inherit ='hr.employee'
    _columns = {
        # All attendance records belonging to this employee.
        'log_absensi':fields.one2many('absensi','employee_id',string='Absensi'),
    }
|
14,536 | 7b82092953acc32dc12c64cc47b2f3fd86717e59 | #模块的查找顺序 内存:sys.modules->内建->最后找sys.path
import sys
print(sys.path)  # module search order
# Make a module that lives outside the current path importable.
sys.path.append('D:\\python\\project\\面向对象')
# NOTE(review): imports a local module named "new" from the appended
# directory -- presumably the author's own new.py; confirm.
import new
|
14,537 | 5c992638f24efa703f1a97df0939dc4ecd595178 | from django.shortcuts import render, redirect
# Create your views here.
def index(request):
    """Render the home page, seeding session defaults for first-time visitors."""
    session = request.session
    # first time user experience
    if 'username' not in session:
        session['username'] = 'New User'
        session['users'] = []
    context = {
        'username': session['username'],
        'users': session['users'],
        'colors': ['green', 'red', 'blue'],
    }
    return render(request, 'index.html', context)
def create(request):
    """On POST, store the submitted username and append it to the users list."""
    if request.method == 'POST':
        submitted = request.POST['username']
        request.session['username'] = submitted
        request.session['users'].append(submitted)
    return redirect('/')
def game(request):
    """Render the click game, initialising the username and click counter."""
    session = request.session
    if 'username' not in session:
        session['username'] = 'New User'
    # First visit to the game: start the click counter at zero.
    if 'count' not in session:
        session['count'] = 0
    context = {
        'username': session['username'],
        'times': session['count'],
    }
    return render(request, 'game.html', context)
def click(request):
    """Record one click and bounce back to the game page."""
    request.session['count'] = request.session['count'] + 1
    return redirect('/game')
def reset(request):
    """Drop the click counter from the session (full session wipe is shown
    commented out in the original) and return to the game page."""
    # Removing just one key rather than clearing the whole session.
    del request.session['count']
    return redirect('/game')
14,538 | 41d825d92b57f0acad66e503e4b7e2fee877260d | from random import random, randint
import time
def time_complite(func):
    """Decorator: print how long (seconds, rounded to 4 dp) a call to the
    wrapped one-argument function took, then pass its result through."""
    def proc(n):
        started = time.time()
        result = func(n)
        elapsed = round(time.time() - started, 4)
        print(elapsed)
        return result
    return proc
@time_complite
def point_line(number):
    """Scatter *number* random points on a line, then greedily cover them
    left-to-right with unit-length intervals; return a dict mapping each
    interval (left, left + 1) to how many points it covers."""
    points = sorted(random() * randint(1, 10) for _ in range(number))
    print(points)
    covered = {}
    while points:
        left = min(points)
        span = (left, left + 1)
        # Consume every point that falls inside this unit interval.
        while points and points[0] < left + 1:
            points.pop(0)
            covered[span] = covered.get(span, 0) + 1
    return covered
# Demo entry point: 10 random points (keyword `n` matches the decorator
# wrapper's parameter name).
if __name__ == '__main__':
    print(point_line(n=10))
|
14,539 | ab0a77ebbea8a9382578288a1bef2e13938168a2 | # Generated by Django 3.0.14 on 2021-05-29 20:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: drop Books.visits and make summary optional."""
    dependencies = [
        ('Book', '0003_auto_20210530_0119'),
    ]
    operations = [
        # Remove the visit counter column from the Books model.
        migrations.RemoveField(
            model_name='books',
            name='visits',
        ),
        # Allow summary to be left blank, capped at 500 characters.
        migrations.AlterField(
            model_name='books',
            name='summary',
            field=models.TextField(blank=True, max_length=500),
        ),
    ]
|
14,540 | 4cb4d4421c7f6fb9e73fcf18375113af345ea3fd | # -*- encoding:utf-8 -*-
"""
company:IT
author:pengjinfu
project:js
time:2020.5.1
"""
from info import user, pwd
import requests
import execjs
import asyncio
class Login():
    """Log in to passport.fang.com, encrypting the password with the site's
    own JavaScript (fangtianxia.js) executed through execjs.

    Credentials (user, pwd) come from the local `info` module.
    """
    def __init__(self):
        # One shared session so cookies persist across requests.
        self.session = requests.Session()
    async def get_js_info(self):
        """Run getPwd(pwd) from the bundled JS and return the encrypted password."""
        with open('fangtianxia.js', 'r') as file:
            js = file.read()
        results_pwd = execjs.compile(js).call('getPwd', pwd)
        return results_pwd
    async def handle_request(self, url, data=None, headers=None):
        """POST *data* to *url*; return the JSON body on HTTP 200, else None."""
        response = self.session.post(url, data=data, headers=headers)
        if response.status_code == 200:
            return response.json()
    async def login(self):
        """Submit the login form with the JS-encrypted password and return
        the server's JSON response."""
        password = await self.get_js_info()
        url = 'https://passport.fang.com/login.api'
        data = {'uid': user,
                'pwd': password,
                'Service': ' soufun-passport-web',
                'AutoLogin': ' 1'}
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
            'Host': 'passport.fang.com',
            'Origin': 'https://passport.fang.com',
            'Referer': 'https://passport.fang.com/?backurl=http%3a%2f%2fmy.fang.com%2f',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3314.0 Safari/537.36 SE 2.X MetaSr 1.0',
        }
        response = await self.handle_request(url, data, headers)
        return response
    async def run(self):
        """Perform the login and print the result."""
        print(await self.login())
if __name__ == '__main__':
    # Drive the async login flow to completion on the default event loop.
    login = Login()
    task = login.run()
    asyncio.get_event_loop().run_until_complete(task)
|
14,541 | 6ec2edac4e434a9782fb45ee0ab059bbf4819c05 | import json
import psycopg2
import os
import time
import re
import random
import sys
# Benchmark harness: load lookup data from the movie DB, then issue
# randomised analytical queries against it for `time_to_run` seconds.
start_time = time.perf_counter()
MYDSN = "dbname=movie_json_test user=movie_json_user password=Change_me_1st! host=127.0.0.1";
cnx = psycopg2.connect(MYDSN)
# Read-only autocommit session: every statement is its own transaction.
cnx.set_session(readonly=True, autocommit=True)
cursor = cnx.cursor()
cursor2 = cnx.cursor()
# actors list stored here for random searches
list_actors=[]
#titles list here for random searches
list_tiles=[]
#list of ids
list_ids=[]
#list of ids
list_directors=[]
movie_years= dict()
# Optional CLI argument: run duration in seconds (default 300).
if len(sys.argv) == 1:
    time_to_run = 300
else :
    print ("Run Time Set to: " + sys.argv[1])
    time_to_run = int(sys.argv[1])
current_time = 0
debug = 0
qry = 0;
# Query templates; parameters are bound via %s placeholders at execute time.
load_actors = "select actor_name from movies_normalized_actors where actor_name != ''"
load_titles = "select title, imdb_id, year from movies_normalized_meta"
load_ids = "select imdb_id from movies_normalized_meta"
load_directors = "select director from movies_normalized_director"
find_best_movies_by_year = "select imdb_id, imdb_rating, title from movies_normalized_meta where year = %s order by imdb_rating desc"
find_movie_ratings_count_for_years = "select count(*), imdb_rating from movies_normalized_meta where year > %s and year < %s group by imdb_rating"
find_movies_by_years = "select count(*), year from movies_normalized_meta group by year order by year desc"
find_movies_and_directors_years_fuzzy = "select year, director, count(*), avg(imdb_rating) from movies_normalized_director a join movies_normalized_meta b on a.ai_myid = b.ai_myid where director like %s group by year, director "
find_movies_and_directors_by_years = "select year, director, count(*), avg(imdb_rating) from movies_normalized_director a join movies_normalized_meta b on a.ai_myid = b.ai_myid where year > %s and year < %s group by year, director "
actor_movie_count_by_year = "select actor_name, year, count(*), avg(imdb_rating) from movies_normalized_meta a, movies_normalized_cast b, movies_normalized_actors c where a.ai_myid=b.ai_myid and b.ai_actor_id = c.ai_actor_id and actor_name = %s group by year, actor_name"
actor_movie_count_by_year_fuzzy = "select actor_name, year, count(*), avg(imdb_rating) from movies_normalized_meta a, movies_normalized_cast b, movies_normalized_actors c where a.ai_myid=b.ai_myid and b.ai_actor_id = c.ai_actor_id and actor_name = %s group by year, actor_name"
actor_movie_count_by_year_range = "select actor_name, year, count(*), avg(imdb_rating) from movies_normalized_meta a, movies_normalized_cast b, movies_normalized_actors c where a.ai_myid=b.ai_myid and b.ai_actor_id = c.ai_actor_id and year between %s and %s group by year, actor_name"
find_country_movies_single_year = "select country, count(*) from movies_normalized_meta where year = %s group by country"
find_country_movies_range_year = "select year, country, count(*) from movies_normalized_meta where year between %s and %s group by year, country"
print("Loading Actors...")
cursor.execute(load_actors)
for actor_name in cursor:
    list_actors.append(actor_name);
print("Loading Titles & ID's...")
cursor.execute(load_titles)
for (movie_titles, imdb_id, mv_year) in cursor:
    list_tiles.append(movie_titles);
    list_ids.append(imdb_id);
    movie_years[imdb_id] = mv_year
print("Loading Directors...")
cursor.execute(load_directors)
for (director) in cursor:
    list_directors.append(director);
# Reset the clock: only the query phase is timed.
start_time = time.perf_counter()
print("Starting Querying Data for "+ str(time_to_run) + " second ...")
while current_time < time_to_run :
    current_time = time.perf_counter() - start_time
    search_actor = random.choice(list_actors)
    if debug == 1 :
        print("Actor: " + str(search_actor[0]))
    cursor.execute(actor_movie_count_by_year, search_actor)
    qry = qry + 1
    junk = cursor.fetchall()
    # Fuzzy match on the first six characters of the actor's name.
    cursor.execute(actor_movie_count_by_year_fuzzy, (search_actor[0][0:6]+'%',))
    qry = qry + 1
    junk = cursor.fetchall()
    searchyear = random.randrange(1945,2019)
    cursor.execute( actor_movie_count_by_year_range, (searchyear, searchyear+5))
    qry = qry + 1
    junk = cursor.fetchall()
    search_director = random.choice(list_directors)
    cursor.execute(find_movies_and_directors_years_fuzzy, (search_director[0][0:6]+'%',))
    qry = qry + 1
    junk = cursor.fetchall()
    searchyear = random.randrange(1945,2019)
    cursor.execute( find_movies_and_directors_by_years, (searchyear, searchyear+5))
    qry = qry + 1
    junk = cursor.fetchall()
    searchyear = random.randrange(1945,2019)
    cursor.execute( find_movie_ratings_count_for_years, (searchyear, searchyear+5))
    qry = qry + 1
    junk = cursor.fetchall()
    cursor.execute( find_movies_by_years)
    qry = qry + 1
    junk = cursor.fetchall()
    searchyear = random.randrange(1945,2019)
    cursor.execute( find_best_movies_by_year, (searchyear,))
    qry = qry + 1
    junk = cursor.fetchall()
    searchyear = random.randrange(1945,2019)
    cursor.execute( find_country_movies_single_year, (searchyear,))
    qry = qry + 1
    junk = cursor.fetchall()
    searchyear = random.randrange(1945,2019)
    cursor.execute( find_country_movies_range_year, (searchyear, searchyear+5))
    qry = qry + 1
    junk = cursor.fetchall()
    # print("Query: " + str(qry))
print("Ending Querying Data After "+ str(current_time) + " seconds ... Finished " + str(qry) + " queries")
|
14,542 | 317d791a182015deb0b075dcde8db32ed2222c2c | import sys, os
# When True, link every file whose prefix is NOT in the list (inverted filter).
reverseSelection=True
def separate(inPath, outPath, listFname):
    """Symlink files from inPath into outPath, filtered by a prefix list.

    listFname holds one prefix per line (first whitespace token, upper-cased);
    each file is matched on the part of its name before the first dot.

    NOTE(review): the prefixes are upper-cased but fname.split(".")[0] is
    not, so case-mismatched file names can never match -- confirm intended.
    """
    readPrefixes=[]
    with open(listFname) as f:
        for line in f:
            readPrefixes.append( line.split()[0].upper() )
    for fname in os.listdir(inPath):
        isInList= fname.split(".")[0] in readPrefixes
        # Keep the file when it matches the filter (inverted if requested).
        if (not reverseSelection and isInList) or (reverseSelection and not isInList):
            print(fname)
            fnameOld= os.path.join(inPath, fname)
            newFname= os.path.join(outPath, fname)
            os.symlink(fnameOld, newFname)
if __name__=="__main__":
if len(sys.argv)!=4:
raise ValueError("Incorrect number of arguments")
inPath= os.path.abspath(os.path.expanduser(sys.argv[1]))
outPath= os.path.abspath(os.path.expanduser(sys.argv[2]))
listFname= os.path.abspath(os.path.expanduser(sys.argv[3]))
separate(inPath, outPath, listFname)
|
14,543 | d2aa4d653fbd70455015bd8f82c863db699b90e0 | import logging
import time
from modules.core.props import Property, StepProperty
from modules.core.step import StepBase
from modules import cbpi
@cbpi.step
class BringToTempOverTime(StepBase):
    """CraftBeerPi brew step: ramp a kettle to a target temperature in a
    configurable number of equal sub-steps spread over a timer period."""
    # Properties
    kettle = StepProperty.Kettle("Kettle")
    temp = Property.Number("Temperature", configurable=True)
    steps = Property.Number("Number of Steps", configurable=True)
    timer = Property.Number("Timer in Minutes", configurable=True)
    # Runtime state: remaining sub-steps, seconds per sub-step, and the
    # intermediate setpoint currently in force (0.0 = needs recomputing).
    stepsLeft = 0
    timeInc = 0
    currentTargetTemp = 0.0
    def init(self):
        """Start the overall timer and derive the per-sub-step schedule."""
        cbpi.app.logger.info("BringToTempOverTime Init called")
        # Start the timer
        if self.is_timer_finished() is None:
            cbpi.app.logger.info("BringToTempOverTime Starting timer for %d minutes", int(self.timer))
            self.start_timer(int(self.timer) * 60)
        # Set the steps left
        self.stepsLeft = int(self.steps)
        # Set the time increment
        self.timeInc = ((int(self.timer) * 60) / int(self.steps))
    def reset(self):
        """Stop the timer and clear the kettle setpoint."""
        cbpi.app.logger.info("BringToTempOverTime Reset called")
        self.stop_timer()
        self.set_target_temp(0, self.kettle)
    def finish(self):
        """Clear the kettle setpoint when the step completes."""
        cbpi.app.logger.info("BringToTempOverTime Finish called")
        self.set_target_temp(0, self.kettle)
    def execute(self):
        # This method is execute in an interval
        #cbpi.app.logger.info("BringToTempOverTime Execute called")
        # Check if Target Temp is reached
        if self.get_kettle_temp(self.kettle) >= float(self.temp):
            cbpi.app.logger.info("BringToTempOverTime Kettle temp is >= temp")
            # If timer is finished go to the next step
            if self.is_timer_finished() == True:
                cbpi.app.logger.info("BringToTempOverTime Target temp reached and timer finished, moving to next step")
                self.next()
        elif self.currentTargetTemp <= 0.0:
            # A new intermediate setpoint is needed.
            # If we are out of steps just set the final temp
            if int(self.stepsLeft) <= 0:
                cbpi.app.logger.info("BringToTempOverTime Zero steps left")
                self.currentTargetTemp = self.temp
            else:
                cbpi.app.logger.info("BringToTempOverTime Kettle currentTargetTemp is <= 0.0")
                # Calculate the increment of temperature in n steps
                perStepTemp = ((float(self.temp) - self.get_kettle_temp(self.kettle)) / int(self.stepsLeft))
                self.currentTargetTemp = self.get_kettle_temp(self.kettle) + perStepTemp
                cbpi.app.logger.info("BringToTempOverTime New target temp calculated from perStepTemp of %f with new temp of %f", float(perStepTemp), float(self.currentTargetTemp))
                self.stepsLeft = int(self.stepsLeft) - 1
                cbpi.app.logger.info("BringToTempOverTime Steps left is %d", int(self.stepsLeft))
            # set target temp
            cbpi.app.logger.info("BringToTempOverTime Setting target temp")
            cbpi.app.logger.info("BringToTempOverTime To %f", self.currentTargetTemp)
            self.set_target_temp(self.currentTargetTemp, self.kettle)
        elif self.timer_remaining() <= int(self.stepsLeft) * int(self.timeInc):
            # The schedule says this sub-step's time is up: force a
            # recalculation on the next tick by zeroing the setpoint.
            cbpi.app.logger.info("BringToTempOverTime Time for step has expired, moving to next target temp and step")
            self.currentTargetTemp = 0.0
|
14,544 | 9df228e4cbefc40db6b37607eaf473b8e94df79a | from sys import argv
from os.path import exists
script, from_file, to_file = argv # put cmd line args into vars
print "Copying file from %s to %s." % (from_file, to_file)
indata = open(from_file).read() # open and read from_file, put contents in indata
out_file = open(to_file, 'w') # open the file in write mode
out_file.write(indata) # write the contents of indata to out_file
out_file.close()
print "Alrght, all done." |
14,545 | ab40f147980e0e69f3bdb600f2d74a432e4b4792 | import requests
from bs4 import BeautifulSoup
import re
url = "http://python123.io/ws/demo.html"
r = requests.get(url)
demo = r.text
soup = BeautifulSoup(demo, "html.parser")
for link in soup.find_all("a"): # find_all()
# print(link)
print(link.get('href'))
print('-'*50)
print("含course属性的a标签:\n",soup.find_all('p','course'))
print("link1:\n",soup.find_all(id='link1'))
print('-'*50)
print("使用正则re.compile():\n",soup.find_all(id=re.compile('link'))) |
14,546 | 8d1e6a54cd1df4e57efe096eb222f0ab89cf7f15 | #!/usr/bin/python3 -u
import os
import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
MQTT_HOST=os.getenv("MQTT_HOST", "test.mosquitto.org").strip()
print("Will connect to "+str(MQTT_HOST))
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("uservicehack/#") # the '#' wildcard matches all topics below this one
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print(msg.topic + " " + msg.payload.decode("ascii"))
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(MQTT_HOST, 1883, 60)
client.loop_start()
# Publish a single message on the topic
publish.single("uservicehack/kittens", "minikatz are great!", hostname=MQTT_HOST)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
while True:
1
client.loop_stop() # Never gets here
|
14,547 | cb4b4bab8e328e3dbaf8fdce9083f47947ae56b0 | r, c = map(int, input().split())
a = [list(map(int, input().split())), list(map(int, input().split()))]
print(a[r - 1][c - 1]) |
14,548 | cd78d220a67b73d774559c42c395d9662e125ebc | import serial
#import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
#from datetime import datetime
#dt = datetime.now()
#dt.microsecond
import csv
ser = serial.Serial('COM13', 115200)
with open('middle.csv', 'w', newline ='') as f:
writer = csv.writer(f)
writer.writerow(['time','snsr_11','snsr_12','snsr_13','snsr_14',
'snsr_21','snsr_22','snsr_23','snsr_24',
'snsr_31','snsr_32','snsr_33','snsr_34',
'snsr_41','snsr_42','snsr_43','snsr_44','interupt'])
dummy = np.zeros((4,4))
fig, ax = plt.subplots()
line = ax.imshow(dummy, vmin=-50, vmax=50)
def init():
line.set_array(np.zeros((4,4)))
return line,
def animate(i):
s = str(ser.readline(),'utf-8')
st = s.rstrip().split(',')
with open('middle.csv', 'a', newline ='') as f:
writer = csv.writer(f)
writer.writerow(st)
lis = [float(x) for x in st[1:17]]
a = np.array(lis).reshape(4,4)
line.set_array(a)
#line.text(0,-0.6,s)
#line.set_title(s)
return line,
ani = animation.FuncAnimation(
fig, animate, init_func=init, interval=5, blit=True, save_count=10)
plt.show()
|
14,549 | 444a680c9df110d8fab919def46693c14619288f | # """
# 装饰器的功能:
# 1.引入日志
# 2.函数执行时间的统计
# 3.执行函数前预备处理,如flask的请求钩子
# 4.执行函数后清理功能
# 5.权限校验等场景,如登陆验证装饰器
# 6.缓存
# """
#
import time
from functools import wraps
import math
def timefunc(func):
    """Decorator: print *func*'s wall-clock runtime and its result, and pass
    the result through to the caller.

    Bug fixed: the original wrapper only printed the result and implicitly
    returned None (`return res` was commented out), so every decorated
    function silently lost its return value.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        res = func(*args, **kwargs)  # 调用被装饰函数 -> call the wrapped function
        end_time = time.time()
        print("function %s runtime is %s" % (func.__name__, end_time - start_time))
        print(res)
        return res  # propagate the wrapped function's return value
    return wrapper
# @timefunc
# def factorial(num):
# """计算一个正整数的阶乘"""
# if num == 0 or num == 1:
# return 1
# elif num > 1:
# return num * factorial(num-1)
#c
# # math.factorial(num) # 直接导库
#
# result = 1
# for i in range(1, num+1):
# result *= i
# return result
#
#
# if __name__ == '__main__':
# res = factorial(10)
# print(res)
# def clock(func):
# def clocked(*args):
# t0 = time.perf_counter()
# result = func(*args)
# elapsed = time.perf_counter() - t0
# name = func.__name__
# # arg_str = ', '.join(repr(arg) for arg in args)
# # print('[%0.8fs] %s(%s) -> %r' % (elapsed, name, arg_str, result))
# print('[%0.8fs] %s -> %r' % (elapsed, name, result))
# return result
#
# return clocked
@timefunc
def factorial(n):
    """Compute n! via an inner recursive helper (runtime logged by @timefunc)."""
    def _recurse(k):
        # Base case: 0! == 1! == 1.
        if k < 2:
            return 1
        return k * _recurse(k - 1)
    return _recurse(n)
@timefunc
def factorialloop(n):
    """Compute n! iteratively (runtime logged by @timefunc)."""
    product = 1
    # Multiply the factors in descending order; same product, different walk.
    for factor in range(n, 1, -1):
        product *= factor
    return product
if __name__ == '__main__':
    # Demo: time both factorial implementations on the same input.
    factorial(50)
    factorialloop(50)
|
14,550 | af27dae1dcf7dbbf940f7ddaf16114c0be263580 | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, String, Integer, Date
# NOTE(review): database credentials are hard-coded in the URL — move them to
# configuration or environment variables.
engine = create_engine("mysql+mysqldb://root:cga3026@localhost:3306/gpoascen_crm", echo=False)
Session = sessionmaker(bind=engine)
Base = declarative_base()
class Modulos(Base):
    """SQLAlchemy declarative model for the `modulos` table (application modules)."""
    __tablename__ = 'modulos'
    idmodulo = Column(Integer, primary_key=True)  # surrogate primary key
    nombre = Column(String)  # module display name
    idtitulospermisos = Column(Integer)  # presumably FK to a permissions-title table — TODO confirm
    def __init__(self, idmodulo, nombre, idtitulospermisos):
        self.idmodulo = idmodulo
        self.nombre = nombre
        self.idtitulospermisos = idtitulospermisos
#print(type(engine))
# connection test
# 2 - generate database schema
Base.metadata.create_all(engine)
# 3 - create a new session
session = Session()
# Create a register
#modulo = Modulos(None,"test","1")
# 9 - persists data
#session.add(modulo)
# get data
# NOTE(review): each assignment below discards the previous query's results —
# only the last filter's rows are kept and printed.
modulos = session.query(Modulos).all()
modulos = session.query(Modulos).filter(Modulos.idmodulo > 10).all()
modulos = session.query(Modulos).filter(Modulos.nombre.like("%credito%")).all()
modulos = session.query(Modulos).filter(Modulos.nombre.like("%es%")).filter(Modulos.idmodulo > 10).all()
moduloUp = session.query(Modulos).filter(Modulos.nombre == "test").first()
if moduloUp :
    print(str(moduloUp.idmodulo) + " " + moduloUp.nombre )
    # rename the row; persisted by session.commit() below
    moduloUp.nombre = "Se cambia Con SQLAlchemy"
for modulo in modulos :
    print(modulo.nombre)
# 10 - commit and close session
session.commit()
session.close()
14,551 | 52b25f7e697b550394ef5f96bdcb77422ba75d4f | def paintingPlan( n, k):
"""
:type n: int
:type k: int
:rtype: int
"""
if k==n*n:
return 1
count=0
def get(num):
num1 =1
for i in range(1,num+1):
num1 =num1*i
num2 =1
for j in range(n,n-num,-1):
num2=num2*j
return num2//num1
for i in range(0,n+1):
for j in range(0,n+1):
if i*n+j*n-i*j==k:
if i==0 and j!=0:
count += get(j)
elif i!=0 and j==0:
count+=get(i)
else:
count += get(i)*get(j)
return count
# Smoke test: count plans for a 4x4 grid with k = 13 painted cells.
print(paintingPlan(4,13))
14,552 | 9d42c369f0bc85153cd5c91b67506c183b57e8ec | import logging
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, render, redirect
from django.urls import reverse, reverse_lazy
from django.views.generic.edit import (CreateView, DeleteView, FormView,
UpdateView)
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django_filters.views import FilterView
from django import forms
from main import models
logger = logging.getLogger(__name__)  # module-level logger (not referenced elsewhere in this module)
####
class AidRequestCreateView(LoginRequiredMixin, CreateView):
    """Hospital-side form view for creating an AidRequest.

    The new request is attached to the first Hospital owned by the
    logged-in user (presumably one hospital per user — TODO confirm).
    """
    model = models.main.AidRequest
    fields = [
        "type",
        "details",
        "equipment_brand",
        "equipment_model",
        "equipment_serialno",
    ]
    success_url = reverse_lazy("aidrequestforhospital_list")
    def form_valid(self, form):
        # Attach the creator's hospital before persisting.
        obj = form.save(commit=False)
        hospital = self.request.user.hospital_set.all().first()
        obj.hospital = hospital
        obj.save()
        # NOTE(review): super().form_valid() saves the form again —
        # presumably a redundant double save; confirm against CreateView.
        return super().form_valid(form)
class AidRequestUpdateView(LoginRequiredMixin, UpdateView):
    """Edit view for an AidRequest, restricted to the user's own hospitals."""
    model = models.main.AidRequest
    fields = [
        "type",
        "details",
        "equipment_brand",
        "equipment_model",
        "equipment_serialno",
    ]
    success_url = reverse_lazy("aidrequestforhospital_list")
    def get_queryset(self):
        # Only requests belonging to one of the user's hospitals are editable.
        hospitals = models.main.Hospital.objects.filter(user=self.request.user)
        return self.model.objects.filter(hospital__in=hospitals)
class AidRequestDeleteView(LoginRequiredMixin, DeleteView):
    """Delete view for an AidRequest, restricted to the user's own hospitals."""
    model = models.main.AidRequest
    success_url = reverse_lazy("aidrequestforhospital_list")
    def get_queryset(self):
        # Ownership filter mirrors AidRequestUpdateView.get_queryset.
        hospitals = models.main.Hospital.objects.filter(user=self.request.user)
        return self.model.objects.filter(hospital__in=hospitals)
@login_required
def aidrequest_close(request, pk):
    """Mark one of the user's own aid requests as closed, then go back to the list.

    `.get` on the ownership-filtered queryset raises DoesNotExist when *pk*
    does not belong to one of the user's hospitals (surfacing as a 500 here —
    NOTE(review): consider get_object_or_404).
    """
    hospitals = models.main.Hospital.objects.filter(user=request.user)
    aid = models.main.AidRequest.objects.filter(hospital__in=hospitals).get(pk=pk)
    aid.closed = True
    aid.save()
    return redirect("aidrequestforhospital_list")
class AidRequestDetailForHospital(DetailView):
    """Read-only detail page of an AidRequest with the hospital-facing template."""
    model = models.main.AidRequest
    template_name = "main/aidrequest_detail_hospital.html"
class AidRequestListForHospital(ListView):
    """List of aid requests belonging to the logged-in user's hospitals."""
    model = models.main.AidRequest
    template_name = "main/aidrequest_list_hospital.html"
    def get_queryset(self):
        # Restrict to requests of hospitals owned by the current user.
        hospitals = models.main.Hospital.objects.filter(user=self.request.user)
        return self.model.objects.filter(hospital__in=hospitals)
class SignupStep2Form(forms.Form):
    """Second signup step: personal contact info plus the hospital to attach."""
    name = forms.CharField(max_length=32)
    phone = forms.CharField(max_length=32)
    hospital_name = forms.CharField(max_length=32)
    hospital_address = forms.CharField(max_length=32)
class SignupStep2(LoginRequiredMixin, FormView):
    """Collect profile and hospital details right after account creation."""
    success_url = reverse_lazy("aidrequestforhospital_list")
    form_class = SignupStep2Form
    template_name = "main/step2_form.html"
    def form_valid(self, form):
        # Store the contact details on the user record.
        # NOTE(review): assumes a custom user model with a `phone` field — confirm.
        self.request.user.first_name = form.cleaned_data['name']
        self.request.user.phone = form.cleaned_data['phone']
        self.request.user.save()
        # Create (or claim) the hospital by name and attach it to this user.
        h, _ = models.main.Hospital.objects.get_or_create(name=form.cleaned_data['hospital_name'])
        h.address = form.cleaned_data['hospital_address']
        h.user = self.request.user
        h.save()
        return super().form_valid(form)
###
class AidRequestDetailForDonor(DetailView):
    """Donor-facing detail page for a single AidRequest (default template)."""
    model = models.main.AidRequest
class HospitalDetailForDonor(DetailView):
    """Donor-facing detail page for a single Hospital (default template)."""
    model = models.main.Hospital
class HospitalListForDonor(FilterView):
    """Donor-facing hospital list, filterable by city."""
    model = models.main.Hospital
    filterset_fields = ["city"]
    def get_queryset(self):
        # All hospitals are visible to donors.
        return self.model.objects.all()
class AidRequestListForDonor(FilterView):
    """Donor-facing aid-request feed, newest first, filterable by hospital city."""
    model = models.main.AidRequest
    filterset_fields = ["hospital__city"]
    def get_queryset(self):
        return self.model.objects.all().order_by("-updated_at")
def home(request):
    """Landing page: route authenticated users onward, render home otherwise."""
    if not request.user.is_authenticated:
        return render(request, "home.html")
    # Users who have not completed signup step 2 still have an empty first name.
    target = "signup_step2" if request.user.first_name == "" else "aidrequestforhospital_list"
    return redirect(reverse(target))
|
14,553 | f1133a749b0462ad646eef34bd159f49dcbf8bc9 | """
3. Массив размером 2m + 1, где m – натуральное число, заполнен случайным образом.
Найдите в массиве медиану. Медианой называется элемент ряда, делящий его на
две равные по длине части: в одной находятся элементы, которые не меньше медианы,
в другой – не больше медианы.
Задачу можно решить без сортировки исходного
массива.
Но если это слишком сложно, то используйте метод сортировки,
который не рассматривался на уроках: Шелла, Гномья, Кучей...
[5, 3, 4, 3, 3, 3, 3]
[3, 3, 3, 3, 3, 4, 5]
my_lst
new_lts
arr[m]
from statistics import median
[3, 4, 3, 3, 5, 3, 3]
left.clear()
right.clear()
m = 3
len = 7
i
left = []
right = []
left == right and
for i in
for
left == right
left.clear()
right.clear()
""" |
14,554 | 6c6393732c5279dcd256da72c6b14587424887a3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 24 10:36:49 2018
@author: hk
"""
import os
os.getcwd()  # NOTE(review): return value unused — leftover from interactive work
os.chdir('/Users/hk/Dropbox/TDI/Project')  # hard-coded working directory for the data files below
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def hIndex(citations):
    """Return the h-index of *citations*: the largest h such that the first
    h entries are each >= h.  Assumes the list is sorted in descending
    order — TODO confirm with callers.
    """
    h = 0
    for cites in citations:
        # Stop at the first entry too small to extend the h-index.
        if cites < h + 1:
            break
        h += 1
    return h
# --- Grant citation analysis (FY2000) -------------------------------------
#reading in files
gp = pd.read_csv('gnum2000-2.csv')
cp = pd.read_csv('gnum2000-0.csv')
#making column names consistent
gp = gp.rename(columns={"pmids": "pid"})
#merging data
data = gp.merge(cp, how = 'left', on = 'pid')
data2 = data.sort_values(['gid', 'cites'], ascending=[True, False])
# NOTE(review): this line re-derives data2 from the UNSORTED `data`, so the
# sort above is discarded — hIndex below assumes descending citations.
data2 = data.set_index('gid')
del data2['pid']
data3 = data2.groupby('gid').agg(['sum', 'mean', 'min', 'max', 'median'])
data3.columns = data3.columns.droplevel(0)
data3.head()
#calculating h index
#making lists to put in h index function
clist = data2.groupby('gid')['cites'].apply(list)
#calling hindex function over lists
hindex = []
for l in clist:
    hindex.append(hIndex(l))
#adding h index to dataset
data3['hindex'] = hindex
data3.reset_index(level=0, inplace=True)
data3.head()
#saving dataset as a file:
data3.to_csv('allanalysis2000.csv', sep = ',', index = True)
#loading raw data from NIH
df = pd.read_csv('RePORTER_PRJ_C_FY2000.csv')
#parsing out grant id numbers
df['gnum'] = df['CORE_PROJECT_NUM'].str[3:]
#converting enddate to datetime
df['enddate'] = pd.to_datetime(df['BUDGET_END'])
#pulling specific columns from the raw data
rawgrantdata = pd.DataFrame({'gid' : df['gnum'],
                             'mech' : df['ACTIVITY'],
                             'center' : df['ADMINISTERING_IC'],
                             'centername' : df['IC_NAME'],
                             'enddate': df['enddate']})
#removing duplicates from the data
rawdata = rawgrantdata.drop_duplicates(subset = 'gid', keep = 'first')
#merging with calculated values
adata1 = data3.merge(rawdata, how = 'left', on = 'gid')
#removing missing values:
import numpy as np
adata2 = adata1.replace('', np.nan, inplace=False)
adata2.dropna(inplace=True)
adata2.to_csv('allanalysis2000_nomiss.csv', sep = ',', index = False)
adata2['year'] = adata2['enddate'].dt.strftime('%Y')
#frequencies:
#adata2.crosstab('mech', 'center')
print(adata2['mech'].value_counts())
print(adata2['center'].value_counts())
#subsetting to National Cancer Institute and peers
CAdata = adata2[adata2['center'] == 'CA']
GMdata = adata2[adata2['center'] == 'GM']
HLdata = adata2[adata2['center'] == 'HL']
# NOTE(review): `top3` is never defined before this line -> NameError at
# runtime; presumably meant to start from CAdata (or an empty frame).
top3 = top3.append(GMdata)
print(CAdata['year'].value_counts())
keep = []
for x in adata2['mech']:
    if x in ('R01', 'T32', 'R29', 'F32', 'K08'):
        keep.append(1)
    else:
        keep.append(0)
adata2['keep'] = keep
top5 = adata2[adata2['keep']==1]
# NOTE(review): `top35` is never defined -> NameError; presumably `top5`.
catop5 = top35[top35['center']=='CA']
my_tab = pd.crosstab(index=top35["center"],  # Make a crosstab
                     columns="mech")
#my_tab.index = ['CA', 'GM', 'HL']
my_tab.columns = ['R01', 'T32', 'R29', 'F32', 'K08']
my_tab
#plotting
x = [pd.to_datetime(d) for d in catop5['enddate']]
y = catop5['hindex']
cgroup = top35['center']
mgroup = catop5['mech']
ccolors = {'HL':'red', 'GM':'blue', 'CA':'green'}
mcolors = {'R01':'red', 'T32':'blue', 'R29':'green', 'F32': 'yellow', 'K08': 'black'}
plt.scatter(x, y, c=mgroup.apply(lambda x: mcolors[x]), alpha = 0.5)
plt.show()
gtop5 = top5.groupby(top5['mech']).agg(['mean', np.std])
data = pd.read_csv('allanalysis2000.csv')
14,555 | 40fb4c9eb801baed07c825e74d588d6d82ffa8f9 | import hashlib
def hashing(word):
    """
    Return the SHA-256 hash of a string, using hashlib from the standard
    library.

    Arguments
        word : a string

    Return Value
        a string of hexadecimal digits corresponding to the SHA256 hashing
        of the UTF-8 encoding of the given word.
    """
    digest = hashlib.sha256(word.encode())
    return digest.hexdigest()
if __name__ == '__main__' :
    # Demo entry point: print the digest of a sample string (expected output
    # is recorded in the trailing example block below).
    print(hashing("Hello World!!!"))
"""
INPUT
Hello World!!!
OUTPUT
073f7397b078dca7efc7f9dc05b528af1afbf415d3caa8a5041d1a4e5369e0b3
"""
|
14,556 | ce7e97be76ef989a0a275defd42059eb37397d12 | from tkinter import Tk, Label, Radiobutton, StringVar
from PIL import Image, ImageTk
from tkinter import filedialog
from Filter import Filter
from MeanBlur import MeanBlur
from MedianBlur import MedianBlur
from GaussianBlur import GaussianBlur
from Translator import Translator
from Scaler import Scaler
from Rotator import Rotator
from Histogram import Histogram
from Canny import Canny
import cv2
class GUI:
    """Tkinter window that loads an image and applies the selected filter.

    One radio button is created per Filter object; selecting one applies
    that filter's applyFilter() to the current OpenCV image and redraws.
    """
    # Class-level defaults (overwritten per instance in __init__/loadImage).
    var = []        # StringVar holding the selected radio value
    path = ''       # tuple of file paths from the open dialog
    img = ''        # current image as an OpenCV (numpy) array
    filters = []    # list of Filter instances
    myLabel = ''    # Label widget currently displaying the image
    def __init__(self, master, title, resolution, filters):
        self.master = master
        self.var = StringVar(master)
        self.filters = filters
        master.title(title)
        master.geometry(resolution)
        # Ask for the image first, then build the controls and show it.
        self.path = self.getImagePath()
        self.addFilterButtons(filters)
        self.loadImage()
    def loadImage(self):
        """Display the chosen file via PIL and keep an OpenCV copy in self.img."""
        img = ImageTk.PhotoImage(Image.open(self.path[0]).resize((500,500)))
        self.img = cv2.imread(self.path[0], cv2.IMREAD_UNCHANGED)
        label = Label(self.master, image = img)
        label.image = img  # keep a reference so Tk doesn't garbage-collect it
        label.place(x = 0, y= 40)
        self.myLabel = label
    def convertImage(self):
        """Convert self.img from OpenCV BGR to RGB and redraw the label.

        NOTE(review): if cv2.split(self.img) is falsy, the local `img` below
        is never assigned and Image.fromarray(img) raises NameError — confirm
        intended behavior for non-splittable images.
        """
        if (cv2.split(self.img)):
            b,g,r = cv2.split(self.img)
            img = cv2.merge((r,g,b))  # BGR -> RGB channel order for PIL
            print(img)
        im = Image.fromarray(img)
        im = im.resize((500,500))
        imgtk = ImageTk.PhotoImage(image=im)
        label = Label(self.master, image=imgtk)
        label.image = imgtk
        label.place(x = 0, y= 40)
        self.myLabel = label
    def selected(self):
        """Radio-button callback: apply the filter whose index matches the selection."""
        i = 0
        for filter in self.filters:  # NOTE(review): `filter` shadows the builtin
            if (str(i) == self.var.get()):
                # self.loadImage()
                self.img = filter.applyFilter(self.img)
            i = i+1
        self.convertImage()
    def getImagePath(self):
        """Open a file dialog and return the selected path(s) as a tuple."""
        return filedialog.askopenfilenames()
    def addFilterButtons(self, filters):
        """Create one radio button per filter, laid out horizontally."""
        i = 0
        xpos = 50
        for filter in filters:
            Radiobutton(self.master, text=filter.__class__.__name__, variable=self.var, value= i ,command= self.selected).place(x = xpos, y = 10 )
            i = i+1
            xpos = xpos+120
# Build the main window with the full filter pipeline and start the Tk loop.
root = Tk()
filters = [MeanBlur(), MedianBlur(), GaussianBlur(), Translator(), Rotator(), Histogram(), Canny() ]
my_gui = GUI(root, "Window","900x900",filters)
root.mainloop()
|
14,557 | 8d1bf4226e7d59c47eeb7952914e3086efe43036 | from sys import maxsize
def Stack():
    """Create and return a new, empty stack (backed by a Python list)."""
    return []
def isEmpty(stack):
    """Return True when *stack* holds no items, False otherwise."""
    return not stack
def push(stack,item):
    """Push *item* (a string) onto the top of *stack* and log the operation."""
    stack += [item]
    print(item+" pushed to stack ")
def pop(stack):
    """Remove and return the top item; return str(-maxsize-1) on an empty stack."""
    if stack:
        return stack.pop()
    return str(-maxsize - 1)
def top(stack):
    """Return (without removing) the top item; str(-maxsize-1) when empty."""
    if not stack:
        return str(-maxsize - 1)
    return stack[-1]
# Demo: exercise the stack helpers from the command line.
stack=Stack()
push(stack,str(10))
push(stack, str(20))
push(stack, str(30))
print(pop(stack)+" popped from stack ")
print(top(stack)+" top element in stack ")
|
14,558 | 93d92daf4b287c08e82195404b50b04fa1dc82ba | from unittest.mock import Mock
import pytest
from rest_framework.exceptions import ValidationError
from rest_framework.status import (
HTTP_200_OK,
HTTP_401_UNAUTHORIZED,
HTTP_404_NOT_FOUND,
)
from api.serializers import ProjectSerializer
class TestProjectSerializer:
    """Tests for ProjectSerializer.validate_project_name with the GitHub
    organization API patched out."""
    def test_validate_project_name_correct(self, mocker):
        # 200 from GitHub -> the name passes validation unchanged.
        mocker_response = mocker.patch(
            'api.serializers.GithubOrganizationApi.get_repo',
            Mock,
        )
        mocker_response.status_code = HTTP_200_OK
        assert ProjectSerializer().validate_project_name('test') == 'test'
    def test_validate_project_name_not_exists(self, mocker):
        # 404 -> "does not exists" ValidationError.
        mocker_response = mocker.patch(
            'api.serializers.GithubOrganizationApi.get_repo',
            Mock,
        )
        mocker_response.status_code = HTTP_404_NOT_FOUND
        mocker_response.url = 'test_url'
        # NOTE(review): pytest.raises(match=...) treats the string as a regex
        # (re.search); the unescaped brackets/parentheses here match loosely —
        # consider re.escape for an exact-message check.
        with pytest.raises(
            ValidationError,
            match=(
                "[ErrorDetail(string='Project(test) does not exists(test_url)', "
                + "code='invalid')]"
            ),
        ):
            ProjectSerializer().validate_project_name('test')
    def test_validate_project_name_not_authorize(self, mocker):
        # 401 -> "not authorized" ValidationError.
        mocker_response = mocker.patch(
            'api.serializers.GithubOrganizationApi.get_repo',
            Mock,
        )
        mocker_response.status_code = HTTP_401_UNAUTHORIZED
        mocker_response.url = 'test_url'
        # NOTE(review): same regex-escaping caveat as above.
        with pytest.raises(
            ValidationError,
            match=(
                "[ErrorDetail(string='You are not authorized to add "
                + "this Project(test)', code='invalid')]"
            ),
        ):
            ProjectSerializer().validate_project_name('test')
|
14,559 | 20670c03f0a6786b0fbfccb750f20791c95bdc4e | #!/tps/bin/python -B
import os, sys, json, re
from math import floor
import elasticsearch1, urllib3
from elasticsearch1 import helpers
# Resolve the project-local spazz module relative to the working directory.
pwd = os.getcwd()
sys.path.insert(0, '{}/msl-datalytics/src/'.format(pwd))
from spazz import *
import timeit  # NOTE(review): imported but never used in the visible module
start = time.time()  # NOTE(review): relies on `time` being exported by spazz's star import — confirm
# from msldatalytics.src.spazz import *
#from spazz import *
es = elasticsearch1.Elasticsearch('https://msl-ops-es.cld.jpl.nasa.gov', sniff_on_start=False)
# es = elasticsearch1.Elasticsearch('https://msl-ops-es.cld.jpl.nasa.gov',sniff_on_start=False)
urllib3.disable_warnings()  # silence urllib3 warnings (e.g. unverified HTTPS)
global index  # NOTE(review): `global` at module level is a no-op
index = 'mslice_db'
def main():
#Query for all submasters. We want all activity groups (Pie observations) where the seqID field = sub_XXXX in the last 1000 sols.
# --------------------------------------------- Input Parameters and Initializaton -------------------------------------------------
# parameters that should eventually be inputs
verbose = False # a verbose flag that identifies every time a submaster was rejected from the analysis
filename = 'demonstrationoutput' # name of the .json file output to be used as a pseudo-database
queryLen = 5000 # how large do we let the query get. Currently we wouldn't want anything larger than 5000 results
# earliestSol = 2170 # the earliest sol of results we want to include in our data. With our naming convention for submaster sequences we should only query within modulo 1000
#note that margin strategy changed on 2169
#================================================================================================================================================
#======================================================INPUT=====================================================================================
starting_Sol = 2000
latestSol = 2150
# while(earliestSol == 0 and latestSol == 0):
# inputstart = input("Start Sol: ")
# inputend = input("End Sol: ")
# earliestSol = inputstart
# latestSol = inputend
#================================================================================================================================================
#================================================================================================================================================
#================================================================================================================================================
keepOutSols = range(1759, 1779)+range(2172,2209)+range(2320,2348) # a list of soles we know we don't want to include in the results;
#1759-1779 = conjunction; 2172-2209 = 2172 anomaly recovery; 2320-2348 = Safing on RCE-A on 2320 and again on 2339 and subsequent swap to B
# create some counters that explain the reason for dropping various submasters
numDuplicateSubsErrors = 0
numKeepOutSolsErrors = 0
numSubDatabaseErrors = 0
numMissingMarginErrors = 0
numMarginDatabaseErrors = 0
numMissingActualsErrors = 0
numMultipleActualsErrors = 0
# initialize Spazz for a future query
spazzObj = spazz({'beginTime' : "Sol-" + str(starting_Sol) + "M00:00:00",'timeType': "LST"})
#initialize the query
# the "not" line should remove all instances of sub_00000
# This query is essensially a frame work for the elasticsearch to base off from. It continuosly parses through EVR files to
# match tihs query.
query = {
"query": {
"filtered": {
"query": {
"bool" : {
"must":[
{ "match": {"seqId":"sub"}}
]
}
},
"filter": {
"bool":{
"must":[
{"range" : {
"planSol" : {
"gte" : starting_Sol,
"lte" : latestSol
}
}},
{"term" : {"Tag" : "activitygroup" }},
{"not": {"term" : {"seqId": "00000"}}}
]
}
}
}
},
"size": queryLen,
"_source": ["seqId","Duration","Children","masterSol", "seqgenDuration"],
"sort": { "masterSol": { "order": "desc" }}
}
# ------------------------------------------ Search ---------------------------------------------------
#send query to ES and reduce it down to results
search = es.search(index=index, body=query)
results = search['hits']['hits']
totalHits = len(search['hits']['hits'])
# print("Results are ======== ", )search
#create a variable to store unidentified backbone child names for troubleshooting
unidentifiedBackbones = []
marginNamesSanityCheck = []
#create a variable to store submaster children when the script couldn' identify the associated margin
noMarginFoundChildNames = []
#initialize a new dict to reorganize the information
submasters = {};
# ------------------------------ iterate through results; build pseudo database ----------------------------
# loop through the submasters and populate a new entry in the submasters dict
percentComplete = 0
for count,result in enumerate(results):
#print a message every 10% of the results that has been analyzed
if floor(totalHits/100) == False:
pass
elif (count % (floor(totalHits/100))) == 0: #This is smart lol
print("{}%".format(percentComplete))
percentComplete+=1
seqId = result['_source']['seqId']
# masterSol = int(result['_source']['masterSol'])
masterSol = int(result['_source'].get('masterSol',"0"))
uniqueID = 'sol' + str(masterSol)+'_' + seqId
# initialize a new entry in the temporary submasters dict for this submaster sequence
keepSeqId = True
seqIdDict = {}
# print("Am I getting data?", masterSol)
# Skip all EKO's sub_00000; this should never happen so if it does, please warn user
if seqId == 'sub_00000':
print('')
print('ERROR: Found an unexpected sub_00000; this should not be possible with the query. It will be ignored.')
print('')
keepSeqId = False
continue
# the user can define keep out sols, such as Conjunction or holiday plannning. Immediately ignore these sols from analysis as they will skew our data.
elif masterSol in keepOutSols:
if verbose:
print('')
print('ERROR: Submaster ' + seqId + ' on sol' + str(masterSol) +' falls in the user defined keepOutSols. It will be ignored.')
print('')
keepSeqId = False
numKeepOutSolsErrors += 1
continue
else:
try:
# calculate and initialize the planned duration fields
seqIdDict['seqId'] = seqId
seqIdDict['masterSol'] = masterSol
seqIdDict['backboneType'] = []
seqIdDict['planTotalDur'] = result['_source']['Duration']
seqIdDict['planMarginDur'] = 0
seqIdDict['uniqueID'] = uniqueID
# calculate and initialize the seqgen duration fields
#seqIdDict['totalSeqgenDuration'] = result['_source']['seqgenDuration']
#seqIdDict['totalSeqgenDurationMinutes'] = round(result['_source']['seqgenDuration']/60, 2)
except:
if verbose:
print('')
print('ERROR: Could not identify Duration field for the submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numSubDatabaseErrors+=1
continue
# loop through children to identify the backbone type,
marginsFound = 0
# if we find a margin, query for it's duration
for ii, child in enumerate(result['_source']['Children']):
# see if this child has margin in its string identifier
if 'margin' in child.lower():
# there is a templated activity called: APXS Short Standalone with margin + cleanup
# If it is that ignore it
if 'apxs' in child.lower():
seqIdDict['backboneType'].append('unidentified')
else:
marginsFound+=1
# if margin is in the name, identify and extract the id
idRegex = r"\(sol\d{5}_tap_end_of_sol_.{22}\)$"
idMatch = re.search(idRegex, child)
# if you can successfully identify the id, then break it out, else print error message
if idMatch:
#if you need the name it is here:
childName = child[:idMatch.start()]
if childName not in marginNamesSanityCheck:
marginNamesSanityCheck.append(childName)
#grab the child Id, remove the parentheses, so we can identify it in the database
childId = child[idMatch.start()+1:idMatch.end()-1]
#get margin information with a direct query
marginEntry = es.get(id=childId, index=index)
try:
#store the margin duration as a running sum (for when there are multiple margins associated with a single submaster)
seqIdDict['planMarginDur'] += marginEntry['_source']['Duration']
continue
except:
if verbose:
print('')
print('ERROR: Could not identify a duration for the identified margin activity for submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results.')
print('Margin activity results were: ')
print(marginEntry)
print('')
keepSeqId = False
numMarginDatabaseErrors += 1
continue
else:
if verbose:
print('')
print('ERROR: Unable to identify an id for the child:' + child + '. Removing submaster ' + seqId + ' from results')
print('Child string that was searched:')
print(child)
print('')
keepSeqId = False
numMarginDatabaseErrors += 1
continue
# if I can successfully identify a Science Block, then identify that as the type
elif (('science block' in child.lower()) or ('sb' in child.lower())) and 'SB' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('SB')
# if I can successfully identify Post Drive imaging, then identify that as the type
elif (('pdi' in child.lower()) or ('post-drive imaging' in child.lower())) and 'PDI' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('PDI')
# if I can successfully identify a mobility backbone, then identify that as the type
elif 'mobility backbone' in child.lower() and 'drive' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('drive')
# if I can successfully identify an arm backbone, then identify that as the type
elif 'arm' in child.lower() and 'arm' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('arm')
# identify ECAM imaging
elif (('slip assessment' in child.lower()) or ('ecam trending' in child.lower())) and 'ECAM' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('ECAM')
# ignore dan actives, mahli merges, SAPP_RIMU_DATA_Collection, and SAM activities (for now).
elif ('dan_active' in child.lower()) or ('mahli merges' in child.lower())or ('sapp_rimu_data_collection' in child.lower()) or ('sam' in child.lower()):
seqIdDict['backboneType'].append('otherSci')
# if I can't identify it as one of the above, then print to screen to help find other problems, and also flag it as unidentified.
else:
unidentifiedBackbones.append(child)
if 'unidentified' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('unidentified')
# if I couldn't find a margin, then throw an error
if (ii == (len(result['_source']['Children'])-1) and marginsFound == 0):
if verbose:
print('')
print('ERROR: Unable to find a margin associated with ' + seqId + '. Removing submaster ' + seqId + ' from results')
print('List of children for ' + seqId + ':')
print(result['_source']['Children'])
print('')
keepSeqId = False
noMarginFoundChildNames += result['_source']['Children']
numMissingMarginErrors += 1
continue
if keepSeqId:
# now query for actuals
hits, _ = spazzObj.get_as_run_sequences(seqids=[seqId])
# print("NEVER GOT HERE")
if (len(hits) >= 1):
actual_found = False
for kk, hit in enumerate(hits):
#actuals database doesn't have master sol. It has master seqID and execution start time. Can backsolve with those to determine mastersol:
# mstr00XXX is either sol 0XXX,1XXX, or 2XXX. execution times on 2164 or 2165 may be associated with master sol 2164.
# so borrow the first digit from execution time, and the last three from master sequence ID, and voila, a master sol number
actuals_temp_execution_sol = int(hits[kk]['start_lmst'][4:8])
mstrSeqId = int(hits[kk]['parent'][4:])
actuals_temp_master_sol = mstrSeqId+(actuals_temp_execution_sol//1000*1000)
#Now correlate
if actuals_temp_master_sol == seqIdDict['masterSol']:
actual_found = True
seqIdDict['actActivityDur'] = hits[kk]['dur_earth']
#calculate actual margin
seqIdDict['actMarginDur'] = seqIdDict['planTotalDur'] - seqIdDict['actActivityDur']
break
if not actual_found:
if verbose:
print('')
print('ERROR: Found one or more as run durations associated with submaster: ' + seqId + ' on sol ' +str(masterSol)+', ')
print('but could not find a corresponding actual duration on this sol. Removing submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numMultipleActualsErrors += 1
continue
else:
if verbose:
print('')
print('ERROR: Unable to find an actual execution duration for submaster: ' + seqId + '. Removing submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numMissingActualsErrors += 1
continue
if keepSeqId:
#calculate the activity duration
seqIdDict['planActivityDur'] = seqIdDict['planTotalDur']-seqIdDict['planMarginDur']
submasters[uniqueID] = seqIdDict
# --------------------------------------- Print Errors and summaries of dropped entries -----------------------------------------
print('')
print('Kept ' + str(len(submasters)) + ' of ' + str(totalHits) + ' for analysis.')
print('Removed ' + str(numDuplicateSubsErrors) + ' submasters because of duplication in the databse.')
print('Removed ' + str(numKeepOutSolsErrors) + ' submasters because of user defined keep out sols.')
print('Removed ' + str(numSubDatabaseErrors) + ' submasters because of errors associated with reading expected fields in the database.')
print('Removed ' + str(numMissingMarginErrors) + ' submasters because script could not identify the associated margin.')
print('Removed ' + str(numMarginDatabaseErrors) + ' submasters because there were database issues with the identified margin.')
print('Removed ' + str(numMultipleActualsErrors) + ' submasters because there were database issues with the identified actual durations (implying it may not have executed).')
print('Removed ' + str(numMissingActualsErrors) + ' submasters because there were no actuals for the submaster (implying it did not execute).')
with open(filename + '.json', 'w') as fp:
json.dump(submasters, fp, sort_keys=True, indent=4, encoding = 'utf-8')
with open('unidentifiedChildren.json', 'w') as fp2:
json.dump(unidentifiedBackbones, fp2, sort_keys=True, indent=4)
with open('differentNamesforMargin.json', 'w') as fp3:
json.dump(marginNamesSanityCheck, fp3, sort_keys = True, indent= 4)
with open('childNamesWhenMissingMargins.json', 'w') as fp3:
json.dump(noMarginFoundChildNames, fp3, sort_keys = True, indent= 4)
print('Successfully wrote output to ' + filename + '.json')
print('Script Complete')
end = time.time()
mins = 0
result_time = end - start
if result_time > 60:
mins = int(floor(result_time/60))
seconds = int(floor(result_time % 60))
print("Run time: {} minutes {} seconds".format(mins, seconds))
else:
print("Run time: {} seconds".format(result_time))
#print(submasters)
###############################################################################
#def index_docs(docs):
# helpers.bulk(es,docs)
###############################################################################
def usage(): #Prints out usage statement
    """Print the command-line usage banner for this script to stdout."""
    print("")
    print(sys.argv[0])
    # Typo fix: "Workging" -> "Working".
    print("Analyzes the durations of Submasters and associated parameters for the Margin Working Group\n")
    print("USAGE:")
###############################################################################
# Script entry point: run the full submaster-duration analysis.
if __name__ == "__main__":
    main()
|
infos = {"name":"remove_channel","require":["message","commandes","commu"],"show":2,"use":1}
async def command(message,commandes,commu):
    """Admin-only chat command: delete the invoking message, unregister the
    channel from its category's channel list in `commu`, then delete the
    channel itself."""
    if await commandes["admin"][0](message,commu):
        await message.delete()
        # BUG FIX: list.remove() mutates in place and returns None, so binding
        # its result to a local was meaningless dead code.
        commu[str(message.channel.category_id)][0].remove(message.channel.id)
        await message.channel.delete()
14,561 | d2e9b8214faf47d9ef7da72941833178caba5711 | #!/usr/bin/python
import sys, os, Image, time, psutil, ImageDraw
import SlideshowPanel as spanel
import TextureLoader as tl
import numpy as np
class SlideshowPlaylist:
    """Cycles through the sub-directories of a path, each one a titled image
    playlist, cross-fading between two SlideshowPanel slots."""

    def __init__(self, _path, _scale, _fade, _titleDur, _duration, _font, _fontSize):
        # Optional per-directory titles, one per line in <path>/titles.
        try:
            f = open(_path+'titles', 'r')
            self.titles = f.readlines()
            f.close()
        except:
            self.titles = []
        self.textures = []
        self.dirNum = -1
        self.scale = _scale
        self.panelOrder = [ 0, 1 ]
        dirs = []
        # Only the top level of _path is scanned (break after first step).
        for _root, _dirs, files in os.walk(_path):
            root = _root
            for d in _dirs:
                dirs.append(d)
            break
        dirs.sort()
        for d in dirs:
            i = dirs.index(d)
            title = None
            # BUG FIX: guard against a titles file shorter than the number of
            # directories (old code indexed whenever the list was non-empty).
            if i < len(self.titles): title = self.titles[i]
            self.textures.append(tl.TextureLoader(root, dirs[i], title, _fade, _titleDur, _duration, self.scale, _font, _fontSize))
        self.panels = [None, None]
        self.INIT = False

    def update(self, _size):
        """Advance both panels; start the first playlist lazily on the first
        call. Panel response 1 requests the next image, 2 retires a panel."""
        if self.INIT != True:
            self.start = time.time() * 1000
            self.next_playlist(0)
            self.INIT = True
        for k in self.panelOrder:
            if self.panels[k] != None:
                resp = self.panels[k].update(time.time() * 1000, _size)
                if resp == 1 and self.panels[int(k==0)] == None:
                    self.next_panel(int(k == 0))
                elif resp == 2 and self.panels[int(k==0)] != None:
                    self.panels[k] = None

    def next_panel(self, _panelNum):
        """Load the next image of the current playlist into panel
        _panelNum, rolling over to the next playlist when exhausted."""
        if self.imageNum == len(self.textures[self.dirNum].playlist):
            self.next_playlist(_panelNum)
        else:
            self.panels[_panelNum] = spanel.SlideshowPanel(self.textures[self.dirNum].playlist[self.imageNum], time.time() * 1000)
            self.imageNum += 1
        self.panelOrder = self.panelOrder[::-1]

    def next_playlist(self, _panelNum):
        """Move to the next directory's playlist (wrapping) and show its
        first image on panel _panelNum."""
        # if self.dirNum > -1: self.textures[self.dirNum].kill()
        self.dirNum += 1
        # BUG FIX: was `>`, which let dirNum == len(self.textures) through and
        # crashed with IndexError on the very next access.
        if self.dirNum >= len(self.textures): self.dirNum = 0
        offset = 0
        if self.dirNum > 0:
            # BUG FIX: continue numbering from the *previous* playlist's last
            # entry; the old code sized the index with the current (not yet
            # loaded) playlist's length.
            offset = self.textures[self.dirNum-1].playlist[-1]['i'] + 1
        self.textures[self.dirNum].load(offset)
        self.panels[_panelNum] = spanel.SlideshowPanel(self.textures[self.dirNum].playlist[0], time.time() * 1000)
        self.imageNum = 1
|
14,562 | 176e2f760b4912ba2067ed03291062835ae831c7 | from p_load_points import load_points
import random
def initial_solution(vec):
    """Build a random starting tour: the depot (0), then roughly 3/5 of the
    points drawn without replacement from `vec`, then the depot again."""
    pool = vec[:]
    k = round((3 * len(vec)) / 5)
    tour = [0]
    for _ in range(k):
        picked = random.choice(pool)
        tour.append(picked)
        pool.remove(picked)
    tour.append(0)
    return tour
# Manual smoke test: build a random initial tour from points.txt and show it.
if __name__ == '__main__':
    atr = load_points('points.txt')
    first_solution = initial_solution(atr)
    print(first_solution)
|
14,563 | 0c650b700edfa61ac71cbfdb5095580c1053c70d | #!/usr/bin/env python2
import json
import math
import os
import logging
import time
import geojson
import threading
import utils
from pgoapi import pgoapi
from pgoapi import utilities as util
from pgoapi.exceptions import NotLoggedInException, ServerSideRequestThrottlingException, ServerBusyOrOfflineException
from s2sphere import CellId, LatLng
# In-memory result stores, de-duplicated by key across scan passes
# (see doScan for the key formats).
pokes = {}   # pokemon sightings keyed by "<spawn time>,<spawn point id>"
spawns = {}  # spawn points keyed by "<seconds into the hour>,<spawn point id>"
stops = {}   # pokestops keyed by fort id
gyms = {}    # gyms keyed by fort id
scans = []   # [lat, lng] scan steps produced by genwork()
num2words = ['first','second','third','fourth','fifth','sixth']
#config file
# NOTE: read at import time; config.json must exist in the working directory.
with open('config.json') as file:
    config = json.load(file)
def doScanp(wid, sLat, sLng, api):
    """Retry wrapper around doScan: attempt the scan up to 10 times,
    sleeping config['scanDelay'] seconds between failed attempts.

    wid        -- worker id, used only in log messages
    sLat, sLng -- scan-point latitude / longitude
    api        -- logged-in pgoapi instance
    """
    for i in range(0,10):
        try:
            doScan(wid, sLat, sLng, api)
        except (KeyError,TypeError):
            # NOTE(review): under Python 2 (this file uses print statements
            # elsewhere) this line parses as the print *statement* applied to
            # ('...').format(wid, i) and works; under Python 3 it would call
            # .format() on print()'s None return and raise.
            print('thread {} error scan returned error, retry {}/10').format(wid,i)
            time.sleep(config['scanDelay'])
            continue
        else:
            break
def doScan(wid, sLat, sLng, api):
    """Scan one point: request map objects around (sLat, sLng) and record
    wild pokemon, spawn points, pokestops and gyms into the module-level
    dicts. Sleeps config['scanDelay'] before returning.

    Re-raises KeyError/TypeError when the response lacks map data so the
    caller (doScanp) can retry.
    """
    #print ('scanning ({}, {})'.format(sLat, sLng))
    api.set_position(sLat,sLng,0)
    cell_ids = util.get_cell_ids(lat=sLat, long=sLng, radius=80)
    timestamps = [0,] * len(cell_ids)
    # Keep requesting until the server answers; widen the global delay each
    # time we get throttled.
    while True:
        try:
            response_dict = api.get_map_objects(latitude = sLat, longitude = sLng, since_timestamp_ms = timestamps, cell_id = cell_ids)
        except ServerSideRequestThrottlingException:
            config['scanDelay'] += 0.5
            print ('Request throttled, increasing sleep by 0.5 to {}').format(config['scanDelay'])
            time.sleep(config['scanDelay'])
            continue
        except:
            # Any other transport error: re-aim the position and retry.
            time.sleep(config['scanDelay'])
            api.set_position(sLat,sLng,0)
            time.sleep(config['scanDelay'])
            continue
        break
    try:
        cells = response_dict['responses']['GET_MAP_OBJECTS']['map_cells']
    except TypeError:
        print ('thread {} error getting map data for {}, {}'.format(wid,sLat, sLng))
        raise
    except KeyError:
        print ('thread {} error getting map data for {}, {}'.format(wid,sLat, sLng))
        raise
        return  # NOTE(review): unreachable -- the `raise` above always exits.
    # Harvest every returned cell into the global result dicts.
    for cell in cells:
        curTime = cell['current_timestamp_ms']
        if 'wild_pokemons' in cell:
            for wild in cell['wild_pokemons']:
                if wild['time_till_hidden_ms']>0:
                    # Spawns last 15 minutes (900000 ms): back-compute the
                    # spawn time from the advertised despawn time.
                    timeSpawn = (curTime+(wild['time_till_hidden_ms']))-900000
                    gmSpawn = time.gmtime(int(timeSpawn/1000))
                    # Seconds into the hour at which this point spawns.
                    secSpawn = (gmSpawn.tm_min*60)+(gmSpawn.tm_sec)
                    phash = '{},{}'.format(timeSpawn,wild['spawn_point_id'])
                    shash = '{},{}'.format(secSpawn,wild['spawn_point_id'])
                    pokeLog = {'time':timeSpawn, 'sid':wild['spawn_point_id'], 'lat':wild['latitude'], 'lng':wild['longitude'], 'pid':wild['pokemon_data']['pokemon_id'], 'cell':CellId.from_lat_lng(LatLng.from_degrees(wild['latitude'], wild['longitude'])).to_token()}
                    spawnLog = {'time':secSpawn, 'sid':wild['spawn_point_id'], 'lat':wild['latitude'], 'lng':wild['longitude'], 'cell':CellId.from_lat_lng(LatLng.from_degrees(wild['latitude'], wild['longitude'])).to_token()}
                    pokes[phash] = pokeLog
                    spawns[shash] = spawnLog
        if 'forts' in cell:
            for fort in cell['forts']:
                if fort['enabled'] == True:
                    if 'type' in fort:
                        #got a pokestop
                        stopLog = {'id':fort['id'],'lat':fort['latitude'],'lng':fort['longitude'],'lure':-1}
                        if 'lure_info' in fort:
                            stopLog['lure'] = fort['lure_info']['lure_expires_timestamp_ms']
                        stops[fort['id']] = stopLog
                    # Forts with gym_points are gyms rather than stops.
                    if 'gym_points' in fort:
                        gymLog = {'id':fort['id'],'lat':fort['latitude'],'lng':fort['longitude'],'team':0}
                        if 'owned_by_team' in fort:
                            gymLog['team'] = fort['owned_by_team']
                        gyms[fort['id']] = gymLog
    time.sleep(config['scanDelay'])
def genwork():
    """Populate the global `scans` list with [lat, lng] scan points covering
    every rectangle in config['work'], walking each grid in a serpentine
    (boustrophedon) order so consecutive points stay adjacent.

    Returns the total number of scan steps generated.
    """
    total = 0
    for rect in config['work']:
        lat_step = 0.00089
        # Longitude spacing widens with latitude so ground distance matches.
        lng_step = lat_step / math.cos(math.radians((rect[0] + rect[2]) * 0.5))
        base_lat = min(rect[0], rect[2]) + (0.624 * lat_step)
        base_lng = min(rect[1], rect[3]) + (0.624 * lng_step)
        n_lat = max(1, int(((max(rect[0], rect[2]) - min(rect[0], rect[2])) / lat_step) + 0.75199999))
        n_lng = max(1, int(((max(rect[1], rect[3]) - min(rect[1], rect[3])) / lng_step) + 0.75199999))
        for row in range(n_lat):
            # Even rows sweep left-to-right, odd rows right-to-left.
            cols = range(n_lng) if row % 2 == 0 else range(n_lng - 1, -1, -1)
            for col in cols:
                scans.append([base_lat + (lat_step * row), base_lng + (lng_step * col)])
        total += n_lat * n_lng
    return total
def worker(wid,Wstart):
    """Worker thread body (Python 2): log into account `wid`, then scan this
    worker's slice of `scans` six times, pacing each of the first five
    passes to start 10 minutes apart.

    wid    -- index into config['users'] and a log label
    Wstart -- first index of this worker's slice of the global `scans` list
    """
    workStart = min(Wstart,len(scans)-1)
    workStop = min(Wstart+config['stepsPerPassPerWorker'],len(scans)-1)
    if workStart == workStop:
        return
    print 'worker {} is doing steps {} to {}'.format(wid,workStart,workStop)
    #login
    # NOTE(review): the message says "retry {}/10" but this loop is
    # unbounded -- it retries until login succeeds.
    login_attempt = 1
    logged_in = False
    while not logged_in:
        api = pgoapi.PGoApi(provider=config['auth_service'], username=config['users'][wid]['username'], password=config['users'][wid]['password'], position_lat=0, position_lng=0, position_alt=0)
        api.activate_signature(utils.get_encryption_lib_path())
        try:
            api.get_player()
            logged_in = True
        except NotLoggedInException:
            print('thread {} Login Error, retry {}/10').format(wid,login_attempt)
            login_attempt += 1
            time.sleep(0.5)
    #iterate
    # Five timed passes, each padded to a 10-minute (600 s) cadence...
    for j in range(5):
        startTime = time.time()
        print 'worker {} is doing {} pass'.format(wid,num2words[j])
        for i in xrange(workStart,workStop):
            doScanp(wid,scans[i][0], scans[i][1], api)
        curTime=time.time()
        if 600-(curTime-startTime) > 0:
            print 'worker {} took {} seconds to do {} pass, now sleeping for {}'.format(wid,curTime-startTime,num2words[j],600-(curTime-startTime))
            time.sleep(600-(curTime-startTime))
        else:
            print 'worker {} took {} seconds to do {} pass so not sleeping'.format(wid,curTime-startTime,num2words[j])
    # ...then one final, unpadded sixth pass.
    startTime = time.time()
    print 'worker {} is doing {} pass'.format(wid,num2words[5])
    for i in xrange(workStart,workStop):
        doScanp(wid,scans[i][0], scans[i][1], api)
    curTime=time.time()
    print 'worker {} took {} seconds to do {} pass ending thread'.format(wid,curTime-startTime,num2words[5])
def main():
tscans = genwork()
print 'total of {} steps'.format(tscans)
numWorkers = ((tscans-1)//config['stepsPerPassPerWorker'])+1
if numWorkers > len(config['users']):
numWorkers = len(config['users'])
print 'with {} worker(s), doing {} scans each, would take {} hour(s)'.format(numWorkers,config['stepsPerPassPerWorker'],int(math.ceil(float(tscans)/(numWorkers*config['stepsPerPassPerWorker']))))
if (config['stepsPerPassPerWorker']*config['scanDelay']) > 600:
print 'error. scan will take more than 10mins so all 6 scans will take more than 1 hour'
print 'please try using less scans per worker'
return
#heres the logging setup
# log settings
# log format
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s [%(module)10s] [%(levelname)5s] %(message)s')
# log level for http request class
logging.getLogger("requests").setLevel(logging.WARNING)
# log level for main pgoapi class
logging.getLogger("pgoapi").setLevel(logging.WARNING)
# log level for internal pgoapi class
logging.getLogger("rpc_api").setLevel(logging.WARNING)
if config['auth_service'] not in ['ptc', 'google']:
log.error("Invalid Auth service specified! ('ptc' or 'google')")
return None
#setup done
threads = []
scansStarted = 0
for i in xrange(len(config['users'])):
if scansStarted >= len(scans):
break;
time.sleep(1)
t = threading.Thread(target=worker, args = (i,scansStarted))
t.start()
threads.append(t)
scansStarted += config['stepsPerPassPerWorker']
while scansStarted < len(scans):
time.sleep(15)
for i in xrange(len(threads)):
if not threads[i].isAlive():
threads[i] = threading.Thread(target=worker, args = (i,scansStarted))
threads[i].start()
scansStarted += config['stepsPerPassPerWorker']
for t in threads:
t.join()
print 'all done. saving data'
out = []
for poke in pokes.values():
out.append(poke)
f = open('pokes.json','w')
json.dump(out,f)
f.close()
out = []
for poke in spawns.values():
out.append(poke)
f = open('spawns.json','w')
json.dump(out,f)
f.close()
out = []
for poke in stops.values():
out.append(poke)
f = open('stops.json','w')
json.dump(out,f)
f.close()
out = []
for poke in gyms.values():
out.append(poke)
f = open('gyms.json','w')
json.dump(out,f)
f.close()
#output GeoJSON data
with open('gyms.json') as file:
items = json.load(file)
geopoints = []
for location in items:
point = geojson.Point((location['lng'], location['lat']))
feature = geojson.Feature(geometry=point, id=location['id'],properties={"name":location['id']})
geopoints.append(feature)
features = geojson.FeatureCollection(geopoints)
f = open('geo_gyms.json','w')
json.dump(features,f)
f.close()
with open('stops.json') as file:
items = json.load(file)
geopoints = []
for location in items:
point = geojson.Point((location['lng'], location['lat']))
feature = geojson.Feature(geometry=point, id=location['id'],properties={"name":location['id']})
geopoints.append(feature)
features = geojson.FeatureCollection(geopoints)
f = open('geo_stops.json','w')
json.dump(features,f)
f.close()
# Entry point: plan, scan and export (see main()).
if __name__ == '__main__':
    main()
|
14,564 | 5fdc91db26ea54cf64952a041ddb43b0bfc7e0eb | #
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from MDSplus import mdsExceptions, Device
class SPIDER(Device):
    """MARTe configuration"""
    # Scalar nodes: a free-text comment plus acquisition timing
    # (frequency / start / duration) for each data source and the
    # breakdown settings.
    parts = [{'path': ':COMMENT', 'type': 'text'}]
    for _node, _value in [('CAMERA_FREQ', 10), ('CAMERA_START', 0),
                          ('CAMERA_DURAT', 60), ('CAEN_FREQ', 2),
                          ('CAEN_START', 0), ('CAEN_DURAT', 30),
                          ('NI_FREQ', 10000), ('NI_START', 0),
                          ('NI_DURAT', 60), ('NI6368_FREQ', 10000),
                          ('NI6368_START', 0), ('NI6368_DURAT', 60),
                          ('BREAK_DEAD', 10), ('BREAK_REC', 0)]:
        parts.append({'path': ':%s' % _node, 'type': 'numeric', 'value': _value})
    # Waveform subtrees: eight numbered waves plus the reference wave, each
    # holding a signal node and min/max display bounds for both axes.
    _wave_names = []
    for _i in range(1, 9):
        _wave_names.append('WAVE_%d' % _i)
    _wave_names.append('WAVE_REC')
    for _wave in _wave_names:
        parts.append({'path': '.%s' % _wave, 'type': 'structure'})
        parts.append({'path': '.%s:WAVE' % _wave, 'type': 'signal'})
        for _limit in ('MIN_X', 'MAX_X', 'MIN_Y', 'MAX_Y'):
            parts.append({'path': '.%s:%s' % (_wave, _limit), 'type': 'numeric'})
    # Drop the loop helpers so they do not become class attributes.
    del _node, _value, _i, _wave_names, _wave, _limit
|
14,565 | 68cc4f6bf51b9de0b6882a0e4d4b4a3f9ae543c7 | import json
from policyuniverse.policy import Policy
from policyuniverse.expander_minimizer import expand_policy
# White listed policies (e.g. the approved admin policies) can be specified here, or as an
# exception to this policy.
# Action-summary categories that count as administrative for this check.
ADMIN_ACTIONS = {
    'Permissions',
}
def policy(resource):
    """Return True (compliant) when the resource's IAM policy grants no
    administrative actions, False otherwise.

    The policy document is expanded (wildcard actions resolved) before its
    per-service action summary is compared against ADMIN_ACTIONS.
    """
    document = json.loads(resource['PolicyDocument'])
    summary = Policy(expand_policy(document)).action_summary()
    # Compliant only if no service's action set overlaps the admin set.
    return all(
        not ADMIN_ACTIONS.intersection(actions)
        for actions in summary.values())
|
14,566 | 8c79b71935fa8623f77323517e4a82f3b537b101 | # -*- coding: utf-8 -*-
# @Time : 2019-01-09 17:41
# @Author : 彭涛
# @Site :
# @File : form.py
# @Software: PyCharm
from django import forms
from django.forms import widgets
from django.forms import fields
from django.forms import ModelForm
from django.forms import inlineformset_factory
from .models import *
class vocher_form(ModelForm):
    """ModelForm over the `voucher` model exposing the voucher header fields
    (number, date, totals, the signer names and the posted flag)."""
    class Meta:
        model = voucher
        fields = ("voucher_no","date","enclosure","total_dr","total_cr","accounting_supervisor","book_keepinger","cashier","auditor","order_makinger","isbookkeeping",)
# Inline formset: voucher_content line items edited together with a voucher
# (3 extra blank rows, at most 5 rows, no deletion).
vochercontentFormSet=inlineformset_factory(voucher,voucher_content,fields=("brife","accountingsubject","accountingsubject_2",),extra=3, can_delete=False, max_num=5)
class vocher_input_fom(forms.Form):
    """Flat (non-model) input form for one accounting voucher.

    Header fields mirror vocher_form; the repeated vc_*_1 .. vc_*_8 groups
    are the up-to-eight voucher lines, each with a brief/summary (bf),
    two accounting-subject ids (ac, ac2), debit (dr), credit (cr) and a
    bookkeeping flag. Error messages are user-facing Chinese strings.
    """
    # --- voucher header ---
    date=forms.DateField(required=True,error_messages={"required": "不能为空","invalid": "格式错误","min_length": "用户名最短8位"})
    enclosure=forms.IntegerField(required=False,error_messages={"invalid": "格式错误"})
    accounting_supervisor=forms.CharField(max_length=12,required=True,error_messages={"required": "主管签字不能为空","invalid": "格式错误","max_length": "主管太长"})
    voucher_no=forms.CharField(max_length=10,required=True,error_messages={"required": "不能为空","invalid": "格式错误","max_length": "凭证编码太长"})
    book_keepinger=forms.CharField(required=False,max_length=12,error_messages={"invalid": "格式错误","max_length": "人名不大于六字符"})
    cashier=forms.CharField(required=False,max_length=12,error_messages={"invalid": "格式错误","max_length": "人名不大于六字符"})
    auditor=forms.CharField(required=False,max_length=12,error_messages={"invalid": "格式错误","max_length": "人名不大于六字符"})
    order_makinger=forms.CharField(label="制单人",max_length=12,error_messages={"invalid": "格式错误","max_length": "人名不大于六字符"})
    total_dr = forms.DecimalField(required=False, max_digits=11, decimal_places=2)
    total_cr = forms.DecimalField(required=False, max_digits=11, decimal_places=2)
    isbookkeeping = forms.BooleanField(required=False)
    # --- voucher lines 1..8 ---
    vc_bf_1=forms.CharField(required=False,max_length=50,error_messages={"invalid": "格式错误","max_length": "摘要超过了50个字符"})
    vc_ac_1=forms.IntegerField(required=False)
    vc_ac2_1=forms.IntegerField(required=False)
    vc_dr_1=forms.DecimalField(required=False,max_digits=11,decimal_places=2)
    vc_cr_1=forms.DecimalField(required=False,max_digits=11,decimal_places=2)
    vc_isBookkeeping_1=forms.BooleanField(required=False)
    vc_bf_2=forms.CharField(required=False,max_length=50,error_messages={"invalid": "格式错误","max_length": "摘要超过了50个字符"})
    vc_ac_2 = forms.IntegerField(required=False)
    vc_ac2_2 = forms.IntegerField(required=False)
    vc_dr_2=forms.DecimalField(required=False,max_digits=11,decimal_places=2)
    vc_cr_2=forms.DecimalField(required=False,max_digits=11,decimal_places=2)
    vc_isBookkeeping_2= forms.BooleanField(required=False)
    vc_bf_3=forms.CharField(required=False,max_length=50,error_messages={"invalid": "格式错误","max_length": "摘要超过了50个字符"})
    vc_ac_3 = forms.IntegerField(required=False)
    vc_ac2_3 = forms.IntegerField(required=False)
    vc_dr_3=forms.DecimalField(required=False,max_digits=11,decimal_places=2)
    vc_cr_3=forms.DecimalField(required=False,max_digits=11,decimal_places=2)
    vc_isBookkeeping_3 = forms.BooleanField(required=False)
    vc_bf_4=forms.CharField(required=False,max_length=50,error_messages={"invalid": "格式错误","max_length": "摘要超过了50个字符"})
    vc_ac_4 = forms.IntegerField(required=False)
    vc_ac2_4 = forms.IntegerField(required=False)
    vc_dr_4=forms.DecimalField(required=False,max_digits=11,decimal_places=2)
    vc_cr_4=forms.DecimalField(required=False,max_digits=11,decimal_places=2)
    vc_isBookkeeping_4= forms.BooleanField(required=False)
    vc_bf_5=forms.CharField(required=False,max_length=50,error_messages={"invalid": "格式错误","max_length": "摘要超过了50个字符"})
    vc_ac_5= forms.IntegerField(required=False)
    vc_ac2_5 = forms.IntegerField(required=False)
    vc_dr_5=forms.DecimalField(required=False,max_digits=11,decimal_places=2)
    vc_cr_5=forms.DecimalField(required=False,max_digits=11,decimal_places=2)
    vc_isBookkeeping_5 = forms.BooleanField(required=False)
    vc_bf_6=forms.CharField(required=False,max_length=50,error_messages={"invalid": "格式错误","max_length": "摘要超过了50个字符"})
    vc_ac_6 = forms.IntegerField(required=False)
    vc_ac2_6 = forms.IntegerField(required=False)
    vc_dr_6=forms.DecimalField(required=False,max_digits=11,decimal_places=2)
    vc_cr_6=forms.DecimalField(required=False,max_digits=11,decimal_places=2)
    vc_isBookkeeping_6 = forms.BooleanField(required=False)
    vc_bf_7=forms.CharField(required=False,max_length=50,error_messages={"invalid": "格式错误","max_length": "摘要超过了50个字符"})
    vc_ac_7 = forms.IntegerField(required=False)
    vc_ac2_7 = forms.IntegerField(required=False)
    vc_dr_7=forms.DecimalField(required=False,max_digits=11,decimal_places=2)
    vc_cr_7=forms.DecimalField(required=False,max_digits=11,decimal_places=2)
    vc_isBookkeeping_7 = forms.BooleanField(required=False)
    vc_bf_8=forms.CharField(required=False,max_length=50,error_messages={"invalid": "格式错误","max_length": "摘要超过了50个字符"})
    vc_ac_8 = forms.IntegerField(required=False)
    vc_ac2_8 = forms.IntegerField(required=False)
    vc_dr_8=forms.DecimalField(required=False,max_digits=11,decimal_places=2)
    vc_cr_8=forms.DecimalField(required=False,max_digits=11,decimal_places=2)
    vc_isBookkeeping_8 = forms.BooleanField(required=False)
4 |
14,567 | 38b9e15e17ec56e02acbaa2d68ff775d387e0d46 | import tensorflow as tf
from tensorflow.contrib.layers.python.layers.layers import layer_norm
__author__ = 'assafarbelle'
def conv(in_tensor,
         name,
         kx,
         ky,
         kout,
         stride=None,
         biased=True,
         kernel_initializer=None,
         biase_initializer=None,
         padding='VALID',
         data_format='NHWC',
         reuse=False
         ):
    """2D convolution layer (TF1 graph mode).

    Creates 'weights' (and optionally 'bias') variables under scope `name`
    and applies tf.nn.conv2d.

    in_tensor -- 4D input, layout per `data_format`
    kx, ky    -- kernel height/width; kout -- output channels
    stride    -- None (=1), an int, an [h, w] pair, or a full 4-vector;
                 ints/pairs are expanded according to `data_format`
    Returns (output, kernel variable, bias variable or None).
    """
    with tf.variable_scope(name, reuse=reuse):
        # Index of the channel dimension in the input layout.
        channel = 1 if data_format == 'NCHW' else 3
        in_shape = in_tensor.get_shape().as_list()[channel]
        kernel_shape = [kx, ky, in_shape, kout]
        # Normalize the stride spec to a full 4-element vector.
        if not stride:
            stride = [1, 1, 1, 1]
        elif isinstance(stride, int):
            if channel == 3:
                stride = [1, stride, stride, 1]
            else:
                stride = [1, 1, stride, stride]
        elif isinstance(stride, list) and len(stride) == 2:
            if channel == 3:
                stride = [1] + stride + [1]
            else:
                stride = [1, 1] + stride
        kernel = tf.get_variable('weights', shape=kernel_shape, initializer=kernel_initializer)
        # Note: this local `conv` shadows the enclosing function's name,
        # which is harmless here but easy to misread.
        conv = tf.nn.conv2d(in_tensor, kernel, strides=stride, padding=padding, data_format=data_format)
        if biased:
            b = tf.get_variable('bias', kout, initializer=biase_initializer)
            out = tf.nn.bias_add(conv, b, data_format=data_format, name=name)
        else:
            out = conv
            b = None
        return out, kernel, b
def conv2d_transpose(in_tensor,
                     name,
                     kx,
                     ky,
                     kout,
                     outshape,
                     stride=None,
                     biased=True,
                     kernel_initializer=None,
                     biase_initializer=None,
                     padding='VALID',
                     data_format='NHWC'
                     ):
    """Transposed (fractionally-strided) 2D convolution layer (TF1 graph
    mode). Creates 'weights' (and optionally 'bias') variables under scope
    `name`; `outshape` is the explicit output shape required by
    tf.nn.conv2d_transpose.

    Returns (output, kernel variable, bias variable or None).
    """
    with tf.variable_scope(name):
        channel = 1 if data_format == 'NCHW' else 3
        in_shape = in_tensor.get_shape().as_list()[channel]
        # Transposed-conv kernel layout: [kx, ky, out_channels, in_channels].
        kernel_shape = [kx, ky, kout, in_shape]
        if not stride:
            stride = [1, 1, 1, 1]
        elif isinstance(stride, int):
            # NOTE(review): unlike conv(), this expansion always assumes NHWC
            # ordering regardless of data_format -- confirm that NCHW callers
            # pass a full 4-element stride.
            stride = [1, stride, stride, 1]
        elif isinstance(stride, list) and len(stride) == 2:
            stride = [1] + stride + [1]
        kernel = tf.get_variable('weights', shape=kernel_shape, initializer=kernel_initializer)
        conv_t = tf.nn.conv2d_transpose(in_tensor, kernel, output_shape=outshape, strides=stride, padding=padding,
                                        data_format=data_format)
        if biased:
            b = tf.get_variable('bias', kout, initializer=biase_initializer)
            out = tf.nn.bias_add(conv_t, b, name=name)
        else:
            out = conv_t
            b = None
        return out, kernel, b
def fc(in_tensor, name, kout,
       biased=True,
       weights_initializer=None,
       biase_initializer=None,
       ):
    """Fully connected layer (TF1 graph mode): flattens everything past the
    batch dimension, then computes in_tensor @ W (+ b) with variables
    created under scope `name`.

    Returns (output, weights variable, bias variable or None).
    """
    shape = in_tensor.get_shape().as_list()
    if len(shape) > 2:
        # Collapse all trailing dimensions into one feature axis.
        in_tensor = tf.reshape(in_tensor, [shape[0], -1])
    fan_in = in_tensor.get_shape().as_list()[1]
    with tf.variable_scope(name):
        weights = tf.get_variable('weights', [fan_in, kout], initializer=weights_initializer)
        projected = tf.matmul(in_tensor, weights, name=name)
        if not biased:
            return projected, weights, None
        bias = tf.get_variable('bias', kout, initializer=biase_initializer)
        return tf.add(projected, bias, name=name), weights, bias
def leaky_relu(in_tensor, name, alpha=0.1):
    """Leaky ReLU: elementwise max(x, alpha * x)."""
    leak = tf.multiply(tf.constant(alpha), in_tensor)
    return tf.maximum(in_tensor, leak, name=name)
def max_pool(in_tensor, name, ksize=None, strides=None, padding='VALID', data_format='NHWC'):
    """2x2 max pooling by default (TF1 graph mode).

    `ksize` and `strides` each accept None (default 2x2), an int, an [h, w]
    pair, or a full 4-element vector; ints/pairs are expanded according to
    `data_format`.

    BUG FIX: the original ksize branch tested `isinstance(strides, list)`
    instead of `isinstance(ksize, list)`, so a 2-element ksize list was only
    expanded when strides also happened to be a list.
    """
    channel = 1 if data_format == 'NCHW' else 3

    def _norm(value, default_hw):
        # Normalize an int / [h, w] / None spec to a full 4-element vector;
        # anything else is passed through unchanged.
        if not value:
            hw = [default_hw, default_hw]
        elif isinstance(value, int):
            hw = [value, value]
        elif isinstance(value, list) and len(value) == 2:
            hw = value
        else:
            return value
        return [1] + hw + [1] if channel == 3 else [1, 1] + hw

    ksize = _norm(ksize, 2)
    strides = _norm(strides, 2)
    return tf.nn.max_pool(in_tensor, ksize, strides, padding, name=name, data_format=data_format)
def batch_norm(in_tensor, phase_train, name, reuse=None, data_format='NHWC', center=True, scale=True):
    """
    Batch normalization over the channel axis of a feature map.

    Thin wrapper around tf.layers.batch_normalization that picks the channel
    axis from `data_format` and scopes the variables under `name`.

    Args:
        in_tensor: input tensor (NHWC or NCHW layout).
        phase_train: boolean tensor/value; True selects training behaviour.
        name: variable scope for the normalization variables.
        reuse: forwarded to tf.layers.batch_normalization.
        data_format: 'NHWC' (channels last) or 'NCHW' (channels first).
        center, scale: enable the beta / gamma parameters.
    Return:
        The batch-normalized tensor.

    NOTE(review): epsilon=1e-1 is far larger than the usual 1e-3..1e-5
    defaults -- confirm this is intentional. Presumably callers also run the
    UPDATE_OPS collection so the moving averages update; not visible here.
    """
    axis = -1 if data_format == 'NHWC' else 1
    with tf.variable_scope(name):
        # return tf.contrib.layers.batch_norm(in_tensor, is_training=phase_train, scope=scope, reuse=reuse)
        return tf.layers.batch_normalization(in_tensor, axis=axis, center=center, scale=scale, training=phase_train,
                                             reuse=reuse, fused=True, momentum=0.99, epsilon=1e-1)
def layer_norm(in_tensor, data_format='NHWC'):
    """Layer normalization; NCHW input is transposed to NHWC for the
    normalization and transposed back afterwards.

    BUG FIX: this def shadows the `layer_norm` imported at the top of the
    file, so the original body recursively called *itself* and recursed
    forever. Call the TF contrib implementation explicitly instead.
    """
    if data_format == 'NCHW':
        in_tensor = tf.transpose(in_tensor, (0, 2, 3, 1))
    out_tensor = tf.contrib.layers.layer_norm(in_tensor)
    if data_format == 'NCHW':
        out_tensor = tf.transpose(out_tensor, (0, 3, 1, 2))
    return out_tensor
|
14,568 | 28b23181f4c2d3d6e204852b8ab0caed0347935c | import numpy as np
import random
# Find homography which map @p2 to @p1.
#
# Inputs:
# @p1: point set 1.
# @p2: point set 2.
#
# Return: Homography for @p1 and @p2.
def GetHomography(p1, p2):
    """Estimate the 3x3 homography H mapping `p2` onto `p1` with the
    normalized DLT algorithm.

    Inputs:
      p1, p2: (N, 2) arrays of corresponding points, N >= 4.

    Return: H such that H @ [x2, y2, 1]^T is proportional to [x1, y1, 1]^T
    for each correspondence (exact for 4 points in general position,
    least-squares for more).
    """
    assert isinstance(p1, np.ndarray) and isinstance(p2, np.ndarray)
    assert p1.shape[0] == p2.shape[0]

    # Build the (Hartley) normalization matrix and its inverse: translate the
    # centroid to the origin and scale each axis to variance 2.
    def get_normalization_matrix(points):
        x_mean, y_mean = np.mean(points, axis=0)
        var_x, var_y = np.var(points, axis=0)
        s_x, s_y = np.sqrt(2/var_x), np.sqrt(2/var_y)
        return np.array([[s_x, 0, -s_x*x_mean],
                         [ 0, s_y, -s_y*y_mean],
                         [ 0, 0, 1]]), \
               np.array([[1/s_x, 0, x_mean],
                         [ 0, 1/s_y, y_mean],
                         [ 0, 0, 1]])

    # Normalize p1 (homogeneous, one point per row).
    N_p1, N_inv_p1 = get_normalization_matrix(p1)
    p1 = np.hstack((p1, np.ones((p1.shape[0], 1)))).T
    p1 = N_p1.dot(p1).T

    # Normalize p2.
    N_p2, _ = get_normalization_matrix(p2)
    p2 = np.hstack((p2, np.ones((p2.shape[0], 1)))).T
    p2 = N_p2.dot(p2).T

    # Build the DLT coefficient matrix P: two rows per correspondence.
    P = np.zeros((len(p1)<<1, 9))
    P[::2, :3] = p2
    P[1::2, 3:6] = p2
    P[::2, 6:] = -p2 * p1[:, 0, None]
    P[1::2, 6:] = -p2 * p1[:, 1, None]

    # The homography is the right singular vector for the smallest singular
    # value of P.
    # BUG FIX: with the minimal 4-point case P is 8x9 and
    # full_matrices=False truncates vh to 8 rows, dropping the null-space
    # vector entirely; full_matrices=True always yields the full 9x9 vh
    # (identical to the reduced result whenever N >= 5).
    _, _, vh = np.linalg.svd(P, full_matrices=True)
    tmp = vh[-1].reshape((3, 3))

    # Undo the normalization: H = N1^-1 * H_norm * N2.
    H = N_inv_p1.dot(tmp).dot(N_p2)
    return H
# Implement RANSAC.
#
# Inputs:
# @p1: point set 1.
# @p2: point set 2.
#
# Return: Best fit homography between @p1 and @p2.
def RANSAC(p1, p2):
    """Robustly estimate the homography mapping `p2` onto `p1` via RANSAC.

    p1, p2: (N, 2) arrays of corresponding points; N must be >= 8 since
    each iteration samples 8 pairs (random.sample raises otherwise).
    Returns the homography with the highest inlier ratio found.
    """
    assert isinstance(p1, np.ndarray) and isinstance(p2, np.ndarray)
    assert p1.shape[0] == p2.shape[0]

    # Sample @n_samples pairs in each iteration.
    n_samples = 8 # int(p1.shape[0] * 0.1)
    # Total @n_iters iterations.
    # Standard RANSAC iteration count for 99% confidence of drawing at
    # least one all-inlier sample at the assumed outlier ratio.
    outlier_ratio = 0.05
    n_iters = int(np.log(1 - 0.99) / np.log(1 - (1-outlier_ratio)**n_samples))
    inlier_threshold = 10.0

    best_homography = None
    best_inlier_ratio = 0.0
    for _ in range(n_iters):
        # Get sample pairs.
        rand_idx = random.sample(range(0, p1.shape[0]), n_samples)
        tmp_p1, tmp_p2 = p1[rand_idx], p2[rand_idx]
        # Get homography which map tmp_p2 to tmp_p1.
        H = GetHomography(tmp_p1, tmp_p2)
        # Map p2 to p1's coordinate by the homography we got.
        tmp_p2 = np.hstack((p2, np.ones((p2.shape[0], 1)))).T
        map_p2 = H @ tmp_p2
        # Dehomogenize before comparing against p1.
        map_p2 /= map_p2[2, :]
        map_p2 = map_p2[:2, :].T
        # Use square error.
        error = np.sqrt(np.sum((p1 - map_p2) ** 2, axis=1))
        # Calculate inlier ratio according to the threshold.
        inlier_num = len(error[error < inlier_threshold])
        inlier_ratio = inlier_num / p1.shape[0]
        if inlier_ratio >= best_inlier_ratio:
            best_inlier_ratio = inlier_ratio
            best_homography = H
    return best_homography
|
name = "Zen"
# BUG FIX: printed the literal 2 instead of the `name` variable defined above.
print("My name is", name)
14,570 | 8b9b0b52606063c78aa47fe510925bf21a046d92 | from Poker import Poker
class Player:
    """A poker player that can join and act in at most one game at a time."""

    def __init__(self, name, money=2000):
        self._name = name
        self._money = money
        # Both are populated when the player joins a game via enterGame().
        self._currentGame = None
        self._currentIndex = None

    def getName(self):
        """Return the player's display name."""
        return self._name

    def shufflePoker(self):
        """Ask the current game to shuffle its deck and announce it."""
        self._currentGame.shuffle()
        print("Player " + self._name + ' has shuffled poker.')

    def enterGame(self, game, index):
        """Join *game* at seat *index* and announce the entry."""
        self._currentGame = game
        self._currentIndex = index
        print('Player ' + self._name + ' has entered game ' + str(game.getId()) + '!')

    def call(self):
        """Match the current bet; the game reports the chips owed."""
        stake = self._currentGame.recall(self._currentIndex)
        self._money = self._money - stake
        print('%s跟注了%d个筹码' % (self._name, stake))

    def quit(self):
        """Fold the current hand."""
        self._currentGame.reQuit(self._currentIndex)
        print('%s弃牌了' % self._name)

    def Raise(self, rate=2):
        """Raise the bet by *rate*; the game reports the chips owed."""
        stake = self._currentGame.reRaise(self._currentIndex, rate)
        self._money = self._money - stake
        print('%s加注了%d个筹码' % (self._name, stake))

    def flop(self):
        """Reveal this player's hand as reported by the game."""
        hand = self._currentGame.reFlop(self._currentIndex)
        print('你的牌是: ' + hand)

    def winGame(self, money):
        """Credit winnings to the player's bankroll."""
        self._money = self._money + money
|
#!/usr/bin/env python
# BSD Licence
# Copyright (c) 2011, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.

# subset_2d_cdat.py
#
# Scripts implementing this test should subset a dataset to a given
# bounding box.  Scripts can assume the given variable is a 2D field.
#
# usage: %prog dataset variable bbox outputfile
#
# where bbox = 'lon0,lon1,lat0,lat1'

import sys
import cdms2

# Command line: dataset path, variable name, bounding box, output path.
dataset, variable, bbox, outfile = sys.argv[1:]
lon0, lon1, lat0, lat1 = (float(x) for x in bbox.split(','))

ds = cdms2.open(dataset)
var = ds[variable]

# BUG FIX: the coordinate keyword was misspelled 'longitute', which cdms2
# does not recognise as the longitude-axis selector.
subset = var(longitude=(lon0, lon1),
             latitude=(lat0, lat1))

out_ds = cdms2.open(outfile, 'w')
out_ds.write(subset)
out_ds.close()
|
14,572 | b14f16475f98ff71584e2ce3d4afbbebadfcc7d1 | # -*- coding: utf-8 -*-
import unittest
from http_clients import *
from stomp_client import *
from mqtt_client import *
import sys
import logging
import json
logger = logging.getLogger()
logger.level = logging.INFO
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
# For tests to print function name
# this_function_name = sys._getframe().f_code.co_name
import sys
'''
NOTE: All tests here need a Byteport instance to communicate with
'''
class TestHttpClientBase(unittest.TestCase):
    """Shared configuration for the Byteport HTTP integration tests.

    Each environment tuple is (api_hostname, api_key, username, password).
    Switch TEST_ENVIRONMENT to point the whole suite at a different
    deployment; the class-level aliases below are what the tests read.
    """

    PRODUCTION = ('api.byteport.se', '-(LOOK IT UP)-', '-(LOOK IT UP)-', '-(LOOK IT UP)-')
    STAGE = ('stage.byteport.se', '-(LOOK IT UP)-', '-(LOOK IT UP)-', '-(LOOK IT UP)-')
    ACCEPTANCE = ('acc.www.byteport.se', 'd74f48f8375a32ca632fa49a', 'N/A', 'N/A')
    LOCALHOST = ('localhost:8000', 'TEST', 'admin@igw.se', 'admin')

    # The environment used by every test in this module.
    TEST_ENVIRONMENT = LOCALHOST

    byteport_api_hostname = TEST_ENVIRONMENT[0]
    key = TEST_ENVIRONMENT[1]
    test_user = TEST_ENVIRONMENT[2]
    test_password = TEST_ENVIRONMENT[3]

    # Namespace/device the store tests write into.
    namespace = 'test'
    device_uid = 'byteport-api-tests'
class TestStore(TestHttpClientBase):
    """Integration tests for storing timeseries samples via the HTTP API.

    Requires a reachable Byteport instance (see TestHttpClientBase).
    Every store()/log() call is expected to raise on failure, so most
    tests pass simply by not raising.
    """

    def test_should_store_string_to_single_field_name_using_GET_client(self):
        # NOTE(review): despite the name this uses ByteportHttpClient, not
        # ByteportHttpGetClient -- confirm which transport is intended.
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        data = {'string': 'hello string'}
        # Will raise exception upon errors
        client.store(data)

    def test_should_receive_error_for_missing_deviceuid_using_GET_client(self):
        # An empty device UID must be rejected by the server.
        try:
            client = ByteportHttpClient(
                byteport_api_hostname=self.byteport_api_hostname,
                namespace_name=self.namespace,
                api_key=self.key,
                default_device_uid=''
            )
            data = {'string': 'hello string'}
            # Will raise exception upon errors
            client.store(data)
        except ByteportClientDeviceNotFoundException:
            return
        raise Exception("Unit under test did not raise the correct exception!")

    def test_should_store_data_series_using_GET_client(self):
        client = ByteportHttpGetClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        # Store a short ramp, spaced 200 ms apart.
        for v in range(0, 10):
            data = {'ramp': float(v)+0.0001}
            client.store(data)
            time.sleep(0.2)

    def test_should_store_utf8_convertibel_string_to_single_field_name_using_GET_client(self):
        client = ByteportHttpGetClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        # Unicode string that can be converted to UTF-8
        data = {'unicode_string': u'mötley crüe'}
        client.store(data)

    def test_should_not_store_non_utf8_convertible_string_to_single_field_name_using_GET_client(self):
        client = ByteportHttpGetClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        # A sting that can not be encoded to UTF-8: exception should be thrown client side
        data = {'unicode_string': '\x80'}
        self.assertRaises(ByteportClientInvalidDataTypeException, client.store, data)

    def test_should_store_number_to_single_field_name_using_GET_client(self):
        client = ByteportHttpGetClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        data = {'number': 1337}
        # Will raise exception upon errors
        client.store(data)

    def test_should_store_number_to_single_field_name_with_custom_high_prec_timestamp_using_GET_client(self):
        client = ByteportHttpGetClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        data = {'number': 1338}
        # Will raise exception upon errors
        # Microsecond-resolution timestamp, supplied by the caller instead
        # of defaulting to "now" on the server.
        custom_timestamp = datetime.datetime.strptime('2015-05-01T00:00:00.012345', '%Y-%m-%dT%H:%M:%S.%f')
        client.store(data, timestamp=custom_timestamp)

    def test_should_log_info_using_GET_client(self):
        client = ByteportHttpGetClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        # Will raise exception upon errors
        # Deliberately long message to verify nothing truncates it en route.
        client.log('info from integration tests using GET API. Lets repete this boring message just to get a shit load of text so it wont be truncated anywhere along the way: info from integration tests using GET APIinfo from integration tests using GET APIinfo from integration tests using GET APIinfo from integration tests using GET APIinfo from integration tests using GET APIinfo from integration tests using GET APIinfo from integration tests using GET API', 'info')

    def test_should_log_info_using_POST_client(self):
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        # Will raise exception upon errors
        client.log('info from integration tests using POST API', 'info')

    def test_should_store_string_to_single_field_name_using_POST_client(self):
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        data = {'fukt': 20}
        # Will raise exception upon errors
        client.store(data)
class TestHttpClientPacketStore(TestHttpClientBase):
    """Integration tests for packet, binary-blob, file and directory
    uploads through the POST client.

    The base64 tests exercise base64_encode_and_store() with and without
    gzip/bzip2 compression, from tiny payloads up to 10 KB buffers.
    All calls are expected to raise on failure.
    """

    def test_should_store_packets_using_POST_client_vs_legacy_api(self):
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        # Two legacy-format packets: one timestamped 20 s into the future,
        # one 20 s into the past.
        p0 = dict()
        p0['uid'] = self.device_uid
        p0['namespace'] = self.namespace
        p0['timestamp'] = '%s' % (time.time()+20)
        p0['data'] = 'f1=10;f2=20;'
        p1 = dict()
        p1['uid'] = self.device_uid
        p1['namespace'] = self.namespace
        p1['timestamp'] = '%s' % (time.time()-20)
        p1['data'] = 'f1=10;f2=20;'
        # Will raise exception upon errors
        # NOTE(review): placeholder key -- must be replaced with a real
        # legacy API key before this test can pass.
        client.store_packets([p0, p1], 'INSERT_LEGACY_KEY')

    def test_should_store_text_data_base64_encoded_to_single_field_name_using_POST_client(self):
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        field_name = 'string_b64'
        data_block = 'hello world'
        # Will raise exception upon errors
        client.base64_encode_and_store(field_name, data_block)

    def test_should_store_binary_data_to_single_field_name_using_POST_client(self):
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        field_name = 'bin_b64'
        binary_data = '\x10\x20\x30\x40'
        # Will raise exception upon errors
        client.base64_encode_and_store(field_name, binary_data)

    def test_should_compress_and_store_binary_data_to_single_field_name_using_POST_client(self):
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        field_name = 'bin_gzip_b64'
        binary_data = '\x10\x20\x30\x40'
        # Will raise exception upon errors
        client.base64_encode_and_store(field_name, binary_data, compression='gzip')

    def test_should_store_10K_binary_data_to_single_field_name_using_POST_client(self):
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        field_name = 'large_bin_b64'
        binary_data_base = '\x00\x10\x20\x30\x40\x50\x60\x70\x80\x90'
        data_buffer = bytearray()
        # Make a 10K buffer
        for i in range(0, 1000):
            data_buffer.extend(binary_data_base)
        # Will raise exception upon errors
        client.base64_encode_and_store(field_name, bytes(data_buffer))

    def test_should_store_10K_binary_data_and_gzip_to_single_field_name_using_POST_client(self):
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        field_name = 'large_bin_gzip_b64'
        binary_data_base = '\x00\x10\x20\x30\x40\x50\x60\x70\x80\x90'
        data_buffer = bytearray()
        # Make a 10K buffer
        for i in range(0, 1000):
            data_buffer.extend(binary_data_base)
        # Will raise exception upon errors
        client.base64_encode_and_store(field_name, bytes(data_buffer), compression='gzip')

    def test_should_store_10K_binary_data_and_bzip2_to_single_field_name_using_POST_client(self):
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        field_name = 'large_bin_bzip2_b64'
        binary_data_base = '\x00\x10\x20\x30\x40\x50\x60\x70\x80\x90'
        data_buffer = bytearray()
        # Make a 10K buffer
        for i in range(0, 1000):
            data_buffer.extend(binary_data_base)
        # Will raise exception upon errors
        client.base64_encode_and_store(field_name, bytes(data_buffer), compression='bzip2')

    def test_should_store_test_file_single_field_name_using_POST_client(self):
        # Requires ./integer.txt next to the test module.
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        field_name = 'file_integer_raw'
        # Will raise exception upon errors
        client.store_file(field_name, './integer.txt')

    def test_should_store_test_file_and_bzip2_to_single_field_name_using_POST_client(self):
        # Requires ./test_file_for_integration_tests.txt next to the module.
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid
        )
        field_name = 'file_bzip2_b64'
        # Will raise exception upon errors
        client.base64_encode_and_store_file(field_name, './test_file_for_integration_tests.txt', compression='bzip2')

    def test_should_store_directory(self):
        # Requires ./test_directory next to the module; heartbeat suppressed
        # so only the directory upload hits the server.
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid,
            initial_heartbeat=False
        )
        client.store_directory('./test_directory', 'dir_storing_test')
class TestHttpClientLogin(TestHttpClientBase):
    """Integration tests for session login/logout on the HTTP client."""

    def test_should_login_with_correct_credentials(self):
        # login() is expected to raise on failure, so no assert is needed.
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname
        )
        client.login(self.test_user, self.test_password)

    def test_should_login_and_logout_and_not_have_access_after(self):
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname
        )
        client.login(self.test_user, self.test_password)
        client.logout()
        # After logout, any authenticated call must fail.
        try:
            client.list_namespaces()
        except Exception as e:
            return
        raise Exception("list_namespaces() did not raise exception after logout!")

    def test_should_not_login_with_invalid_credentials(self):
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname,
            namespace_name=self.namespace,
            api_key=self.key,
            default_device_uid=self.device_uid,
            initial_heartbeat=False
        )
        # Bogus credentials must raise the dedicated login exception.
        try:
            client.login('fakeuser', 'f00passb4r')
        except ByteportLoginFailedException:
            return
        raise Exception("ByteportLoginFailedException was NOT thrown during invalid login!")
class TestHttpLoginAndAccesss(TestHttpClientBase):
    """Integration tests for authenticated read/write API access.

    These assume the 'test' namespace contains specific fixture data
    (device '6000', 'TestGW', device type 1, firmware 2, field 5, etc.).
    """

    def test_should_login_and_make_set_field_operation(self):
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname
        )
        client.login(self.test_user, self.test_password)
        # List Namespaces
        result = client.set_fields('test', 'Control_example_device', {'Rubico app prop.Reboot': '1234'})
        self.assertTrue(len(result) > 0)

    def test_should_login_and_access_protected_resource(self):
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname
        )
        client.login(self.test_user, self.test_password)
        # List Namespaces
        result = client.list_namespaces()
        self.assertTrue(len(result) > 0)
        # Query for matching devices
        result = client.query_devices('test*', full=False, limit=10)
        self.assertTrue(len(result) > 1)
        # Load one device
        result = client.get_device('test', '6000')
        self.assertEqual(result[0]['guid'], 'test.6000')
        # Obtain a list of all UIDs in this namespace
        result = client.list_devices('test')
        for device in result:
            self.assertTrue(len(device['uid']) > 0)
        # Obtain a list of Devices (with contents)
        result = client.list_devices('test', 1)
        for device in result:
            self.assertTrue(len(device['active']) > 0)
        #Devices
        result = client.get_devices('test')
        self.assertTrue( len(result) > 0 )
        result = client.get_devices('test', "636744")
        self.assertTrue( len(result) == 0, "Should not find any device with id 636744, found: %s" % len(result) )
        result = client.get_devices('test', "TestGW")
        self.assertTrue( len(result) == 1, "Should only find one device with uid=TestGW., found %s" % len(result) )
        self.assertTrue( result[0][u'uid'] == u'TestGW', 'Device with id 1 should be the test GW, but was: "%s"' % result[0][u'uid'])
        #Devicetypes
        result = client.get_device_types('test')
        self.assertTrue( len(result) > 0 )
        result = client.get_device_types('test', "636744")
        self.assertTrue( len(result) == 0, "Should not find any devicetype with id 636744, found: %s" % len(result) )
        result = client.get_device_types('test', "1")
        self.assertTrue( len(result) == 1, "Should only find one devicetype with id=1, found %s" % len(result) )
        self.assertTrue( result[0][u'name'] == u'Generic Test Gateway', 'Device with id 1 should be the test GW, but was: "%s"' % result[0][u'name'])
        #device firmwares
        result = client.get_firmwares('test', device_type_id='1')
        self.assertTrue( len(result) > 0 )
        result = client.get_firmwares('test', device_type_id="1", key="636744")
        self.assertTrue( len(result) == 0, "Should not find any firmware with id 636744, found: %s" % len(result) )
        result = client.get_firmwares('test', device_type_id="1", key="2")
        self.assertTrue( len(result) == 1, "Should only find one device with id=1, found %s" % len(result) )
        self.assertTrue( result[0][u'filesize'] == u'6', 'Device fw with id 2 should have size 6, but was: "%s"' % result[0][u'filesize'])
        #device field-definitions
        result = client.get_field_definitions('test', device_type_id='2')
        self.assertTrue( len(result) > 0 )
        result = client.get_field_definitions('test', device_type_id="2", key="636744")
        self.assertTrue( len(result) == 0, "Should not find any field definition with id 636744, found: %s" % len(result) )
        result = client.get_field_definitions('test', device_type_id="2", key="5")
        self.assertTrue( len(result) == 1, "Should only find one field definition with id=1, found %s" % len(result) )
        self.assertTrue( result[0][u'name'] == u'b64_jsons', 'Device field 5 of test gw should be "b64_jsons", but was: "%s"' % result[0][u'name'])

    def test_should_login_and_access_timeseries_data(self):
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname
        )
        client.login(self.test_user, self.test_password)
        # List Namespaces
        result = client.list_namespaces()
        self.assertTrue(len(result) > 0)
        # Load time-series data
        # One-hour window ending now, then a 180-minute relative window.
        to_time = datetime.datetime.now()
        from_time = to_time - datetime.timedelta(hours=1)
        result = client.load_timeseries_data_range('test', '6000', 'temp', from_time, to_time)
        self.assertEqual(result['meta']['path'], u'test.6000.temp')
        result = client.load_timeseries_data('test', '6000', 'temp', timedelta_minutes=180)
        self.assertEqual(result['meta']['path'], u'test.6000.temp')

    def test_should_login_and_send_message(self):
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname
        )
        client.login(self.test_user, self.test_password)
        message_to_device = 'hello from integration tests'
        response = client.send_message('test', '6000', message_to_device)
        # note the message SENT is also JSON, so there is another JSON structure embeedded in the response!
        message = json.loads(response['message'])
        self.assertEqual(message[0]['data'], message_to_device)
class TestHttpRegisterDevice(TestHttpClientBase):
    """Integration tests for (batch-)registering devices.

    NOTE: this class uses Python 2 print statements; the module as a
    whole therefore targets Python 2.  The server signals failures by
    omitting 'registration_result' from the JSON response rather than
    by an HTTP error, which is what the assertions below check.
    """

    def test_should_make_vaild_register_call_for_existing_device(self):
        print "\n\n%s" % sys._getframe().f_code.co_name
        print "---------------------------------------------------------------"
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname
        )
        client.login(self.test_user, self.test_password)
        # Device 6000 already exists, so without force no registration
        # result should be produced.
        response = client.batch_register_devices('test', '6000', 1)
        print json.dumps(response, indent=4)
        self.assertIsNone(response.get('registration_result', None))

    def test_should_make_vaild_register_call_for_existing_device_with_force_flag(self):
        print "\n\n%s" % sys._getframe().f_code.co_name
        print "---------------------------------------------------------------"
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname
        )
        client.login(self.test_user, self.test_password)
        # force=True re-registers even an existing device.
        response = client.batch_register_devices('test', '6000', 1, force=True)
        print json.dumps(response, indent=4)
        self.assertIsNotNone(response.get('registration_result', None))

    def test_should_register_one_new_device(self):
        import random
        print "\n\n%s" % sys._getframe().f_code.co_name
        print "---------------------------------------------------------------"
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname
        )
        client.login(self.test_user, self.test_password)
        # Random UID makes the device (very likely) new on each run.
        response = client.batch_register_devices('test', random.randint(0, 100000), 1)
        print json.dumps(response, indent=4)
        self.assertIsNotNone(response.get('registration_result', None))

    def test_should_NOT_register_device_with_invalid_device_type_id(self):
        import random
        print "\n\n%s" % sys._getframe().f_code.co_name
        print "---------------------------------------------------------------"
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname
        )
        client.login(self.test_user, self.test_password)
        # Device type 252525 does not exist -> no registration result.
        response = client.batch_register_devices('test', random.randint(0, 100000), 252525)
        print json.dumps(response, indent=4)
        self.assertEqual(response.get('registration_result', None), None)

    def test_should_batch_register_three_new_device(self):
        import random
        print "\n\n%s" % sys._getframe().f_code.co_name
        print "---------------------------------------------------------------"
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname
        )
        client.login(self.test_user, self.test_password)
        # A 'from-to' UID range registers one device per UID in the range.
        from_uid = random.randint(0, 100000)
        to_uid = from_uid + 2
        uid_range = '%s-%s' % (from_uid, to_uid)
        response = client.batch_register_devices('test', uid_range, 1, batch_register=True)
        print json.dumps(response, indent=4)
        self.assertEqual(len(response['registration_result']['devices']), 3)

    def test_should_try_register_with_invalid_uid_but_fail_with_200_result_and_error_message(self):
        print "\n\n%s" % sys._getframe().f_code.co_name
        print "---------------------------------------------------------------"
        client = ByteportHttpClient(
            byteport_api_hostname=self.byteport_api_hostname
        )
        client.login(self.test_user, self.test_password)
        # Invalid characters in the UID: HTTP 200 but no registration result.
        response = client.batch_register_devices('test', '#invaliduid_', 1)
        print json.dumps(response, indent=4)
        self.assertIsNone(response.get('registration_result', None))
class TestStompClient(unittest.TestCase):
    """Integration tests / usage examples for the STOMP client.

    The first test never terminates on its own (it loops while
    ``self.running`` is True, which nothing here clears) -- it is meant
    to be run manually as a long-lived message-consuming example.
    """

    TEST_BROKER = 'stomp.byteport.se'
    test_user = 'stomp_test'
    test_pass = '!!!stomp_test'
    test_namespace = 'test'
    test_device_uid = '6002'
    # Loop flag for the consume loop below; never set to False here.
    running = True

    def test_stomp_client__should_connect_send_and_subscribe_for_messages(self):
        byteport_stomp_client = ByteportStompClient(
            self.test_namespace, self.test_user, self.test_pass, broker_host=self.TEST_BROKER, device_uid=self.test_device_uid)
        # NOTE: Trusted users can store data this way also.
        # argument to store should be a dict where the key is the field name to store
        byteport_stomp_client.store({'info': 'hello STOMP world!', 'temperature': 20}, self.test_device_uid)
        # Just to exemplify how to run a thread blocking for new frames and working
        frame = None
        loops = 0
        while self.running:
            logging.info("Waiting for STOMP frames for device %s.%s (and any child devices)..." % (self.test_namespace, self.test_device_uid))
            if byteport_stomp_client.client.canRead(60):
                try:
                    # Block while waiting for frame (not can use canRead with a timeout also
                    frame = byteport_stomp_client.client.receiveFrame()
                    # NOTE: Messages can be sent to devices even if a Device is offline. This means
                    # that potentially old messages can appear. This can be a good or bad thing.
                    # The behaviour to filter out old messages is of course application specific.
                    #
                    # Remember that this client must have had its system clock correctly set before
                    # such filtering could be performed (this can be detected if you never expect
                    # old messages, messages from "future" could also be detected.
                    #
                    old_message_limit = 60
                    future_message_trap = -30
                    now = datetime.datetime.utcnow()
                    # Work with frame (payload in body) here. Note that m['uid'] could point to a child-device that shoul have the messag forwarded!
                    if frame.body.startswith('[{'):
                        messages = json.loads(frame.body)
                        for m in messages:
                            # This is a good place to reject old messages
                            dt = datetime.datetime.utcfromtimestamp(int(m['timestamp']))
                            message_age = (now - dt).total_seconds()  # if dt is in future, this number will be negative
                            if message_age > old_message_limit:
                                logging.warn("Rejecting stale message!")
                            elif message_age < future_message_trap:
                                logging.warn("Rejecting message from future! (Local clock, skewed?, message age=%s, now=%s)" % (message_age, now))
                                # Note, if you trust server time, you could have the system clock set using the message date to ensure future messages will be received.
                            else:
                                if m['uid'] == self.test_device_uid and m['namespace'] == self.test_namespace:
                                    logging.info(u"Got message for this device, Processing message to %s.%s (age=%s s.), payload was: %s." % (m['namespace'], m['uid'], message_age, m['data']))
                                else:
                                    logging.info(u'Got message for child device %s, routing it down-stream etc...' % m['uid'])
                    else:
                        logging.info(u"Received plain text message: %s" % frame.body)
                    # Ack Frame when done processing
                    byteport_stomp_client.client.ack(frame)
                except Exception as e:
                    logging.error("Caught exception, reason was %s" % e)
                    # Ack or Nack frame after errors
                    # ACK will requeue the frame and other consumers may consume it
                    # NACK will delete the message and will not be re-consumed.
                    if frame:
                        byteport_stomp_client.client.ack(frame)
            else:
                # Could not read, do something if you wish
                pass
            # Send STOMP heart beat in any case (NOTE: probably better to place in separate thread in real implementation)
            byteport_stomp_client.store({'info': 'hello STOMP world!', 'loops': loops}, self.test_device_uid)
            loops += 1

    def test_shold_connect_and_disconnect_using_stomp_client(self):
        byteport_stomp_client = ByteportStompClient(
            self.test_namespace, self.test_user, self.test_pass, broker_host=self.TEST_BROKER, device_uid=self.test_device_uid)
        # NOTE: Trusted users can store data this way also.
        # argument to store should be a dict where the key is the field name to store
        byteport_stomp_client.store({'info': 'hello STOMP world!', 'temperature': 20}, self.test_device_uid)
        byteport_stomp_client.disconnect()
import threading
class TestMQTTClient(unittest.TestCase):
    """Integration tests / usage examples for the MQTT client.

    NOTE(review): both tests loop forever by design (``while True`` with
    no break) -- they are manual, long-running examples, not suitable
    for an automated suite.
    """

    TEST_BROKER = 'broker.byteport.se'
    test_device_uid = '6002'

    def test_should_connect_to_namespace_vhost_using_mqtt_client(self):
        # Connects to the namespace-specific vhost (the default).
        client = ByteportMQTTClient(
            'test', self.test_device_uid, 'eluw_test', 'eluw_test',
            broker_host=self.TEST_BROKER, loop_forever=False)
        # Run the client's network loop on a background thread.
        thread1 = threading.Thread(target=client.block)
        thread1.start()
        while True:
            client.store("Hello MQTT, this should normally be consumed by Bosses!!!")
            time.sleep(2)

    def test_should_connect_to_root_vhost_using_mqtt_client(self):
        # Same as above but explicitly targets the root '/' vhost.
        client = ByteportMQTTClient(
            'test', self.test_device_uid, 'eluw_test', 'eluw_test',
            broker_host=self.TEST_BROKER, loop_forever=False, explicit_vhost='/')
        thread1 = threading.Thread(target=client.block)
        thread1.start()
        while True:
            client.store('number=10')
            time.sleep(2)
|
#!/usr/bin/python
# Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# SPDX-License-Identifier: MIT-0

# Generates fake tweets forever and (optionally) writes each one to a
# DynamoDB table, throttled to roughly the table's provisioned WCU.
# Python 2 script (print statements).  Stop with Ctrl+C.

# Import modules
from loremipsum import get_sentences
import boto3
import names
import random
import string
import signal
import math
import time
import sys

# Global variables
dynamodb_table = "TwitterAnalysis"
# Writes per second the table is provisioned for; drives the sleep below.
provisioned_wcu = 1

# Initiate DynamoDB client
client = boto3.client('dynamodb')

# Signal handler, Ctrl+c to quit
def signal_handler(signal, frame):
    # Print a newline so the shell prompt starts cleanly, then exit.
    print "\n"
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)

# Actions
insert_to_ddb = True;
print_to_screen = True;

# Start the loop to generate simulated tweets
while(True) :
    # Generate fake tweet
    user_id = names.get_first_name()
    # Random 17-digit numeric id, Twitter-snowflake-ish.
    tweet_id = str(random.randint(pow(10,16),pow(10,17)-1))
    created_at = time.strftime("%a %b %d %H:%M:%S +0000 %Y", time.gmtime())
    language = random.choice(['de', 'en', 'es', 'fr', 'id', 'nl', 'pt', 'sk'])
    text = str(get_sentences(1)[0])

    # Store tweet in DynamoDB
    if insert_to_ddb == True :
        res = client.put_item(
            TableName=dynamodb_table,
            Item={
                'user_id' : { 'S' : user_id },
                'tweet_id' : { 'N' : tweet_id },
                'created_at': { 'S' : created_at },
                'language' : { 'S' : language },
                'text' : { 'S' : text }
            })

    # Print output to screen
    if print_to_screen == True :
        print "insert_to_ddb: %s" % insert_to_ddb
        print "user_id : %s" % user_id
        print "tweet_id : %s" % tweet_id
        print "created_at : %s" % created_at
        print "language : %s" % language
        print "text : %s" % (text[:77] + '...' if len(text) > 80 else text)
        print "\n==========================================="

    # Loop control
    # One write per 1/WCU seconds keeps the consumed rate at ~WCU.
    time.sleep(1.0/provisioned_wcu)
|
# Digit Fifth Powers https://projecteuler.net/problem=30
def sumOfFifthPower(n):
    """Return the sum of the fifth powers of the decimal digits of *n*.

    Idiomatic rewrite: folds the digits with the built-in sum() over a
    generator instead of a manual accumulator loop.
    """
    return sum(int(digit) ** 5 for digit in str(n))
# Collect every number (>= 2) that equals the sum of the fifth powers of
# its own digits, then print the list and its total (Project Euler 30).
# 1,000,000 is a safe search bound: 7 digits give at most 7*9**5 = 413343,
# which has only 6 digits, so no larger number can qualify.
digitFifthPowers = []
for n in range(2, 1000000):
    if(n == sumOfFifthPower(n)):
        digitFifthPowers.append(n)
print(digitFifthPowers)
# NOTE(review): this local deliberately shadows the built-in sum();
# harmless at the end of a throwaway script, but avoid elsewhere.
sum = 0
for x in digitFifthPowers:
    sum += x
print(sum)
import re
from datetime import datetime
import smtplib

# Reads whois-style output from domain_out.txt, extracts each domain's
# expiry date (in many possible formats), normalises the dates, and
# emails a report of domains expiring within 60 days via Gmail SMTP.

# Gmail credentials and destination address for the expiry report.
sender = input ("Enter sender email: ")
password = input("Enter password: ")
receiver = input("Enter receiver email: ")

with open("domain_out.txt", "r") as f:
    content = f.read()

# Matches dates like 2020-05-01, 20200501, 01/May/2020, 1st May 2020, ...
pattern = "\d{4}[/.-]\w{2,3}[/.-]\d{2}|\d{8}|\d{1,2}[/.-]\w{1,3}[/.-]\d{4}|\w{1,4}[ ]\w{2,3}[ ]\d{4}"
dates = re.findall(pattern, content)
# Canonical format every parsed date is normalised to, e.g. 01/May/2020.
fmt = '%d/%B/%Y'
date_list = []
message_list = []
# Try format after format, keyed on the separator character, until one
# parses; unparseable " "-separated and "/"-separated dates fall through
# unchanged (see the bare `pass` branches).
for date in dates:
    if "-" in date:
        try:
            date = datetime.strptime(date, '%Y-%m-%d').strftime(fmt)
        except ValueError:
            try:
                date = datetime.strptime(date, '%Y-%b-%d').strftime(fmt)
            except ValueError:
                try:
                    date = datetime.strptime(date, '%d-%b-%Y').strftime(fmt)
                except ValueError:
                    date = datetime.strptime(date, '%d-%m-%Y').strftime(fmt)
    elif "." in date:
        try:
            date = datetime.strptime(date, '%Y.%m.%d').strftime(fmt)
        except ValueError:
            date = datetime.strptime(date, '%d.%m.%Y').strftime(fmt)
    elif " " in date:
        # Handles "May 01 2020" plus ordinal day suffixes (1st/2nd/3rd/4th).
        try:
            date = datetime.strptime(date, '%b %d %Y').strftime(fmt)
        except ValueError:
            try:
                date = datetime.strptime(date, '%dst %b %Y').strftime(fmt)
            except ValueError:
                try:
                    date = datetime.strptime(date, '%dnd %b %Y').strftime(fmt)
                except ValueError:
                    try:
                        date = datetime.strptime(date, '%drd %b %Y').strftime(fmt)
                    except ValueError:
                        try:
                            date = datetime.strptime(date, '%dth %b %Y').strftime(fmt)
                        except ValueError:
                            pass
    elif "/" in date:
        try:
            date = datetime.strptime(date, '%d/%m/%Y').strftime(fmt)
        except ValueError:
            pass
    else:
        # Bare 8-digit form, e.g. 20200501.
        date = datetime.strptime(date, '%Y%m%d').strftime(fmt)
    date_list.append(date)
d1 = date_list
# Round-tripping "today" through fmt truncates it to midnight so day
# arithmetic below is whole days.
d2 = datetime.today().strftime(fmt)
d2 = datetime.strptime(d2, fmt)
# Pull the domain name that precedes each date in the whois dump and pair
# it with the normalised date from the same position.
domains = re.findall("([a-zA-Z]+(?:\.[a-zA-Z]+)+)\n.+(?=\d{4}[/.-]\w{2,3}[/.-]\d{2}|\d{8}|\d{1,2}[/.-]\w{1,3}[/.-]\d{4}|\w{1,4}[ ]\w{2,3}[ ]\d{4})", content)
domains = map(lambda s: s.strip(), domains)
main_list = [' '.join(x) for x in zip(domains,d1)]
for i in main_list:
    i = i.split()
    # Find the token that parses as a date and compute days until expiry.
    for a in i:
        try:
            a = datetime.strptime(a, fmt)
            d = ((a-d2).days)
        except ValueError:
            None
    # NOTE(review): if no token of the very first entry parses as a date,
    # `d` is unbound here and this raises NameError -- confirm the input
    # file always yields a parseable date per domain.
    if 0 <= d <= 60: #give warning in 60 days
        message_list.append("Domain " + i.pop(0) + " is getting timed out in " + i.pop() + ". Only " + str(d) + " days left.")
message_list.append("These are all the domains that are getting timed out in 60 days!")
message = ' \n\n'.join([str(item) for item in message_list])
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.ehlo()
server.login(sender, password)
server.sendmail(sender, receiver, message)
server.quit()
print ("Email sent!")
|
class Counter:
    """Edge-event counter usable as a GPIO callback and comparable
    directly against plain integers."""

    def __init__(self):
        # Number of events seen so far.
        self.count = 0

    def count_up(self, channel):
        """Event callback: bump the tally and report which channel fired."""
        self.count = self.count + 1
        print('GPIO%02d count=%d' % (channel, self.count))

    def __eq__(self, other):
        """Support ``counter == n`` against a bare number."""
        return other == self.count

    def __lt__(self, other):
        """Support ``counter < n`` against a bare number."""
        return other > self.count
def main():
    """Hardware self-test of the RPi.GPIO edge/event API.

    Walks through: input polling, blocking waits (wait_for_edge), polled
    event detection (add_event_detect/event_detected), callback-based
    detection, and debounced counting — toggling LEDs on GPIO 5/6/13/19/26
    as each stage completes.  Requires a button wired to GPIO10 and must
    run on a Raspberry Pi.

    NOTE(review): indentation was reconstructed; the three blocking edge
    waits are assumed to run sequentially (not nested) — verify against
    the original file.
    """
    import RPi.GPIO as GPIO
    import time
    try:
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        # LEDs on 5/6/13/19/26; inputs left floating (PUD_OFF) so external
        # wiring defines the idle level.
        GPIO.setup([5, 6, 13, 19, 26], GPIO.OUT)
        GPIO.setup(10, GPIO.IN, pull_up_down=GPIO.PUD_OFF)
        GPIO.setup(9, GPIO.IN, pull_up_down=GPIO.PUD_OFF)
        GPIO.setup(11, GPIO.IN, pull_up_down=GPIO.PUD_OFF)
        # Stage 1: busy-poll until the button reads HIGH.
        print('Press GPIO10.')
        while GPIO.input(10) == GPIO.LOW:
            time.sleep(0.1)
        GPIO.output(5, GPIO.HIGH)
        # Stage 2: blocking waits — wait_for_edge returns the channel number
        # (or None on timeout).
        channel = GPIO.wait_for_edge(10, GPIO.FALLING)
        if channel == 10:
            GPIO.output(6, GPIO.HIGH)
        channel = GPIO.wait_for_edge(10, GPIO.RISING)
        if channel == 10:
            GPIO.output(13, GPIO.HIGH)
        channel = GPIO.wait_for_edge(10, GPIO.BOTH)
        if channel == 10:
            GPIO.output(19, GPIO.HIGH)
        # Stage 3: loop until a full 1 s elapses with no edge (timeout path).
        while True:
            print('Wait timeout.')
            channel = GPIO.wait_for_edge(10, GPIO.BOTH, timeout=1000)
            if channel is None:
                GPIO.output(26, GPIO.HIGH)
                break
        # Stage 4: polled event detection for each edge type.
        print('Press GPIO10.')
        GPIO.add_event_detect(10, GPIO.RISING)
        while not GPIO.event_detected(10):
            time.sleep(1)
        GPIO.output(5, GPIO.LOW)
        GPIO.remove_event_detect(10)
        GPIO.add_event_detect(10, GPIO.FALLING)
        while not GPIO.event_detected(10):
            time.sleep(1)
        GPIO.output(6, GPIO.LOW)
        GPIO.remove_event_detect(10)
        GPIO.add_event_detect(10, GPIO.BOTH)
        while not GPIO.event_detected(10):
            time.sleep(1)
        GPIO.output(13, GPIO.LOW)
        GPIO.remove_event_detect(10)
        # Stage 5: callbacks passed directly to add_event_detect.
        def risen(ch):
            print('risen GPIO%02d' % ch)
        def fallen(ch):
            print('fallen GPIO%02d' % ch)
        GPIO.add_event_detect(10, GPIO.RISING, risen)
        while not GPIO.event_detected(10):
            time.sleep(1)
        GPIO.output(5, GPIO.HIGH)
        GPIO.remove_event_detect(10)
        GPIO.add_event_detect(10, GPIO.FALLING, fallen)
        while not GPIO.event_detected(10):
            time.sleep(1)
        GPIO.output(6, GPIO.HIGH)
        GPIO.remove_event_detect(10)
        GPIO.add_event_detect(10, GPIO.BOTH, risen)
        while not GPIO.event_detected(10):
            time.sleep(1)
        GPIO.output(13, GPIO.HIGH)
        GPIO.remove_event_detect(10)
        # Stage 6: multiple callbacks attached after detection is enabled.
        def changed(ch):
            print('changed GPIO%02d' % ch)
        GPIO.add_event_detect(10, GPIO.BOTH)
        GPIO.add_event_callback(10, fallen)
        GPIO.add_event_callback(10, changed)
        while not GPIO.event_detected(10):
            time.sleep(1)
        GPIO.output(26, GPIO.LOW)
        GPIO.remove_event_detect(10)
        # Stage 7: debounced counting — Counter.__lt__ lets the object be
        # compared directly against an int below.
        print('Press! Press! Press!')
        counter = Counter()
        GPIO.add_event_detect(10, GPIO.RISING, callback=counter.count_up, bouncetime=100)
        GPIO.add_event_callback(10, counter.count_up, bouncetime=500)
        while counter < 10:
            time.sleep(1)
        GPIO.output(19, GPIO.LOW)
        GPIO.remove_event_detect(10)
        time.sleep(1)
    finally:
        # Always release the GPIO lines, even on Ctrl-C or an error above.
        GPIO.cleanup()
if __name__ == '__main__':
    import logging
    # Verbose logging helps when debugging edge/event handling on real hardware.
    logging.basicConfig(level=logging.DEBUG)
    main()
|
14,577 | f0fdf95627426b1f15dba33cda0fd013937c2c96 | import tensorflow as tf
import numpy as np
# Restore variables from the checkpoint written by the companion save script.
# They must be redefined with the same shape, dtype and *name* as when they
# were saved: tf.train.Saver matches checkpoint entries by variable name.
W = tf.Variable(np.arange(6).reshape(2, 3), dtype=tf.float32, name='Weights')#name must be the same with saver
b = tf.Variable(np.arange(3).reshape(1, 3), dtype=tf.float32, name='biases')
# No initialisation op is needed: restore() assigns the saved values directly.
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, "my_net/save_net.ckpt")
    print("weight:", sess.run(W))
    print("biases:", sess.run(b))
|
14,578 | aa51b1a271afed28c2a84ce14dec6f43405bc618 | # Please write your HelloWorld.py code and commit it!
print'hello word'
|
14,579 | c46fe4e3846b0ac73e756410853500e9ce500248 | from django.db import models
class Document(models.Model):
    """A stored document: free-text content plus its category and source URL."""
    category = models.CharField(max_length=20)
    content = models.TextField()
    url = models.TextField()

    class MongoMeta:
        # Bind to the pre-existing MongoDB collection rather than an
        # ORM-generated name.
        db_table = "DocsManageApp_document"

    def __str__(self):
        return self.content
class Category(models.Model):
    """A document category, identified by its display name."""
    categoryName = models.CharField(max_length=20)

    class MongoMeta:
        # Bind to the pre-existing MongoDB collection rather than an
        # ORM-generated name.
        db_table = "DocsManageApp_category"

    def __str__(self):
        return self.categoryName
# MongoMeta maps the model onto the existing "DocsManageApp_category" collection.
14,580 | c9ba4229c9d87c8ca08f86f43a3d144be0d954ae | # Author: Sean Fallon
# Shout out to Kenneth Love
# Date Created:
# Date Changed:
# All standard imports used in Django
from django.http import HttpResponse
from django.shortcuts import render_to_response , get_object_or_404
from django.template import RequestContext
# Custom
from epictools.models import epicTools, Post
# request is the incoming HttpRequest.
def epictools_index(request):
    """Render the blog index listing every active epicTools entry."""
    epicBlogTools = epicTools.objects.filter(active=True)
    # Template referenced by absolute filesystem path (no template-dirs lookup).
    return render_to_response('///home/sean/newproject/epictools/templates/index.html', {
        'epicBlogTools': epicBlogTools
        # RequestContext makes request-derived context processors available
        # to the template (standard pre-1.8 Django idiom).
    }, context_instance=RequestContext(request)) # standard in Django: context_instance=RequestContext(request)
    # return HttpResponse("You've reached my epic tool!")
# Two variables: (request, slug)
def epicBlog(request, slug):
    """Render one active blog entry looked up by slug (404 when absent)."""
    epicBigBlog = get_object_or_404(epicTools, active=True, slug=slug)
    return render_to_response('///home/sean/newproject/epictools/templates/index.html', {
        'epicBigBlog': epicBigBlog
    }, context_instance=RequestContext(request))
|
14,581 | c8d41e8d476504b5c8bc44fc925bd071efe10d13 | from itertools import product
# All sign combinations of the three (negative, positive) pairs: 2**3 = 8 tuples.
arrays = [(-1, 1), (-3, 3), (-5, 5)]
cp = [combo for combo in product(*arrays)]
print(cp)
14,582 | a2e659e87772ca15c81a402fc75aad580fb05c7d | # coding: utf-8
from rqalpha import run_func
from rqalpha.api import *
import talib
from sklearn.model_selection import train_test_split
import time
from datetime import datetime, timedelta
import warnings
import numpy as np
from numpy import newaxis
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import keras
from keras.models import load_model
from keras.models import model_from_json
"""
Bar(symbol: u'\u73e0\u6c5f\u94a2\u7434', order_book_id: u'002678.XSHE', datetime: datetime.datetime(2014, 1, 2, 0, 0),
open: 7.08, close: 7.07, high: 7.14, low: 7.03, volume: 3352317.0, total_turnover: 23756852, limit_up: 7.78, limit_down: 6.36)
rqalpha run -f lstm_one_backtest.py -s 2017-01-09 -e 2018-3-09 -o result.pkl --plot --progress --account stock 10000
http://scikit-learn.org/stable/modules/preprocessing.html#standardization-or-mean-removal-and-variance-scaling
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
"""
# Functions driven by the scheduler must accept the (context, bar_dict) pair.
def log_cash(context, bar_dict):
    # Currently a no-op; re-enable the line below to track remaining cash.
    pass
    #logger.info("Remaning cash: %r" % context.portfolio.cash)
"""
if context.is_buy_point:
order = order_percent(context.s1, 1)
if order:
logger.info("----------下单成功 下单成功---------下单成功下单成功下单成功下单成功下单成功下单成功下单成功下单成功下单成功下单成功----------买入价 %s" % order.avg_price)
context.buy_price = order.avg_price
context.buy = True
if context.is_sell_point:
order_target_value(context.s1, 0)
logger.info("---------------清仓 --------清仓清仓清仓清仓清仓清仓-------清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓-------")
context.buy = False
"""
# Put all initialisation logic here.  The context object is shared between
# every method of the strategy.
def init(context):
    """Load the weekly LSTM model and initialise per-strategy state."""
    # Strategy-wide state lives on context.
    context.s1 = context.config.stock_id
    #context.s1 = "002443.XSHE"
    context.s1_open_price = 0
    context.buy = False            # currently holding a position?
    context.buy_price = 0          # fill price of the open position (0 = none)
    context.is_buy_point = False   # daily signals, recomputed in before_trading
    context.is_sell_point = False
    context.is_stop_loss = False
    context.ORDER_PERCENT = 0.2
    context.restore_predicted = 0  # de-normalised one-week price prediction
    context.yesterday_close = 0
    context.s1_X = []
    context.s1_y = []
    context.predicted = False
    context.error = 0
    context.ok = 0
    """
    context.model = load_model('model/%s.h5' % context.s1)
    context.model.compile(loss="mse", optimizer="rmsprop")
    """
    # Rebuild the weekly model from its JSON architecture + weight files.
    json_file = open("weight_json_week/%s.h5"% context.s1, 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    context.model_by_week = model_from_json(loaded_model_json)
    context.model_by_week.load_weights("weight_week/%s.h5" % context.s1)
    """
    json_file = open("weight_json/%s.h5"% context.s1, 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    context.model = model_from_json(loaded_model_json)
    context.model.load_weights("weight/%s.h5" % context.s1)
    """
    logger.info("RunInfo: {}".format(context.run_info))
    df = (all_instruments('CS'))
    context.all = df["order_book_id"]
    # Run log_cash every Tuesday (weekday=2).
    scheduler.run_weekly(log_cash, weekday=2)
# before_trading is called once per day, before the market opens.
def before_trading(context):
    """Predict next week's close with the LSTM and set today's
    buy / sell / stop-loss flags.

    NOTE(review): indentation reconstructed — confirm block nesting against
    the original file.
    """
    #logger.info("before_trading running")
    history_close = history_bars(context.s1, 30, '1d', 'close')
    #logger.info(history_close)
    # Normalise the 30-day window to fractional change from its first close,
    # reshape to (1, 30, 1), then predict the next normalised value.
    normalised_history_close = [((float(p) / float(history_close[0])) - 1) for p in history_close]
    normalised_history_close = np.array(normalised_history_close)
    normalised_history_close = normalised_history_close[newaxis,:]
    normalised_history_close = normalised_history_close[:,:,newaxis]
    predicted = context.model_by_week.predict(normalised_history_close)[0,0]
    # De-normalise the prediction back to an absolute price.
    normalised_history_close = [((float(p) / float(history_close[0])) - 1) for p in history_close]
    normalised_history_close.append(predicted)
    restore_normalise_window = [float(history_close[0]) * (float(p) + 1) for p in normalised_history_close]
    restore_predicted = restore_normalise_window[-1]
    context.restore_predicted = restore_predicted
    #logger.info("yesterday %s predict %s" % (history_close[-1], restore_predicted))
    context.yesterday_close = history_close[-1]
    # NOTE(review): despite the name, day3_avg here is a 5-day average
    # (handle_bar computes an actual 3-day one) — confirm which was intended.
    day3 = history_bars(context.s1, 5, '1d', 'close')
    day3_avg = sum(day3) /5
    logger.info("3day avg %s" % day3_avg)
    logger.info("买点 昨天收盘 %s 均线5日 %s lstm:%s" % (history_close[-1], day3_avg, restore_predicted))
    #if history_close[-1] < day3_avg and history_close[-1] < restore_predicted:
    # Buy signal: the model predicts a close above yesterday's.
    if history_close[-1] < restore_predicted:
    #if history_close[-1] < day3_avg:
        #logger.info("model predicts a rise next week")
        #logger.info("yesterday %s predict %s" % (history_close[-1], restore_predicted))
        context.is_buy_point = True
    else:
        context.is_buy_point = False
    # Sell signal: holding a position and price has risen above the average.
    if context.buy_price:
        #if context.buy_price > history_close[-1] and (context.buy_price - history_close[-1]) / context.buy_price > 0.10:
        if history_close[-1] > day3_avg:
            #if history_close[-1] > day3_avg:
            #logger.info(history_close[-1])
            #logger.info(context.buy_price)
            #logger.info((history_close[-1] - context.buy_price) / context.buy_price)
            context.is_sell_point = True
    # Stop-loss: position is down more than 8% from the purchase price.
    if context.buy_price:
        if context.buy_price > history_close[-1] and ( context.buy_price - history_close[-1]) / context.buy_price > 0.08:
            #logger.info(history_close[-1])
            #logger.info(context.buy_price)
            #logger.info((history_close[-1] - context.buy_price) / context.buy_price)
            logger.info("止损止损止损 我的股票价格:%s 现在的股票价格:%s " % (context.buy_price,history_close[-1] ))
            context.is_stop_loss = True
def normalise_windows(window_data):
    """Rescale each price window relative to its first element.

    Every window ``w`` becomes ``w[i]/w[0] - 1``, so each series starts at
    0.0 and expresses prices as fractional change from the window start.
    """
    def _rebase(window):
        base = float(window[0])
        return [float(price) / base - 1 for price in window]

    return [_rebase(window) for window in window_data]
# handle_bar fires on every data slice (daily bar here) for the subscribed
# instrument.
def handle_bar(context, bar_dict):
    """Enter/exit positions based on the flags set in before_trading.

    NOTE(review): indentation reconstructed — in particular, confirm which of
    the log lines below belong inside the ``week_ratio > 0.10`` gate.
    """
    #logger.info("handle_bar tick")
    history_close = history_bars(context.s1, 1, '1d', 'close')
    today = bar_dict[context.s1].datetime
    d2 = today + timedelta(days=7)
    # Predicted one-week gain relative to today's close.
    week_ratio = (context.restore_predicted - bar_dict[context.s1].close) / bar_dict[context.s1].close
    logger.info("今天收盘价 %s 预测价一星期后 %s 涨%s %s" % ( bar_dict[context.s1].close,context.restore_predicted,week_ratio, d2))
    if week_ratio > 0.10:
        logger.info("我股票的价 %s" % context.buy_price)
    logger.info("昨天天收盘价 %s" % context.yesterday_close)
    logger.info("今天收盘价 %s" % bar_dict[context.s1].close)
    logger.info("买点 %s" % context.is_buy_point)
    logger.info("卖点 %s" % context.is_sell_point)
    logger.info("止损 %s" % context.is_stop_loss)
    """
    if context.buy_price:
        if context.buy_price < bar_dict[context.s1].close:
            logger.info("---------------赚-------哈哈哈------")
        else:
            logger.info("---------亏 ------------")
    else:
        logger.info("-----------没有股票-----------")
    """
    # Entry: not currently holding and today is flagged as a buy point.
    if not context.buy:
        if context.is_buy_point:
            order = order_percent(context.s1, 1)
            if order:
                logger.info("----------下单成功 下单成功---------下单成功下单成功下单成功下单成功下单成功下单成功下单成功下单成功下单成功下单成功----------买入价 %s" % order.avg_price)
                logger.info("预测价一星期后 %s 涨%s %s" % (context.restore_predicted,week_ratio, d2))
                context.buy_price = order.avg_price
                context.buy = True
                day3 = history_bars(context.s1, 3, '1d', 'close')
                day3_avg = sum(day3) /3
                logger.info("3day avg %s" % day3_avg)
            else:
                logger.info("----------下单失败 下单失败---------下单失败下单失败下单失败下单失败下单失败下单失败")
    # Exit: holding and today is flagged as a sell point.
    if context.buy and context.is_sell_point:
        order = order_target_value(context.s1, 0)
        if order:
            logger.info("---------------清仓 --------清仓清仓清仓清仓清仓清仓-------清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓清仓----%s---" % order.avg_price)
            context.buy = False
            context.is_sell_point = False
            context.buy_price = 0
        else:
            logger.info("---------------清仓失败 --------清仓失败清仓失败清仓失败清仓失败清仓失败清仓失败")
    # Stop-loss exit (flag set in before_trading on an 8% drawdown).
    if context.is_stop_loss:
        order = order_target_value(context.s1, 0)
        if order:
            logger.info("---------------止损----------------止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损止损---")
            context.buy = False
            context.is_stop_loss = False
            context.buy_price = 0
    """
    if context.predicted and not context.buy and not is_suspended(context.s1):
        order = order_percent(context.s1, 1)
        if order:
            logger.info("买入价 %s" % order.avg_price)
            context.buy = True
    else:
        if context.buy and not is_suspended(context.s1):
            order_target_value(context.s1, 0)
            context.buy = False
    """
# after_trading is called once per day, after the market closes.
def after_trading(context):
    # No end-of-day processing is required by this strategy.
    pass
    #logger.info("after_trading running")
# Back-test configuration for rqalpha.run_func: one year of daily data with a
# 100k stock account, verbose logging and a result plot.  The run_func call
# itself is currently commented out (the module is run via the rqalpha CLI).
config = {
    "base": {
        "start_date": "2017-03-09",
        "end_date": "2018-03-09",
        "accounts": {
            "stock": 100000
        }
    },
    "extra": {
        "log_level": "verbose",
    },
    "mod": {
        "sys_analyser": {
            "enabled": True,
            "plot": True
        }
    }
}
#run_func(init=init, before_trading=before_trading, handle_bar=handle_bar, config=config)
14,583 | b2e7eff1be1bca92fbb717a4f4690b9098d17e7a | import turtle
def draw_art():
    """Draw a red rotating-line pattern with the turtle module.

    NOTE(review): indentation reconstructed — this assumes all three drawing
    calls sit inside the 36-iteration loop; verify against the original file.
    """
    turtle.shape("turtle")
    turtle.color("red")
    turtle.speed('slow')
    # 36 iterations x 20 degrees = one full revolution.
    for i in range(1, 37):
        turtle.right(20)
        turtle.right(90)
        turtle.forward(300)
if __name__ == '__main__':
    draw_art()
|
14,584 | e0c24c1721667289789d1537611cdc189013e287 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-23 13:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20170505_1130'),
]
operations = [
migrations.CreateModel(
name='CbTempPassword',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('token', models.UUIDField(default=uuid.uuid4, editable=False)),
('used', models.BooleanField(default=False)),
('created_at', models.DateField(auto_now_add=True)),
('updated_at', models.DateField(auto_now=True)),
],
options={
'db_table': 'cb_temp_password',
},
),
migrations.AlterField(
model_name='user',
name='is_active',
field=models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Un select this instead of deleting accounts.', verbose_name='active'),
),
migrations.AddField(
model_name='cbtemppassword',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
14,585 | 7613a24a637dccc315cbaf8220605e59d46ce1e7 | from django.apps import AppConfig
class DOTConfig(AppConfig):
    """Django app configuration for the Azure AD single-sign-on app."""
    name = "azure_ad_sso"
    verbose_name = "Django Azure Active Directory Single-Sign-On"
14,586 | ca002dcb5aeddf98620cfa9b1977f8b803f46717 | # -*- coding: mbcs -*-
from part import *
from material import *
from section import *
from assembly import *
from step import *
from interaction import *
from load import *
from mesh import *
from optimization import *
from job import *
from sketch import *
from visualization import *
from connectorBehavior import *
import csv,os
import numpy as np
from Plotting.information_network import load_network_info, sorted_ls, load_info_test
from Network_generation import *
import fnmatch
os.chdir('../Data_1/default/')
os.chdir(sorted_ls('.')[-1])
#os.chdir(sorted_ls('.')[-1])
print(os.getcwd())
filenames=sorted(fnmatch.filter(sorted_ls('.'), 'network_vertices_initial_*.csv'))
network = load_network_info(int(filenames[-1][-9:-4]))
#network = load_network_info(326)
"""network.ridge_vertices = np.delete(network.ridge_vertices,576,axis=0)
network.ridge_vertices = np.delete(network.ridge_vertices,574,axis=0)
network.ridge_vertices = np.delete(network.ridge_vertices,432,axis=0)
network.ridge_vertices = np.delete(network.ridge_vertices,365,axis=0)
network.ridge_vertices = np.delete(network.ridge_vertices,289,axis=0)
"""
#network = network.create_ridge_node_list()
#network = network.sort_nodes()
#tensile_test = load_info_test(326)
#tensile_test.element_size = 0.001
tensile_test = load_info_test(int(filenames[-1][-9:-4]))
space_discretization=tensile_test.space_discretization
traction_distance = tensile_test.traction_distance
iterations = abs(int(traction_distance / space_discretization))
myModel = mdb.models['Model-1']
vertices=network.vertices
ridge_vertices=network.ridge_vertices
test_number = int(np.random.rand()*10e8)
#### PART DEFINITION ####
def define_part():
if int(network.dimension) == 2:
for i in range(len(ridge_vertices)):
ridge = ridge_vertices[i]
partname = 'Part-' + str(i+1)
myModel.Part(dimensionality=TWO_D_PLANAR, name=partname, type=
DEFORMABLE_BODY)
try:
myModel.parts[partname].DatumPointByCoordinate(coords=(vertices[ridge[0]][0],vertices[ridge[0]][1], 0.0))
myModel.parts[partname].DatumPointByCoordinate(coords=(vertices[ridge[1]][0],vertices[ridge[1]][1], 0.0))
except IndexError:
myModel.parts[partname].DatumPointByCoordinate(coords=(vertices[i][0],vertices[i][1], 0.0))
myModel.parts[partname].WirePolyLine(mergeType=IMPRINT, meshable=
ON, points=((myModel.parts[partname].datums[1],
myModel.parts[partname].datums[2]), ))
elif int(network.dimension)==3:
for i in range(len(ridge_vertices)):
ridge = ridge_vertices[i]
partname = 'Part-' + str(i+1)
myModel.Part(dimensionality=THREE_D, name=partname, type=
DEFORMABLE_BODY)
try:
myModel.parts[partname].DatumPointByCoordinate(coords=(vertices[ridge[0]][0],vertices[ridge[0]][1], vertices[ridge[0]][2]))
myModel.parts[partname].DatumPointByCoordinate(coords=(vertices[ridge[1]][0],vertices[ridge[1]][1], vertices[ridge[1]][2]))
except IndexError:
myModel.parts[partname].DatumPointByCoordinate(coords=(vertices[i][0],vertices[i][1], vertices[i][2]))
myModel.parts[partname].WirePolyLine(mergeType=IMPRINT, meshable=
ON, points=((myModel.parts[partname].datums[1],
myModel.parts[partname].datums[2]), ))
#### MATERIAL AND SECTION DEFINITION ####
# Truss section
def define_material(network):
myModel.Material(name='Material-2')
myModel.materials['Material-2'].Elastic(table=((network.beam_young, network.beam_poisson), ))
myModel.CircularProfile(name='Profile-1', r=network.beam_profile)
myModel.BeamSection(consistentMassMatrix=False, integration=
DURING_ANALYSIS, material='Material-2', name='Section-2', poissonRatio=0.0,
profile='Profile-1', temperatureVar=LINEAR)
for i in range(len(ridge_vertices)):
partname = 'Part-' + str(i+1)
myModel.parts[partname].SectionAssignment(offset=0.0,
offsetField='', offsetType=MIDDLE_SURFACE, region=Region(
edges=myModel.parts[partname].edges.getSequenceFromMask(mask=('[#1 ]', ), )),
sectionName='Section-2', thicknessAssignment=
FROM_SECTION)
for i in range(len(ridge_vertices)):
partname = 'Part-' + str(i+1)
myModel.parts[partname].assignBeamSectionOrientation(method=
N1_COSINES, n1=(0.0, 0.0, -1.0), region=Region(
edges=myModel.parts[partname].edges.getSequenceFromMask(mask=('[#1 ]', ), )))
mdb.models['Model-1'].rootAssembly.DatumCsysByDefault(CARTESIAN)
#### ASSEMBLY ####
# Creation of instances
def assembly(network):
list_node_label=[]
for i in range(len(ridge_vertices)):
partname = 'Part-' + str(i+1)
instancename = 'Part-' + str(i+1) + '-1'
myModel.rootAssembly.Instance(dependent=OFF, name=instancename,
part=myModel.parts[partname])
for k in range(len(vertices)):
ridge=network.list_nodes_ridges[k]
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
list_node_label.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords).index)
filename = 'node_label_%09d.csv' % test_number
with open(filename,'w') as writeFile:
writer = csv.writer(writeFile,delimiter=',')
writer.writerow(list_node_label)
return list_node_label
# Step Creation
def set_steps(network):
    """Create the nonlinear static step and configure output/stabilisation."""
    # Geometric nonlinearity on (nlgeom=ON); very small minimum increment so
    # the solver can cut back aggressively on convergence trouble.
    myModel.StaticStep(name='Step-1', previous='Initial',maxNumInc=1000, minInc=1e-10, nlgeom=ON)
    #myModel.FieldOutputRequest(name='F-Output-3',createStepName='Step-1', variables=('COORD', 'S','E','SE'),numIntervals=
    #    iterations)
    # Request stresses, strains, displacements, reaction/concentrated forces
    # and nodal coordinates at 50 evenly spaced intervals.
    myModel.fieldOutputRequests['F-Output-1'].setValues(variables=('S', 'E', 'U', 'RF', 'CF', 'COORD'))
    myModel.fieldOutputRequests['F-Output-1'].setValues(
        numIntervals=50)
    myModel.steps['Step-1'].setValues(stabilizationMethod=DISSIPATED_ENERGY_FRACTION,
        continueDampingFactors=True, adaptiveDampingRatio=0.1)
"""
def set_boundary_conditions(network): ## can be changed with the node label list
list_vertices_right = []
for k in network.boundary_nodes_right:
ridge=list_nodes_ridges[k]
#print(ridge)
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
list_vertices_right.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords))
list_vertices_left = []
for k in network.boundary_nodes_left:
ridge=list_nodes_ridges[k]
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
list_vertices_left.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords))
myModel.PinnedBC(createStepName='Initial', localCsys=None, name=
'BC-1', region=Region(vertices=VertexArray(list_vertices_left)))
myModel.DisplacementBC(amplitude=UNSET, createStepName='Step-1',
distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name=
'BC-2', region=Region(vertices=VertexArray(list_vertices_right)), u1=traction_distance, u2=0.0,u3=0.0,ur3=UNSET)
"""
def set_boundary_conditions(network): ## can be changed with the node label list
#list_vertices_right = []
for k in network.boundary_nodes_right:
ridge=list_nodes_ridges[k]
#print(ridge)
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
vertice = myModel.rootAssembly.instances[instancename].vertices.findAt(coords)
if vertice.index==0:
myModel.DisplacementBC(amplitude=UNSET, createStepName='Step-1', distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[:1]), u1=traction_distance, u2=0.0,u3=0.0,ur3=UNSET)
else:
myModel.DisplacementBC(amplitude=UNSET, createStepName='Step-1', distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[1:]), u1=traction_distance, u2=0.0,u3=0.0,ur3=UNSET)
"""
list_vertices_right.append(myModel.rootAssembly.instances[instancename].vertices.findAt(coords))"""
#list_vertices_left = []
for k in network.boundary_nodes_left:
ridge=list_nodes_ridges[k]
#print(ridge)
instancename = 'Part-' + str(ridge[0]+1) + '-1'
try:
coords = (vertices[k][0],vertices[k][1],vertices[k][2])
except IndexError:
coords = (vertices[k][0],vertices[k][1],0.0)
vertice = myModel.rootAssembly.instances[instancename].vertices.findAt(coords)
if vertice.index==0:
myModel.PinnedBC(createStepName='Initial', localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[:1]))
else:
myModel.PinnedBC(createStepName='Initial', localCsys=None, name='BC-'+str(ridge[0]+1), region=Region(vertices=mdb.models['Model-1'].rootAssembly.instances[instancename].vertices[1:]))
## to be deleted and adapted with network
def define_mesh(mask):
number_elements = []
for i in range(len(ridge_vertices)):
instancename = 'Part-' + str(i+1) + '-1'
#myModel.rootAssembly.setElementType(elemTypes=(ElemType(
# elemCode=B21, elemLibrary=STANDARD), ), regions=(
# myModel.rootAssembly.instances[instancename].edges.getSequenceFromMask(
# mask=('[#1 ]', ), ), ))
if int(network.dimension)==2:
myModel.rootAssembly.setElementType(elemTypes=(ElemType(
elemCode=B22, elemLibrary=EXPLICIT), ), regions=(
myModel.rootAssembly.instances[instancename].edges.getSequenceFromMask(
mask=('[#1 ]', ), ), ))
elif int(network.dimension)==3:
myModel.rootAssembly.setElementType(elemTypes=(ElemType(
elemCode=B32, elemLibrary=EXPLICIT), ), regions=(
myModel.rootAssembly.instances[instancename].edges.getSequenceFromMask(
mask=('[#1 ]', ), ), ))
myModel.rootAssembly.seedPartInstance(regions=(
mdb.models['Model-1'].rootAssembly.instances[instancename], ), size=tensile_test.element_size)
mdb.models['Model-1'].rootAssembly.generateMesh(regions=(
mdb.models['Model-1'].rootAssembly.instances[instancename], ))
number_elements.append(len(mdb.models['Model-1'].rootAssembly.instances[instancename].elements))
filename = 'number_elements_%09d.csv' % test_number
with open(filename,'w') as writeFile:
writer = csv.writer(writeFile,delimiter=',')
writer.writerow(number_elements)
list_nodes_ridges=[[] for i in range(len(vertices))]
for i in range(len(ridge_vertices)):
list_nodes_ridges[ridge_vertices[i][0]].append(i)
list_nodes_ridges[ridge_vertices[i][1]].append(i)
def create_connectors(network):
connector_list=[]
for k in range(len(list_nodes_ridges)):
if int(network.dimension)==2: coords = (vertices[k][0],vertices[k][1],0.0)
elif int(network.dimension)==3: coords = (vertices[k][0],vertices[k][1],vertices[k][2])
list_ridge = list_nodes_ridges[k]
if len(list_ridge) > 1:
for i in range(len(list_ridge)-1):
instancename1='Part-'+str(list_ridge[i]+1)+'-1'
instancename2='Part-'+str(list_ridge[i+1]+1)+'-1'
vertice1 = myModel.rootAssembly.instances[instancename1].vertices.findAt(coords)
vertice2 = myModel.rootAssembly.instances[instancename2].vertices.findAt(coords)
connector_list.append((vertice1, vertice2))
myModel.rootAssembly.WirePolyLine(mergeType=IMPRINT, meshable=OFF
, points=tuple(connector_list))
mask = mdb.models['Model-1'].rootAssembly.edges.getMask()
mdb.models['Model-1'].rootAssembly.Set(edges=
mdb.models['Model-1'].rootAssembly.edges.getSequenceFromMask((mask[0],
), ), name='Wire-2-Set-1')
mdb.models['Model-1'].ConnectorSection(name='ConnSect-1', translationalType=
JOIN)
mdb.models['Model-1'].sections['ConnSect-1'].setValues(behaviorOptions=(
ConnectorElasticity(table=((network.connector_coeff, network.connector_coeff,network.connector_coeff), ), independentComponents=(),
components=(4, 5, 6)), ), rotationalType=ROTATION)
mdb.models['Model-1'].sections['ConnSect-1'].behaviorOptions[0].ConnectorOptions(
)
myModel.rootAssembly.SectionAssignment(region=
myModel.rootAssembly.sets['Wire-2-Set-1'], sectionName=
'ConnSect-1')
return mask
define_part()
define_material(network)
list_node_label=assembly(network)
set_steps(network)
mask = create_connectors(network)
network = network.sort_nodes()
set_boundary_conditions(network)
#### JOB ####
job_name = 'Job-'+str(test_number)
def job():
    """Create the Abaqus analysis job, submit it, and block until it finishes."""
    # 4 CPUs / 4 MPI domains; memory capped at 90% of available RAM.
    mdb.Job(atTime=None, contactPrint=OFF, description='', echoPrint=OFF,
        explicitPrecision=SINGLE, getMemoryFromAnalysis=True, historyPrint=OFF,
        memory=90, memoryUnits=PERCENTAGE, model='Model-1', modelPrint=OFF,
        multiprocessingMode=MPI, name=job_name, nodalOutputPrecision=SINGLE,
        numCpus=4, numDomains=4,numGPUs=0, queue=None, resultsFormat=ODB, scratch='', type=
        ANALYSIS, userSubroutine='', waitHours=0, waitMinutes=0)
    mdb.jobs[job_name].submit(consistencyChecking=OFF)
    # Block so the result-extraction code below only runs once the ODB exists.
    mdb.jobs[job_name].waitForCompletion()
define_mesh(mask)
job()
def write_stress_report(odb,filename,network):
    """Sum the RF1 reaction forces on the right-boundary nodes and write the
    resulting curve to *filename* as an Abaqus XY report."""
    picked_nodes =[]
    for node in network.boundary_nodes_right:
        part = network.list_nodes_ridges[node]
        instancename='PART-'+str(part[0]+1)+'-1'
        p = odb.rootAssembly.instances[instancename]
        # Pick the first mesh node of this instance lying on the x=1 face.
        # NOTE(review): 10e-6 equals 1e-5 — a 10x looser tolerance than the
        # literal suggests; confirm the intended value.
        for k in range(len(p.nodes)):
            if p.nodes[k].coordinates[0]>=1-10e-6:
                picked_nodes.append(p.nodes[k:k+1])
                break
        #if p.nodes[-1].coordinates[0]>=1-10e-4: picked_nodes.append(p.nodes[-1:])
        #else: picked_nodes.append(p.nodes[:1])
    node_set_name='node_set_bc_8_' +str(len(network.vertices))
    odb.rootAssembly.NodeSet(name = node_set_name, nodes = picked_nodes)
    reports=session.xyDataListFromField(odb=odb, outputPosition=NODAL, variable=(('RF', NODAL, ((COMPONENT, 'RF1'), )), ),nodeSets=(node_set_name, ))
    # Summing the per-node XYData objects yields the total boundary reaction.
    x_data = sum(reports)
    x0 = session.xyDataObjects[x_data.name]
    session.writeXYReport(fileName=filename, xyData=(x0, ))
from odbAccess import *
o1 = session.openOdb(name=job_name+'.odb',readOnly=False)
session.viewports['Viewport: 1'].setValues(displayedObject=o1)
odb = session.odbs[job_name+'.odb']
nf = NumberFormat(numDigits=6, precision=0, format=SCIENTIFIC)
session.fieldReportOptions.setValues(reportFormat=COMMA_SEPARATED_VALUES,
numberFormat=nf)
for j in range(len(odb.steps)):
stepname = 'Step-%d' % (j+1)
k=0
stress_data = 'stress_data_%02d_%09d.rpt' % (j+1,int(test_number))
write_stress_report(odb,stress_data,network)
for i in range(len(odb.steps[stepname].frames)):
lastFrame=odb.steps[stepname].frames[i]
name='network_vertices_%02d_%02d_%09d.csv' % (j+1,k,int(test_number))
print(i)
print(lastFrame.frameValue, (k)*0.1)
#if abs(lastFrame.frameValue- (k)*0.1) <10e-5:
session.writeFieldReport(fileName=name,append=OFF,sortItem='Node Label',
odb=odb,step=0,frame=lastFrame,outputPosition=NODAL,variable=(('COORD', NODAL),))
k+=1
|
14,587 | 74792f29918f3195ede3621db5396c6023cb0ee6 | import luigi
from luigi.contrib.external_program import ExternalProgramTask
from os import path
from .utils import MetaOutputHandler
from .utils import Wget
from .utils import GlobalParams
from .reference import ReferenceGenome
from .align import FastqAlign
class SortSam(ExternalProgramTask):
    """Sort the aligned SAM into a coordinate-sorted BAM via `samtools sort`."""

    def requires(self):
        return FastqAlign()

    def output(self):
        return luigi.LocalTarget(
            path.join(GlobalParams().base_dir,
                      GlobalParams().exp_name+'.bam')
        )

    def program_args(self):
        # Thread count comes from the AlignProcessing wrapper's `cpus` param.
        return ['samtools', 'sort', '-@', AlignProcessing().cpus,
                self.input()['sam'].path, '-o', self.output().path
               ]
class IndexBam(ExternalProgramTask):
    """Build a BAM index via `samtools index`."""

    def requires(self):
        return SortSam()

    def output(self):
        # The index lives next to the BAM as <name>.bam.bai.
        return luigi.LocalTarget(self.input().path+'.bai')

    def program_args(self):
        return ['samtools', 'index', '-@', AlignProcessing().cpus, self.input().path]
class PicardMarkDuplicates(ExternalProgramTask):
    """Remove PCR duplicates from the sorted BAM with Picard MarkDuplicates."""

    def requires(self):
        # The index must exist before Picard reads the sorted BAM.
        return {'index' : IndexBam(),
                'bam' : SortSam()
               }

    def output(self):
        return {
            'bam' : luigi.LocalTarget( \
                path.join(GlobalParams().base_dir, GlobalParams().exp_name+'_nodup.bam')),
            'metrics' : luigi.LocalTarget( \
                path.join(GlobalParams().base_dir, GlobalParams().exp_name+'_MD.matrix'))
        }

    def program_args(self):
        # REMOVE_DUPLICATES=true drops duplicate reads rather than only
        # flagging them in the output BAM.
        return ['picard', 'MarkDuplicates',
                'I='+self.input()['bam'].path,
                'O='+self.output()['bam'].path,
                'METRICS_FILE='+self.output()['metrics'].path,
                'REMOVE_DUPLICATES=true'
               ]
class IndexNoDup(ExternalProgramTask):
    """Index the deduplicated BAM via `samtools index`."""

    def requires(self):
        return PicardMarkDuplicates()

    def output(self):
        return luigi.LocalTarget(self.input()['bam'].path+'.bai')

    def program_args(self):
        return ['samtools', 'index', '-@', AlignProcessing().cpus, self.input()['bam'].path]
class AlignProcessing(MetaOutputHandler, luigi.WrapperTask):
    """Wrapper exposing the sorted, indexed and deduplicated alignment outputs."""
    # Thread count forwarded to samtools by the sub-tasks above.
    cpus = luigi.Parameter()

    def requires(self):
        return {
            'bam' : SortSam(),
            'bai' : IndexBam(),
            'bamNoDup' : PicardMarkDuplicates(),
            'indexNoDup' : IndexNoDup()
        }
if __name__ == '__main__':
luigi.run(['AlignProcessing',
'--AlignProcessing-cpus', '6',
'--FastqAlign-cpus', '6',
'--FastqAlign-create-report', 'True',
'--GetFastq-fastq1-url', '',
'--GetFastq-fastq2-url', '',
'--GetFastq-from-ebi', 'False',
'--GetFastq-paired-end', 'True',
'--ReferenceGenome-ref-url', 'ftp://hgdownload.cse.ucsc.edu/goldenPath/hg19/bigZips/hg19.2bit',
'--ReferenceGenome-from2bit', 'True',
'--GlobalParams-base-dir', path.abspath(path.curdir),
'--GlobalParams-log-dir', path.abspath(path.curdir),
'--GlobalParams-exp-name', 'hg19'])
|
14,588 | def2729bd2634c947ea50cde869f54db81f92967 | """Discovery service for UniFi."""
import voluptuous as vol
from supervisor.validate import network_port
from ..const import ATTR_HOST, ATTR_PORT
# Discovery payload schema: a host string plus a validated network port.
SCHEMA = vol.Schema(
    {vol.Required(ATTR_HOST): vol.Coerce(str), vol.Required(ATTR_PORT): network_port}
)
|
14,589 | 04e1b6ff2ead4ada8d9e3d04cd1735a95e628437 | from calculos.basicos.operacionesBasicas import *
dividir(4, 2)
|
14,590 | d1813a621a1158f4d1d5641d31aa45b9dcb7c206 | import imaplib
import sys
from recover import *
FILE_NAME = 'mail_address' # Nombre del archivo con las direcciones a analizar
# Datos del usuario (dirección de correo y contraseña)
cred = open('credentials', 'r')
lines = cred.readlines()
USR_EMAIL = lines[0].strip('\n') # Correo electrónico
USR_PW = lines[1] # Contraseña del correo electrónico
# Hostname del servicio de correo, Dirección IMAP
HOSTNAME = 'imap-mail.outlook.com' # Si es Hotmail/Outlook, Gmail = 'imap.gmail.com'
# Arreglo de 2 dimensiones para guardar los datos del archivo FILENAME
lista = [[]]
# Se recuperan las direcciones en el archivo y se depliega un menú para elegir una o todas las direcciones
with open(FILE_NAME, newline='') as file:
data = file.readlines()
number = 1
print("\n Elija una de las siguientes direcciones: ")
for row in data:
if row == '':
break
row = row.strip("\r\n")
print( ' ' + str(number)+ ') ' + row)
lista[number-1].append(row)
number += 1
lista.append([])
print( ' ' + str(number)+ ') ' + 'Todas')
print('\n Escriba el número correspondiente a la dirección de correo elegida: ')
num = input()
# Re-prompt until the user enters a number inside the menu range [1, number].
while True:
    if num.isnumeric() and 1 <= int(num) <= number:
        break
    print("Entrada inválida, por favor ingrese un numero válido.")
    num = input()
# Conexión con el servicio
conn = imaplib.IMAP4_SSL(HOSTNAME)
# Inicio de sesión
try:
conn.login(USR_EMAIL, USR_PW)
print("Se inició sesión correctamente!")
except:
print("Error al iniciar sesión, se recomienda revisar los datos de ingreso.")
sys.exit()
# Se selecciona la bandeja de entrada
response, data = conn.select('INBOX')
if response:
print("Se seleccionó la bandeja de entrada.\n")
# Dependiendo de si se eligió una o todas, se recupera/n la/s direccion/es
# Se llama la función que extrae la información y la alamacena en un archivo de texto
if number == int(num):
for i in range(number-1):
print("______________________________________________________________________")
address = lista[i][0]
print('\n Dirección: ' + lista[i][0])
recover(address, conn)
else:
print("______________________________________________________________________")
address = lista[int(num)-1][0]
print('\n Dirección: ' + address)
recover(address, conn)
# Se cierra la conexión
conn.close()
print('Cerrada la conexión con el servidor.') |
14,591 | 890c13a0d8b6afc49f40c39709d8a8e802a883bb | # coding=utf-8
import socket
def get_plugin_info():
    """Return the metadata dict that describes this vulnerability-check plugin."""
    info = {
        "name": "Jetty 共享缓存区远程泄露",
        "info": "攻击者可利用此漏洞获取其他用户的请求信息,进而获取其权限",
        "exp_url": "https://www.secpulse.com/archives/4911.html",
        "other": "tag:jetty",
        "docker": "",
    }
    return info
def check(ip, port, timeout):
    """Probe *ip*:*port* for the Jetty shared-buffer leak ("Jetleak", CVE-2015-2080).

    A Referer header padded with NUL bytes makes vulnerable Jetty versions echo
    stale buffer contents in their 400 error response.

    @param ip: target host
    @param port: target port (int or numeric string)
    @param timeout: socket timeout in seconds
    @return: description string when the target looks vulnerable, else None
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(timeout)
        try:
            s.connect((ip, int(port)))
            # 15 NUL bytes in the Referer header trigger the buffer over-read.
            flag = "GET / HTTP/1.1\r\nReferer:%s\r\n\r\n" % (chr(0) * 15)
            # Fix: sockets require bytes on Python 3; latin-1 is a 1:1 byte mapping.
            s.send(flag.encode("latin-1"))
            data = s.recv(512).decode("latin-1", "replace")
        finally:
            # Fix: always release the socket, even on connect/timeout errors.
            s.close()
        if 'state=HEADER_VALUE' in data and '400' in data:
            return u"jetty 共享缓存区远程泄露漏洞"
    except Exception:  # narrowed from bare except; any network error means "not confirmed"
        pass
|
14,592 | 6504c4bf81317f2403567cfa52a2a5d5ac108abd | #!/usr/bin/env python
import hid
class dna75:
    """Minimal HID wrapper for an Evolv DNA 75 board; opens the device on construction."""

    # USB vendor/product identifiers for the DNA 75 board.
    _vendorId = 0x268b
    _productId = 0x0408

    def __init__(self):
        # Connect immediately; hid.device.open raises OSError when no board is attached.
        self._connect()

    def _connect(self):
        """Open the HID device, switch to non-blocking reads, and print its identity."""
        self.h = hid.device()
        self.h.open(self._vendorId, self._productId)
        self.h.set_nonblocking(1)
        print('Connecting %s [%s] (%s)' % (
            self.h.get_product_string(),
            self.h.get_manufacturer_string(),
            self.h.get_serial_number_string()))

    def disconnect(self):
        """Close the underlying HID handle."""
        self.h.close()
class helix:
    """Probe for an attached DNA 75 board and report whether one is present."""

    def __init__(self):
        try:
            # Constructing dna75 opens the device; OSError means no board attached.
            board = dna75()
            board.disconnect()
        except OSError:
            print('there is no dna chip available')
# Script entry point: attempt to locate a DNA 75 board and report availability.
if __name__ == '__main__':
    helix()
|
14,593 | 984ebb830e8dedb5e4257f1eb553b6428a27ab11 | #Exploring Datasets with pandas and Matplotlib
#A pie chart is a circualr graphic that displays numeric proportions by dividing a circle (or pie) into proportional slices. You are most likely already familiar with pie charts as it is widely used in business and media. We can create pie charts in Matplotlib by passing in the kind=pie keyword..
import numpy as np # useful for many scientific computing in Python
import pandas as pd # primary data structure library
import matplotlib as mpl
import matplotlib.pyplot as plt
# Load the UN immigration dataset; the sheet has 20 header rows and a 2-row footer.
df_can = pd.read_excel('Canada.xlsx',
                       sheet_name='Canada by Citizenship',
                       skiprows=range(20),
                       skipfooter=2)
print ('Data read into a pandas dataframe!')
# clean up the dataset to remove unnecessary columns (eg. REG)
df_can.drop(['AREA', 'REG', 'DEV', 'Type', 'Coverage'], axis=1, inplace=True)
# let's rename the columns so that they make sense
df_can.rename(columns={'OdName':'Country', 'AreaName':'Continent','RegName':'Region'}, inplace=True)
# for sake of consistency, let's also make all column labels of type string
df_can.columns = list(map(str, df_can.columns))
# set the country name as index - useful for quickly looking up countries using .loc method
df_can.set_index('Country', inplace=True)
# add total column across the remaining columns
# NOTE(review): assumes pandas excludes the non-numeric columns (Continent,
# Region) from the row sum — confirm on the pandas version in use.
df_can['Total'] = df_can.sum(axis=1)
# years that we will be using in this lesson - useful for plotting later on
years = list(map(str, range(1980, 2014)))
print('data dimensions:', df_can.shape)
# Summarize the immigration data by continent using groupby + sum.
# NOTE(review): the axis= argument to groupby is deprecated in recent pandas — verify target version.
df_continents = df_can.groupby('Continent', axis=0).sum()
# note: the output of the groupby method is a `groupby' object.
# we can not use it further until we apply a function (eg .sum())
print(type(df_can.groupby('Continent', axis=0)))
print(df_continents.head())
# Pie chart of total immigration per continent, 1980-2013.
df_continents['Total'].plot(kind='pie',
                            figsize=(5, 6),
                            autopct='%1.1f%%', # add in percentages
                            startangle=90,     # start angle 90° (Africa)
                            )
plt.title('Immigration to Canada by Continent [1980 - 2013]')
plt.axis('equal') # Sets the pie chart to look like a circle.
# add legend
plt.legend(labels=df_continents.index, loc='upper left')
plt.show() |
14,594 | 7830f535d19a039aad8696202c3876231e1a2415 | """
__author__: Kevin Chau
__description: Quantify RNA-seq samples with Kallisto
"""
import os
import subprocess
def quantify(kallisto, fq_dir, transcriptome):
    """Quantify the passed FASTQ files using a subprocess call to Kallisto

    @param kallisto Path to kallisto executable
    @param fq_dir Directory of the FASTQ input files
    @param transcriptome Path to reference transcriptome
    @return captured stdout bytes on success, or 1 when the call fails

    NOTE(review): fq_dir and transcriptome are not yet wired into the command
    line — the argument list still contains a placeholder.
    """
    command = [
        kallisto,
        "OTHER STUFF"
    ]
    try:
        captured = subprocess.check_output(command)
    except subprocess.CalledProcessError as err:
        print(err)
        return 1
    return captured
# No CLI behaviour yet; the module currently exposes quantify() for import only.
if __name__ == '__main__':
    pass
|
14,595 | c45234fcad6d0a0283b7926d891a029a2b31840f | from essentials import *
import pygame, sys
from pygame.locals import *
import types
class Instance:
    """Thin pygame wrapper: owns the window, clock and object list, and runs the
    main loop, delegating per-frame logic to an Update method injected at Start()."""

    #Start
    def __init__ (self,name,resolution,background_color):
        """Create the window and clock.

        name: window caption; resolution: (width, height); background_color:
        RGB tuple used to clear the screen each frame.
        """
        #Start
        pygame.init()
        self.screen = pygame.display.set_mode(resolution)
        #Title
        pygame.display.set_caption(name)
        #Game clock
        self.game_clock = pygame.time.Clock()
        # Seconds per frame; stays 0 until the first FPS reading is available.
        self.delta_time = 0
        #Initializing array
        self.game_objects = []
        self.background_color = background_color

    #Start game
    def Start(self,game_objects,update_method):
        """Store the scene objects, bind update_method to this instance as
        self.Update, and enter the blocking main loop (never returns)."""
        self.game_objects = game_objects
        # MethodType binds the plain function so it receives self when called.
        self.Update = types.MethodType(update_method,self)
        self.Loop()

    #Background stuff
    def Loop(self):
        """Main loop: clear, read input, update, draw, flip; exits the process on QUIT."""
        while True:
            # tick(0) leaves the frame rate uncapped; it only measures elapsed time.
            self.game_clock.tick(0)
            #Background color
            self.screen.fill(self.background_color)
            #Game inputs
            self.keys = pygame.key.get_pressed()
            if self.game_clock.get_fps() > 0:
                self.delta_time = 1/self.game_clock.get_fps()
            #print(self.game_clock.get_fps())
            self.Update()
            # assumes each game object exposes draw_functions, each with a
            # Draw(screen, obj) method — TODO confirm against essentials module
            for obj in self.game_objects:
                for draw_func in obj.draw_functions:
                    draw_func.Draw(self.screen,obj)
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == QUIT:
                    sys.exit(0)
|
14,596 | 5581493f5c7455c64abee4f54f7850704ebd5a9e | from typing import List
class Solution:
    def calculateMinimumHP(self, dungeon: List[List[int]]) -> int:
        """Return the minimum initial health needed to survive crossing *dungeon*
        from top-left to bottom-right, moving only right or down (LeetCode 174).

        dp[i][j] is the minimum health required on entering cell (i, j),
        computed bottom-up from the princess's cell.

        Fix: the boundary sentinel was a hard-coded 9999, which silently
        under-estimates dungeons whose cumulative damage exceeds that bound
        (e.g. [[-20000], [-20000]] returned 29999 instead of 40001). Use a
        true infinity so walls can never win the min().
        """
        rows = len(dungeon)
        cols = len(dungeon[0])
        INF = float('inf')
        dp = [[INF] * (cols + 1) for _ in range(rows + 1)]
        # Seed the two virtual cells next to the goal: 1 HP suffices on exit.
        dp[rows][cols - 1] = 1
        dp[rows - 1][cols] = 1
        for i in range(rows - 1, -1, -1):
            for j in range(cols - 1, -1, -1):
                # Cheapest requirement among the two onward moves, adjusted by
                # this cell's effect; health must never drop below 1.
                need = min(dp[i + 1][j], dp[i][j + 1]) - dungeon[i][j]
                dp[i][j] = max(need, 1)
        return dp[0][0]
|
def list_com(array):
    """Map each number in *array* to a new value, preserving order:
    even numbers are squared, odd numbers become the integer part of
    their square root. Returns a new list; the input is not modified.
    """
    # Idiomatic comprehension replaces the manual loop-and-append.
    return [n ** 2 if n % 2 == 0 else int(n ** 0.5) for n in array]
print(list_com([1, 56, 34, 78, 55, 90, 11, 73])) |
14,598 | d2d316fd509f6779a9614eceab036e4f28b6d1ab | """
Implement an algorithm to determine if a string has all unique characters.
What if you can't use additional data structures?
"""
def is_unique_n_2(string: str) -> bool:
    """
    Compare every character against every later character.
    O(n ^ 2) time, O(1) extra space.
    """
    length = len(string)
    for i in range(length):
        for j in range(i + 1, length):
            if string[i] == string[j]:
                return False
    return True
def is_unique_n_lg(string: str) -> bool:
    """
    Sort the string and compare each adjacent pair; equal neighbours mean a
    duplicate. O(n log n) time, O(n) space for the sorted copy.

    Fix: the loop previously indexed the *original* string instead of the
    sorted copy, so duplicates that were not already adjacent (e.g. 'aba')
    were missed.
    """
    sorted_string = sorted(string)
    for left, right in zip(sorted_string, sorted_string[1:]):
        if left == right:
            return False
    return True
def is_unique_n_dict(string: str) -> bool:
    """
    Track seen characters in a dict; encountering one twice means the
    string is not unique. O(n) time and space.
    """
    seen = {}
    for ch in string:
        if seen.get(ch) is not None:
            return False
        seen[ch] = 1
    return True
def is_unique_n_set(string: str) -> bool:
    """
    A set keeps one copy of each character, so its size equals the string's
    length exactly when there are no duplicates.
    """
    distinct = set(string)
    return len(distinct) == len(string)
def is_unique_n_bit_vector(string: str) -> bool:
    """
    Dict-style tracking, but with one bit per code point in an arbitrarily
    large integer instead of a hash table.
    """
    seen_bits = 0
    for ch in string:
        mask = 1 << ord(ch)
        if seen_bits & mask:
            return False
        seen_bits |= mask
    return True
# Smoke tests: every implementation must agree on these cases
# (covers empty string, whitespace, and non-BMP emoji characters).
for test_case, expected_result in [
    ('asdzxc', True),
    ('111a', False),
    (' ', True),
    ('', True),
    ('😁😁', False),
    ('😁🏘' ,True),
]:
    assert is_unique_n_2(test_case) == expected_result
    assert is_unique_n_lg(test_case) == expected_result
    assert is_unique_n_dict(test_case) == expected_result
    assert is_unique_n_set(test_case) == expected_result
    assert is_unique_n_bit_vector(test_case) == expected_result
|
14,599 | c3aa0272e928821b6be23340e9c2a846003bd6ca | from .base import *
# Settings overlay on top of base.py: offline-compressed assets served by
# WhiteNoise with hashed (manifest) static file names.
DEBUG = True  # NOTE(review): DEBUG=True alongside production-style static serving — confirm this environment is not production
COMPRESS_OFFLINE = True  # django-compressor: compress assets ahead of time, not per-request
LIBSASS_OUTPUT_STYLE = 'compressed'
# Hash-suffixed static filenames enable long-lived browser caching.
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
# WhiteNoise sits directly after SecurityMiddleware so it can serve static files.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware'
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.